summaryrefslogtreecommitdiff
path: root/drivers/block
diff options
context:
space:
mode:
authorPhilipp Reisner <philipp.reisner@linbit.com>2009-04-22 12:23:38 +0200
committerPhilipp Reisner <philipp.reisner@linbit.com>2009-07-29 10:24:47 +0200
commit1ff8d0aa6fb4011b8f49dcb79919946c1bd32196 (patch)
treefcbab8c2afa7cfd53baf1514218b4bc3deba1f7a /drivers/block
parent67ebb6d6d27fdc9462d6719c8066899da3db51d7 (diff)
Removing CamelCase, minor cleanups
9b8b170585e2959d552b0ba592b649d758b933b8 Merge branch 'drbd-8.3' into drbd-8.3-cleanups
1922274c822b34cf4b66843819ccfed97e1da4f1 Improvements to drbd_nl_net_conf() * Removing macros * Fixed the code for
1a59b007715215697968cfaed3f2f159d262c030 Removing a workaround for bugs in blk_queue_stack_limits() in older kernels
56d788e4fa7a4809fc41f8c17a02032fb8793080 Merge branch 'drbd-8.3' into drbd-8.3-cleanups
a95b4cfb595ced59ca6d2b26d450fd7e5e08fec8 The coding style guide says: Constants are ALL_CAPS
84cc8259557954b39fa8b0c57931b08cdc3df3f6 Merge branch 'drbd-8.2' into drbd-8.3
555b9f742cce70430a5769a35128ee3fa06fa9f4 Merge branch 'drbd-8.0' into drbd-8.2
f96e2776b2e49ef992d43a624d7c6be7d0610bfa Removing CamelCase from structs, enums and unions
727aca1fcc001381ab6f0a7229fa7db54b9431ea Removing '#if 0' code
546cbdec6779e997a37eac0d3762f4e0a3a390f2 Changing the remaining constants to UPPER_CASE only
57e478a0d328405ed97c3bada9d617e5d21a3868 Merge branch 'drbd-8.3' into drbd-8.3-cleanups
23b99bfb4ceca5a00c180620ac3f6db91edf7f85 Andi does not want to get it CC, he prefers to pick it off LKML
d362ab639c966107670f4b375cbff757314f5cba Changing the remaining constants to UPPER_CASE only
6fe98c4f0302a28174dd3a852e72fbc6a0caa45e Merge branch 'drbd-8.2' into drbd-8.3
ff327744475ffc66795fc49dcdc232b21589185e Changing the remaining constants to UPPER_CASE only
185392bf5ce63936a5c424b97e38512ab4bcfdb7 Merge branch 'drbd-8.0' into drbd-8.2
5f87618f7272e9ef92b17b2b7c36dcc1c3d59031 Making all constants UPPER_CASE only
2ebf5224634bf5fda709fb54b43a5de6e3c10bee Adding explicit numbers to the return codes to user space
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/drbd/drbd_actlog.c58
-rw-r--r--drivers/block/drbd/drbd_bitmap.c4
-rw-r--r--drivers/block/drbd/drbd_buildtag.c4
-rw-r--r--drivers/block/drbd/drbd_int.h676
-rw-r--r--drivers/block/drbd/drbd_main.c1040
-rw-r--r--drivers/block/drbd/drbd_nl.c576
-rw-r--r--drivers/block/drbd/drbd_proc.c16
-rw-r--r--drivers/block/drbd/drbd_receiver.c916
-rw-r--r--drivers/block/drbd/drbd_req.c94
-rw-r--r--drivers/block/drbd/drbd_req.h4
-rw-r--r--drivers/block/drbd/drbd_strings.c118
-rw-r--r--drivers/block/drbd/drbd_worker.c210
12 files changed, 1839 insertions, 1877 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 90ad8cbeafee..fbbddd3d7f31 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -223,7 +223,7 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
- MTRACE(TraceTypeALExts, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_AL_EXTS, TRACE_LVL_METRICS,
dev_info(DEV, "al_begin_io( sec=%llus (al_enr=%u) (rs_enr=%d) )\n",
(unsigned long long) sector, enr,
(int)BM_SECT_TO_EXT(sector));
@@ -260,7 +260,7 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
struct lc_element *extent;
unsigned long flags;
- MTRACE(TraceTypeALExts, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_AL_EXTS, TRACE_LVL_METRICS,
dev_info(DEV, "al_complete_io( sec=%llus (al_enr=%u) (rs_enr=%d) )\n",
(unsigned long long) sector, enr,
(int)BM_SECT_TO_EXT(sector));
@@ -305,7 +305,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
* TODO reduce maximum latency:
* submit both bios, then wait for both,
* instead of doing two synchronous sector writes. */
- if (mdev->state.conn < Connected && evicted != LC_FREE)
+ if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
@@ -607,7 +607,7 @@ STATIC int atodb_prepare_unless_covered(struct drbd_conf *mdev,
atomic_inc(&wc->count);
/* we already know that we may do this...
- * inc_local_if_state(mdev,Attaching);
+ * inc_local_if_state(mdev,D_ATTACHING);
* just get the extra reference, so that the local_cnt reflects
* the number of pending IO requests DRBD at its backing device.
*/
@@ -629,7 +629,7 @@ out_bio_put:
* drbd_al_to_on_disk_bm:
* Writes the areas of the bitmap which are covered by the AL.
* called when we detach (unconfigure) local storage,
- * or when we go from Primary to Secondary state.
+ * or when we go from R_PRIMARY to R_SECONDARY state.
*/
void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
{
@@ -638,7 +638,7 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
struct bio **bios;
struct drbd_atodb_wait wc;
- ERR_IF (!inc_local_if_state(mdev, Attaching))
+ ERR_IF (!inc_local_if_state(mdev, D_ATTACHING))
return; /* sorry, I don't have any act_log etc... */
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -763,7 +763,7 @@ static inline int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
lc_del(mdev->act_log, al_ext);
spin_unlock_irq(&mdev->al_lock);
- MTRACE(TraceTypeALExts, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_AL_EXTS, TRACE_LVL_METRICS,
if (unlikely(!rv))
dev_info(DEV, "Waiting for extent in drbd_al_shrink()\n");
);
@@ -810,8 +810,8 @@ STATIC int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
switch (mdev->state.conn) {
- case SyncSource: case SyncTarget:
- case PausedSyncS: case PausedSyncT:
+ case C_SYNC_SOURCE: case C_SYNC_TARGET:
+ case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
drbd_resync_finished(mdev);
default:
/* nothing to do */
@@ -860,7 +860,7 @@ STATIC void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
dump_stack();
lc_put(mdev->resync, &ext->lce);
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return;
}
} else {
@@ -916,7 +916,7 @@ STATIC void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
* size byte of data starting from sector. Only clear a bits of the affected
* one ore more _aligned_ BM_BLOCK_SIZE blocks.
*
- * called by worker on SyncTarget and receiver on SyncSource.
+ * called by worker on C_SYNC_TARGET and receiver on C_SYNC_SOURCE.
*
*/
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
@@ -953,7 +953,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
- MTRACE(TraceTypeResync, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_METRICS,
dev_info(DEV, "drbd_set_in_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
(unsigned long long)sector, size, sbnr, ebnr);
);
@@ -973,8 +973,8 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
/* should be roling marks,
* but we estimate only anyways. */
if (mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
- mdev->state.conn != PausedSyncT &&
- mdev->state.conn != PausedSyncS) {
+ mdev->state.conn != C_PAUSED_SYNC_T &&
+ mdev->state.conn != C_PAUSED_SYNC_S) {
mdev->rs_mark_time = jiffies;
mdev->rs_mark_left = drbd_bm_total_weight(mdev);
}
@@ -1032,7 +1032,7 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
- MTRACE(TraceTypeResync, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_METRICS,
dev_info(DEV, "drbd_set_out_of_sync: sector=%llus size=%u "
"sbnr=%lu ebnr=%lu\n",
(unsigned long long)sector, size, sbnr, ebnr);
@@ -1133,7 +1133,7 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
int i, sig;
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
(unsigned long long)sector, enr);
);
@@ -1183,7 +1183,7 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
int i;
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "drbd_try_rs_begin_io: sector=%llus\n",
(unsigned long long)sector);
);
@@ -1203,7 +1203,7 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
* the lc_put here...
* we also have to wake_up
*/
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "dropping %u, aparently got 'synced' "
"by application io\n", mdev->resync_wenr);
);
@@ -1232,7 +1232,7 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
* but then could not set BME_LOCKED,
* so we tried again.
* drop the extra reference. */
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "dropping extra reference on %u\n", enr);
);
bm_ext->lce.refcnt--;
@@ -1241,7 +1241,7 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
goto check_al;
} else {
if (mdev->resync_locked > mdev->resync->nr_elements-3) {
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "resync_locked = %u!\n", mdev->resync_locked);
);
goto try_again;
@@ -1268,7 +1268,7 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
goto check_al;
}
check_al:
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "checking al for %u\n", enr);
);
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
@@ -1284,7 +1284,7 @@ proceed:
return 0;
try_again:
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "need to try again for %u\n", enr);
);
if (bm_ext)
@@ -1299,7 +1299,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
unsigned long flags;
- MTRACE(TraceTypeResync, TraceLvlAll,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_ALL,
dev_info(DEV, "drbd_rs_complete_io: sector=%llus (rs_enr=%d)\n",
(long long)sector, enr);
);
@@ -1336,13 +1336,13 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
*/
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
- MTRACE(TraceTypeResync, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_METRICS,
dev_info(DEV, "drbd_rs_cancel_all\n");
);
spin_lock_irq(&mdev->al_lock);
- if (inc_local_if_state(mdev, Failed)) { /* Makes sure ->resync is there. */
+ if (inc_local_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
lc_reset(mdev->resync);
dec_local(mdev);
}
@@ -1363,13 +1363,13 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
struct bm_extent *bm_ext;
int i;
- MTRACE(TraceTypeResync, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_METRICS,
dev_info(DEV, "drbd_rs_del_all\n");
);
spin_lock_irq(&mdev->al_lock);
- if (inc_local_if_state(mdev, Failed)) {
+ if (inc_local_if_state(mdev, D_FAILED)) {
/* ok, ->resync is there. */
for (i = 0; i < mdev->resync->nr_elements; i++) {
bm_ext = (struct bm_extent *) lc_entry(mdev->resync, i);
@@ -1406,7 +1406,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
/* Record information on a failure to resync the specified blocks
*
- * called on SyncTarget when resync write fails or NegRSDReply received
+ * called on C_SYNC_TARGET when resync write fails or P_NEG_RS_DREPLY received
*
*/
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
@@ -1417,7 +1417,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
sector_t esector, nr_sectors;
int wake_up = 0;
- MTRACE(TraceTypeResync, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_SUMMARY,
dev_info(DEV, "drbd_rs_failed_io: sector=%llus, size=%u\n",
(unsigned long long)sector, size);
);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index d8e6d493012e..e602c778e712 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -580,7 +580,7 @@ unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
unsigned long flags;
/* if I don't have a disk, I don't know about out-of-sync status */
- if (!inc_local_if_state(mdev, Negotiating))
+ if (!inc_local_if_state(mdev, D_NEGOTIATING))
return 0;
ERR_IF(!b) return 0;
@@ -842,7 +842,7 @@ STATIC int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
drbd_blk_run_queue(bdev_get_queue(mdev->bc->md_bdev));
wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
- MTRACE(TraceTypeMDIO, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_MD_IO, TRACE_LVL_SUMMARY,
dev_info(DEV, "%s of bitmap took %lu jiffies\n",
rw == READ ? "reading" : "writing", jiffies - now);
);
diff --git a/drivers/block/drbd/drbd_buildtag.c b/drivers/block/drbd/drbd_buildtag.c
index a057f0a3d098..617078b3dc33 100644
--- a/drivers/block/drbd/drbd_buildtag.c
+++ b/drivers/block/drbd/drbd_buildtag.c
@@ -2,6 +2,6 @@
#include <linux/drbd_config.h>
const char *drbd_buildtag(void)
{
- return "GIT-hash: ae6080852f8359c8c175f90c3f3daa01409e1d1c drbd/linux/drbd.h"
- " build by phil@fat-tyre, 2009-04-17 15:14:48";
+ return "GIT-hash: 1a59b007715215697968cfaed3f2f159d262c030 drbd/drbd_nl.c"
+ " build by phil@fat-tyre, 2009-04-22 11:36:29";
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 1b51051e2fcf..94138cc08943 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -181,116 +181,113 @@ extern struct ratelimit_state drbd_ratelimit_state;
* on the wire
*********************************************************************/
-enum Drbd_Packet_Cmd {
+enum drbd_packets {
/* receiver (data socket) */
- Data = 0x00,
- DataReply = 0x01, /* Response to DataRequest */
- RSDataReply = 0x02, /* Response to RSDataRequest */
- Barrier = 0x03,
- ReportBitMap = 0x04,
- BecomeSyncTarget = 0x05,
- BecomeSyncSource = 0x06,
- UnplugRemote = 0x07, /* Used at various times to hint the peer */
- DataRequest = 0x08, /* Used to ask for a data block */
- RSDataRequest = 0x09, /* Used to ask for a data block for resync */
- SyncParam = 0x0a,
- ReportProtocol = 0x0b,
- ReportUUIDs = 0x0c,
- ReportSizes = 0x0d,
- ReportState = 0x0e,
- ReportSyncUUID = 0x0f,
- AuthChallenge = 0x10,
- AuthResponse = 0x11,
- StateChgRequest = 0x12,
+ P_DATA = 0x00,
+ P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
+ P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */
+ P_BARRIER = 0x03,
+ P_BITMAP = 0x04,
+ P_BECOME_SYNC_TARGET = 0x05,
+ P_BECOME_SYNC_SOURCE = 0x06,
+ P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */
+ P_DATA_REQUEST = 0x08, /* Used to ask for a data block */
+ P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */
+ P_SYNC_PARAM = 0x0a,
+ P_PROTOCOL = 0x0b,
+ P_UUIDS = 0x0c,
+ P_SIZES = 0x0d,
+ P_STATE = 0x0e,
+ P_SYNC_UUID = 0x0f,
+ P_AUTH_CHALLENGE = 0x10,
+ P_AUTH_RESPONSE = 0x11,
+ P_STATE_CHG_REQ = 0x12,
/* asender (meta socket */
- Ping = 0x13,
- PingAck = 0x14,
- RecvAck = 0x15, /* Used in protocol B */
- WriteAck = 0x16, /* Used in protocol C */
- RSWriteAck = 0x17, /* Is a WriteAck, additionally call set_in_sync(). */
- DiscardAck = 0x18, /* Used in proto C, two-primaries conflict detection */
- NegAck = 0x19, /* Sent if local disk is unusable */
- NegDReply = 0x1a, /* Local disk is broken... */
- NegRSDReply = 0x1b, /* Local disk is broken... */
- BarrierAck = 0x1c,
- StateChgReply = 0x1d,
+ P_PING = 0x13,
+ P_PING_ACK = 0x14,
+ P_RECV_ACK = 0x15, /* Used in protocol B */
+ P_WRITE_ACK = 0x16, /* Used in protocol C */
+ P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
+ P_DISCARD_ACK = 0x18, /* Used in proto C, two-primaries conflict detection */
+ P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
+ P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
+ P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
+ P_BARRIER_ACK = 0x1c,
+ P_STATE_CHG_REPLY = 0x1d,
/* "new" commands, no longer fitting into the ordering scheme above */
- OVRequest = 0x1e, /* data socket */
- OVReply = 0x1f,
- OVResult = 0x20, /* meta socket */
- CsumRSRequest = 0x21, /* data socket */
- RSIsInSync = 0x22, /* meta socket */
- SyncParam89 = 0x23, /* data socket, protocol version 89 replacement for SyncParam */
- ReportCBitMap = 0x24, /* compressed or otherwise encoded bitmap transfer */
+ P_OV_REQUEST = 0x1e, /* data socket */
+ P_OV_REPLY = 0x1f,
+ P_OV_RESULT = 0x20, /* meta socket */
+ P_CSUM_RS_REQUEST = 0x21, /* data socket */
+ P_RS_IS_IN_SYNC = 0x22, /* meta socket */
+ P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
+ P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
- MAX_CMD = 0x25,
- MayIgnore = 0x100, /* Flag to test if (cmd > MayIgnore) ... */
- MAX_OPT_CMD = 0x101,
+ P_MAX_CMD = 0x25,
+ P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
+ P_MAX_OPT_CMD = 0x101,
/* special command ids for handshake */
- HandShakeM = 0xfff1, /* First Packet on the MetaSock */
- HandShakeS = 0xfff2, /* First Packet on the Socket */
+ P_HAND_SHAKE_M = 0xfff1, /* First Packet on the MetaSock */
+ P_HAND_SHAKE_S = 0xfff2, /* First Packet on the Socket */
- HandShake = 0xfffe /* FIXED for the next century! */
+ P_HAND_SHAKE = 0xfffe /* FIXED for the next century! */
};
-static inline const char *cmdname(enum Drbd_Packet_Cmd cmd)
+static inline const char *cmdname(enum drbd_packets cmd)
{
/* THINK may need to become several global tables
* when we want to support more than
* one PRO_VERSION */
static const char *cmdnames[] = {
- [Data] = "Data",
- [DataReply] = "DataReply",
- [RSDataReply] = "RSDataReply",
- [Barrier] = "Barrier",
- [ReportBitMap] = "ReportBitMap",
- [BecomeSyncTarget] = "BecomeSyncTarget",
- [BecomeSyncSource] = "BecomeSyncSource",
- [UnplugRemote] = "UnplugRemote",
- [DataRequest] = "DataRequest",
- [RSDataRequest] = "RSDataRequest",
- [SyncParam] = "SyncParam",
- [SyncParam89] = "SyncParam89",
- [ReportProtocol] = "ReportProtocol",
- [ReportUUIDs] = "ReportUUIDs",
- [ReportSizes] = "ReportSizes",
- [ReportState] = "ReportState",
- [ReportSyncUUID] = "ReportSyncUUID",
- [AuthChallenge] = "AuthChallenge",
- [AuthResponse] = "AuthResponse",
- [Ping] = "Ping",
- [PingAck] = "PingAck",
- [RecvAck] = "RecvAck",
- [WriteAck] = "WriteAck",
- [RSWriteAck] = "RSWriteAck",
- [DiscardAck] = "DiscardAck",
- [NegAck] = "NegAck",
- [NegDReply] = "NegDReply",
- [NegRSDReply] = "NegRSDReply",
- [BarrierAck] = "BarrierAck",
- [StateChgRequest] = "StateChgRequest",
- [StateChgReply] = "StateChgReply",
- [OVRequest] = "OVRequest",
- [OVReply] = "OVReply",
- [OVResult] = "OVResult",
- [CsumRSRequest] = "CsumRSRequest",
- [RSIsInSync] = "RSIsInSync",
- [ReportCBitMap] = "ReportCBitMap",
- [MAX_CMD] = NULL,
+ [P_DATA] = "Data",
+ [P_DATA_REPLY] = "DataReply",
+ [P_RS_DATA_REPLY] = "RSDataReply",
+ [P_BARRIER] = "Barrier",
+ [P_BITMAP] = "ReportBitMap",
+ [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
+ [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
+ [P_UNPLUG_REMOTE] = "UnplugRemote",
+ [P_DATA_REQUEST] = "DataRequest",
+ [P_RS_DATA_REQUEST] = "RSDataRequest",
+ [P_SYNC_PARAM] = "SyncParam",
+ [P_SYNC_PARAM89] = "SyncParam89",
+ [P_PROTOCOL] = "ReportProtocol",
+ [P_UUIDS] = "ReportUUIDs",
+ [P_SIZES] = "ReportSizes",
+ [P_STATE] = "ReportState",
+ [P_SYNC_UUID] = "ReportSyncUUID",
+ [P_AUTH_CHALLENGE] = "AuthChallenge",
+ [P_AUTH_RESPONSE] = "AuthResponse",
+ [P_PING] = "Ping",
+ [P_PING_ACK] = "PingAck",
+ [P_RECV_ACK] = "RecvAck",
+ [P_WRITE_ACK] = "WriteAck",
+ [P_RS_WRITE_ACK] = "RSWriteAck",
+ [P_DISCARD_ACK] = "DiscardAck",
+ [P_NEG_ACK] = "NegAck",
+ [P_NEG_DREPLY] = "NegDReply",
+ [P_NEG_RS_DREPLY] = "NegRSDReply",
+ [P_BARRIER_ACK] = "BarrierAck",
+ [P_STATE_CHG_REQ] = "StateChgRequest",
+ [P_STATE_CHG_REPLY] = "StateChgReply",
+ [P_OV_REQUEST] = "OVRequest",
+ [P_OV_REPLY] = "OVReply",
+ [P_OV_RESULT] = "OVResult",
+ [P_MAX_CMD] = NULL,
};
- if (cmd == HandShakeM)
+ if (cmd == P_HAND_SHAKE_M)
return "HandShakeM";
- if (cmd == HandShakeS)
+ if (cmd == P_HAND_SHAKE_S)
return "HandShakeS";
- if (cmd == HandShake)
+ if (cmd == P_HAND_SHAKE)
return "HandShake";
- if (cmd >= MAX_CMD)
+ if (cmd >= P_MAX_CMD)
return "Unknown";
return cmdnames[cmd];
}
@@ -308,7 +305,7 @@ struct bm_xfer_ctx {
unsigned long bit_offset;
unsigned long word_offset;
- /* statistics; index: (h->command == ReportBitMap) */
+ /* statistics; index: (h->command == P_BITMAP) */
unsigned packets[2];
unsigned bytes[2];
};
@@ -345,7 +342,7 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
* NOTE that the payload starts at a long aligned offset,
* regardless of 32 or 64 bit arch!
*/
-struct Drbd_Header {
+struct p_header {
u32 magic;
u16 command;
u16 length; /* bytes of data after this header */
@@ -354,19 +351,19 @@ struct Drbd_Header {
/* 8 bytes. packet FIXED for the next century! */
/*
- * short commands, packets without payload, plain Drbd_Header:
- * Ping
- * PingAck
- * BecomeSyncTarget
- * BecomeSyncSource
- * UnplugRemote
+ * short commands, packets without payload, plain p_header:
+ * P_PING
+ * P_PING_ACK
+ * P_BECOME_SYNC_TARGET
+ * P_BECOME_SYNC_SOURCE
+ * P_UNPLUG_REMOTE
*/
/*
* commands with out-of-struct payload:
- * ReportBitMap (no additional fields)
- * Data, DataReply (see Drbd_Data_Packet)
- * ReportCBitMap (see receive_compressed_bitmap)
+ * P_BITMAP (no additional fields)
+ * P_DATA, P_DATA_REPLY (see p_data)
+ * P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
*/
/* these defines must not be changed without changing the protocol version */
@@ -374,8 +371,8 @@ struct Drbd_Header {
#define DP_RW_SYNC 2
#define DP_MAY_SET_IN_SYNC 4
-struct Drbd_Data_Packet {
- struct Drbd_Header head;
+struct p_data {
+ struct p_header head;
u64 sector; /* 64 bits sector number */
u64 block_id; /* to identify the request in protocol B&C */
u32 seq_num;
@@ -384,14 +381,14 @@ struct Drbd_Data_Packet {
/*
* commands which share a struct:
- * Drbd_BlockAck_Packet:
- * RecvAck (proto B), WriteAck (proto C),
- * DiscardAck (proto C, two-primaries conflict detection)
- * Drbd_BlockRequest_Packet:
- * DataRequest, RSDataRequest
+ * p_block_ack:
+ * P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
+ * P_DISCARD_ACK (proto C, two-primaries conflict detection)
+ * p_block_req:
+ * P_DATA_REQUEST, P_RS_DATA_REQUEST
*/
-struct Drbd_BlockAck_Packet {
- struct Drbd_Header head;
+struct p_block_ack {
+ struct p_header head;
u64 sector;
u64 block_id;
u32 blksize;
@@ -399,8 +396,8 @@ struct Drbd_BlockAck_Packet {
} __attribute((packed));
-struct Drbd_BlockRequest_Packet {
- struct Drbd_Header head;
+struct p_block_req {
+ struct p_header head;
u64 sector;
u64 block_id;
u32 blksize;
@@ -409,15 +406,15 @@ struct Drbd_BlockRequest_Packet {
/*
* commands with their own struct for additional fields:
- * HandShake
- * Barrier
- * BarrierAck
- * SyncParam
+ * P_HAND_SHAKE
+ * P_BARRIER
+ * P_BARRIER_ACK
+ * P_SYNC_PARAM
* ReportParams
*/
-struct Drbd_HandShake_Packet {
- struct Drbd_Header head; /* 8 bytes */
+struct p_handshake {
+ struct p_header head; /* 8 bytes */
u32 protocol_min;
u32 feature_flags;
u32 protocol_max;
@@ -431,36 +428,36 @@ struct Drbd_HandShake_Packet {
} __attribute((packed));
/* 80 bytes, FIXED for the next century */
-struct Drbd_Barrier_Packet {
- struct Drbd_Header head;
+struct p_barrier {
+ struct p_header head;
u32 barrier; /* barrier number _handle_ only */
u32 pad; /* to multiple of 8 Byte */
} __attribute((packed));
-struct Drbd_BarrierAck_Packet {
- struct Drbd_Header head;
+struct p_barrier_ack {
+ struct p_header head;
u32 barrier;
u32 set_size;
} __attribute((packed));
-struct Drbd_SyncParam_Packet {
- struct Drbd_Header head;
+struct p_rs_param {
+ struct p_header head;
u32 rate;
/* Since protocol version 88 and higher. */
char verify_alg[0];
} __attribute((packed));
-struct Drbd_SyncParam89_Packet {
- struct Drbd_Header head;
+struct p_rs_param_89 {
+ struct p_header head;
u32 rate;
/* protocol version 89: */
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
} __attribute((packed));
-struct Drbd_Protocol_Packet {
- struct Drbd_Header head;
+struct p_protocol {
+ struct p_header head;
u32 protocol;
u32 after_sb_0p;
u32 after_sb_1p;
@@ -473,18 +470,18 @@ struct Drbd_Protocol_Packet {
} __attribute((packed));
-struct Drbd_GenCnt_Packet {
- struct Drbd_Header head;
- u64 uuid[EXT_UUID_SIZE];
+struct p_uuids {
+ struct p_header head;
+ u64 uuid[UI_EXTENDED_SIZE];
} __attribute((packed));
-struct Drbd_SyncUUID_Packet {
- struct Drbd_Header head;
+struct p_rs_uuid {
+ struct p_header head;
u64 uuid;
} __attribute((packed));
-struct Drbd_Sizes_Packet {
- struct Drbd_Header head;
+struct p_sizes {
+ struct p_header head;
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
@@ -492,23 +489,23 @@ struct Drbd_Sizes_Packet {
u32 queue_order_type;
} __attribute((packed));
-struct Drbd_State_Packet {
- struct Drbd_Header head;
+struct p_state {
+ struct p_header head;
u32 state;
} __attribute((packed));
-struct Drbd_Req_State_Packet {
- struct Drbd_Header head;
+struct p_req_state {
+ struct p_header head;
u32 mask;
u32 val;
} __attribute((packed));
-struct Drbd_RqS_Reply_Packet {
- struct Drbd_Header head;
+struct p_req_state_reply {
+ struct p_header head;
u32 retcode;
} __attribute((packed));
-struct Drbd06_Parameter_P {
+struct p_drbd06_param {
u64 size;
u32 state;
u32 blksize;
@@ -518,8 +515,8 @@ struct Drbd06_Parameter_P {
u32 bit_map_gen[5];
} __attribute((packed));
-struct Drbd_Discard_Packet {
- struct Drbd_Header head;
+struct p_discard {
+ struct p_header head;
u64 block_id;
u32 seq_num;
u32 pad;
@@ -527,7 +524,7 @@ struct Drbd_Discard_Packet {
/* Valid values for the encoding field.
* Bump proto version when changing this. */
-enum Drbd_bitmap_code {
+enum drbd_bitmap_code {
RLE_VLI_Bytes = 0,
RLE_VLI_BitsFibD_0_1 = 1,
RLE_VLI_BitsFibD_1_1 = 2,
@@ -536,9 +533,9 @@ enum Drbd_bitmap_code {
RLE_VLI_BitsFibD_3_5 = 5,
};
-struct Drbd_Compressed_Bitmap_Packet {
- struct Drbd_Header head;
- /* (encoding & 0x0f): actual encoding, see enum Drbd_bitmap_code
+struct p_compressed_bm {
+ struct p_header head;
+ /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
* (encoding & 0x80): polarity (set/unset) of first runlength
* ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
* used to pad up to head.length bytes
@@ -548,93 +545,93 @@ struct Drbd_Compressed_Bitmap_Packet {
u8 code[0];
} __attribute((packed));
-static inline enum Drbd_bitmap_code
-DCBP_get_code(struct Drbd_Compressed_Bitmap_Packet *p)
+static inline enum drbd_bitmap_code
+DCBP_get_code(struct p_compressed_bm *p)
{
- return (enum Drbd_bitmap_code)(p->encoding & 0x0f);
+ return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}
static inline void
-DCBP_set_code(struct Drbd_Compressed_Bitmap_Packet *p, enum Drbd_bitmap_code code)
+DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
BUG_ON(code & ~0xf);
p->encoding = (p->encoding & ~0xf) | code;
}
static inline int
-DCBP_get_start(struct Drbd_Compressed_Bitmap_Packet *p)
+DCBP_get_start(struct p_compressed_bm *p)
{
return (p->encoding & 0x80) != 0;
}
static inline void
-DCBP_set_start(struct Drbd_Compressed_Bitmap_Packet *p, int set)
+DCBP_set_start(struct p_compressed_bm *p, int set)
{
p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}
static inline int
-DCBP_get_pad_bits(struct Drbd_Compressed_Bitmap_Packet *p)
+DCBP_get_pad_bits(struct p_compressed_bm *p)
{
return (p->encoding >> 4) & 0x7;
}
static inline void
-DCBP_set_pad_bits(struct Drbd_Compressed_Bitmap_Packet *p, int n)
+DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
{
BUG_ON(n & ~0x7);
p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
-/* one bitmap packet, including the Drbd_Header,
+/* one bitmap packet, including the p_header,
* should fit within one _architecture independend_ page.
* so we need to use the fixed size 4KiB page size
* most architechtures have used for a long time.
*/
-#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct Drbd_Header))
+#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header))
#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
-#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct Drbd_Compressed_Bitmap_Packet))
+#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
#if (PAGE_SIZE < 4096)
/* drbd_send_bitmap / receive_bitmap would break horribly */
#error "PAGE_SIZE too small"
#endif
-union Drbd_Polymorph_Packet {
- struct Drbd_Header head;
- struct Drbd_HandShake_Packet HandShake;
- struct Drbd_Data_Packet Data;
- struct Drbd_BlockAck_Packet BlockAck;
- struct Drbd_Barrier_Packet Barrier;
- struct Drbd_BarrierAck_Packet BarrierAck;
- struct Drbd_SyncParam89_Packet SyncParam89;
- struct Drbd_Protocol_Packet Protocol;
- struct Drbd_Sizes_Packet Sizes;
- struct Drbd_GenCnt_Packet GenCnt;
- struct Drbd_State_Packet State;
- struct Drbd_Req_State_Packet ReqState;
- struct Drbd_RqS_Reply_Packet RqSReply;
- struct Drbd_BlockRequest_Packet BlockRequest;
+union p_polymorph {
+ struct p_header header;
+ struct p_handshake handshake;
+ struct p_data data;
+ struct p_block_ack block_ack;
+ struct p_barrier barrier;
+ struct p_barrier_ack barrier_ack;
+ struct p_rs_param_89 rs_param_89;
+ struct p_protocol protocol;
+ struct p_sizes sizes;
+ struct p_uuids uuids;
+ struct p_state state;
+ struct p_req_state req_state;
+ struct p_req_state_reply req_state_reply;
+ struct p_block_req block_req;
} __attribute((packed));
/**********************************************************************/
-enum Drbd_thread_state {
+enum drbd_thread_state {
None,
Running,
Exiting,
Restarting
};
-struct Drbd_thread {
+struct drbd_thread {
spinlock_t t_lock;
struct task_struct *task;
struct completion stop;
- enum Drbd_thread_state t_state;
- int (*function) (struct Drbd_thread *);
+ enum drbd_thread_state t_state;
+ int (*function) (struct drbd_thread *);
struct drbd_conf *mdev;
int reset_cpu_mask;
};
-static inline enum Drbd_thread_state get_t_state(struct Drbd_thread *thi)
+static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
/* THINK testing the t_state seems to be uncritical in all cases
* (but thread_{start,stop}), so we can read it *without* the lock.
@@ -649,7 +646,7 @@ static inline enum Drbd_thread_state get_t_state(struct Drbd_thread *thi)
* Having this as the first member of a struct provides sort of "inheritance".
* "derived" structs can be "drbd_queue_work()"ed.
* The callback should know and cast back to the descendant struct.
- * drbd_request and Tl_epoch_entry are descendants of drbd_work.
+ * drbd_request and drbd_epoch_entry are descendants of drbd_work.
*/
struct drbd_work;
typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
@@ -658,7 +655,7 @@ struct drbd_work {
drbd_work_cb cb;
};
-struct drbd_barrier;
+struct drbd_tl_epoch;
struct drbd_request {
struct drbd_work w;
struct drbd_conf *mdev;
@@ -673,7 +670,7 @@ struct drbd_request {
* starting a new epoch...
*/
- /* up to here, the struct layout is identical to Tl_epoch_entry;
+ /* up to here, the struct layout is identical to drbd_epoch_entry;
* we might be able to use that to our advantage... */
struct list_head tl_requests; /* ring list in the transfer log */
@@ -683,10 +680,10 @@ struct drbd_request {
unsigned long start_time;
};
-struct drbd_barrier {
+struct drbd_tl_epoch {
struct drbd_work w;
struct list_head requests; /* requests before */
- struct drbd_barrier *next; /* pointer to the next barrier */
+ struct drbd_tl_epoch *next; /* pointer to the next barrier */
unsigned int br_number; /* the barriers identifier. */
int n_req; /* number of requests attached before this barrier */
};
@@ -696,8 +693,8 @@ struct drbd_request;
/* These Tl_epoch_entries may be in one of 6 lists:
active_ee .. data packet being written
sync_ee .. syncer block being written
- done_ee .. block written, need to send WriteAck
- read_ee .. [RS]DataRequest being read
+ done_ee .. block written, need to send P_WRITE_ACK
+ read_ee .. [RS]P_DATA_REQUEST being read
*/
struct drbd_epoch {
@@ -717,7 +714,7 @@ enum {
DE_IS_FINISHING,
};
-struct Tl_epoch_entry {
+struct drbd_epoch_entry {
struct drbd_work w;
struct drbd_conf *mdev;
struct bio *private_bio;
@@ -752,7 +749,7 @@ enum {
/* global flag bits */
enum {
- CREATE_BARRIER, /* next Data is preceeded by a Barrier */
+ CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
SIGNAL_ASENDER, /* whether asender wants to be interrupted */
SEND_PING, /* whether asender should send a ping asap */
WORK_PENDING, /* completion flag for drbd_disconnect */
@@ -767,7 +764,7 @@ enum {
CL_ST_CHG_FAIL,
CRASHED_PRIMARY, /* This node was a crashed primary.
* Gets cleared when the state.conn
- * goes into Connected state. */
+ * goes into C_CONNECTED state. */
WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
CONSIDER_RESYNC,
@@ -806,15 +803,15 @@ struct drbd_socket {
struct socket *socket;
/* this way we get our
* send/receive buffers off the stack */
- union Drbd_Polymorph_Packet sbuf;
- union Drbd_Polymorph_Packet rbuf;
+ union p_polymorph sbuf;
+ union p_polymorph rbuf;
};
struct drbd_md {
u64 md_offset; /* sector offset to 'super' block */
u64 la_size_sect; /* last agreed size, unit sectors */
- u64 uuid[UUID_SIZE];
+ u64 uuid[UI_SIZE];
u64 device_uuid;
u32 flags;
u32 md_size_sect;
@@ -892,9 +889,9 @@ struct drbd_conf {
struct timer_list md_sync_timer;
/* Used after attach while negotiating new disk state. */
- union drbd_state_t new_state_tmp;
+ union drbd_state new_state_tmp;
- union drbd_state_t state;
+ union drbd_state state;
wait_queue_head_t misc_wait;
wait_queue_head_t state_wait; /* upon each state change. */
unsigned int send_cnt;
@@ -910,9 +907,9 @@ struct drbd_conf {
atomic_t local_cnt; /* Waiting for local completion */
atomic_t net_cnt; /* Users of net_conf */
spinlock_t req_lock;
- struct drbd_barrier *unused_spare_barrier; /* for pre-allocation */
- struct drbd_barrier *newest_barrier;
- struct drbd_barrier *oldest_barrier;
+ struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
+ struct drbd_tl_epoch *newest_tle;
+ struct drbd_tl_epoch *oldest_tle;
struct list_head out_of_sequence_requests;
struct hlist_head *tl_hash;
unsigned int tl_hash_s;
@@ -940,9 +937,9 @@ struct drbd_conf {
struct crypto_hash *csums_tfm;
struct crypto_hash *verify_tfm;
- struct Drbd_thread receiver;
- struct Drbd_thread worker;
- struct Drbd_thread asender;
+ struct drbd_thread receiver;
+ struct drbd_thread worker;
+ struct drbd_thread asender;
struct drbd_bitmap *bitmap;
unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
@@ -968,7 +965,7 @@ struct drbd_conf {
unsigned int ee_hash_s;
/* this one is protected by ee_lock, single thread */
- struct Tl_epoch_entry *last_write_w_barrier;
+ struct drbd_epoch_entry *last_write_w_barrier;
int next_barrier_nr;
struct hlist_head *app_reads_hash; /* is proteced by req_lock */
@@ -1049,26 +1046,26 @@ static inline void drbd_put_data_sock(struct drbd_conf *mdev)
/* drbd_main.c */
enum chg_state_flags {
- ChgStateHard = 1,
- ChgStateVerbose = 2,
- ChgWaitComplete = 4,
- ChgSerialize = 8,
- ChgOrdered = ChgWaitComplete + ChgSerialize,
+ CS_HARD = 1,
+ CS_VERBOSE = 2,
+ CS_WAIT_COMPLETE = 4,
+ CS_SERIALIZE = 8,
+ CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
};
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
- union drbd_state_t mask, union drbd_state_t val);
-extern void drbd_force_state(struct drbd_conf *, union drbd_state_t,
- union drbd_state_t);
-extern int _drbd_request_state(struct drbd_conf *, union drbd_state_t,
- union drbd_state_t, enum chg_state_flags);
-extern int __drbd_set_state(struct drbd_conf *, union drbd_state_t,
+ union drbd_state mask, union drbd_state val);
+extern void drbd_force_state(struct drbd_conf *, union drbd_state,
+ union drbd_state);
+extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
+ union drbd_state, enum chg_state_flags);
+extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
enum chg_state_flags, struct completion *done);
-extern void print_st_err(struct drbd_conf *, union drbd_state_t,
- union drbd_state_t, int);
-extern int drbd_thread_start(struct Drbd_thread *thi);
-extern void _drbd_thread_stop(struct Drbd_thread *thi, int restart, int wait);
+extern void print_st_err(struct drbd_conf *, union drbd_state,
+ union drbd_state, int);
+extern int drbd_thread_start(struct drbd_thread *thi);
+extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
extern cpumask_t drbd_calc_cpu_mask(struct drbd_conf *mdev);
@@ -1080,7 +1077,7 @@ extern void drbd_free_resources(struct drbd_conf *mdev);
extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
unsigned int set_size);
extern void tl_clear(struct drbd_conf *mdev);
-extern void _tl_add_barrier(struct drbd_conf *, struct drbd_barrier *);
+extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
extern void drbd_free_sock(struct drbd_conf *mdev);
extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
void *buf, size_t size, unsigned msg_flags);
@@ -1092,39 +1089,39 @@ extern int drbd_send_sizes(struct drbd_conf *mdev);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h,
+ enum drbd_packets cmd, struct p_header *h,
size_t size, unsigned msg_flags);
#define USE_DATA_SOCKET 1
#define USE_META_SOCKET 0
extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h,
+ enum drbd_packets cmd, struct p_header *h,
size_t size);
-extern int drbd_send_cmd2(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
+extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
char *data, size_t size);
extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
u32 set_size);
-extern int drbd_send_ack(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
- struct Tl_epoch_entry *e);
-extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
- struct Drbd_BlockRequest_Packet *rp);
-extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
- struct Drbd_Data_Packet *dp);
-extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
+extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
+ struct drbd_epoch_entry *e);
+extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
+ struct p_block_req *rp);
+extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
+ struct p_data *dp);
+extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
sector_t sector, int blksize, u64 block_id);
extern int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
int offset, size_t size);
-extern int drbd_send_block(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
- struct Tl_epoch_entry *e);
+extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
+ struct drbd_epoch_entry *e);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int _drbd_send_barrier(struct drbd_conf *mdev,
- struct drbd_barrier *barrier);
+ struct drbd_tl_epoch *barrier);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
sector_t sector,int size,
void *digest, int digest_size,
- enum Drbd_Packet_Cmd cmd);
+ enum drbd_packets cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
extern int drbd_send_bitmap(struct drbd_conf *mdev);
@@ -1356,25 +1353,25 @@ extern int trace_devs;
extern int trace_level;
enum {
- TraceLvlAlways = 0,
- TraceLvlSummary,
- TraceLvlMetrics,
- TraceLvlAll,
- TraceLvlMax
+ TRACE_LVL_ALWAYS = 0,
+ TRACE_LVL_SUMMARY,
+ TRACE_LVL_METRICS,
+ TRACE_LVL_ALL,
+ TRACE_LVL_MAX
};
enum {
- TraceTypePacket = 0x00000001,
- TraceTypeRq = 0x00000002,
- TraceTypeUuid = 0x00000004,
- TraceTypeResync = 0x00000008,
- TraceTypeEE = 0x00000010,
- TraceTypeUnplug = 0x00000020,
- TraceTypeNl = 0x00000040,
- TraceTypeALExts = 0x00000080,
- TraceTypeIntRq = 0x00000100,
- TraceTypeMDIO = 0x00000200,
- TraceTypeEpochs = 0x00000400,
+ TRACE_TYPE_PACKET = 0x00000001,
+ TRACE_TYPE_RQ = 0x00000002,
+ TRACE_TYPE_UUID = 0x00000004,
+ TRACE_TYPE_RESYNC = 0x00000008,
+ TRACE_TYPE_EE = 0x00000010,
+ TRACE_TYPE_UNPLUG = 0x00000020,
+ TRACE_TYPE_NL = 0x00000040,
+ TRACE_TYPE_AL_EXTS = 0x00000080,
+ TRACE_TYPE_INT_RQ = 0x00000100,
+ TRACE_TYPE_MD_IO = 0x00000200,
+ TRACE_TYPE_EPOCHS = 0x00000400,
};
static inline int
@@ -1423,28 +1420,28 @@ extern void _dump_bio(const char *pfx, struct drbd_conf *mdev, struct bio *bio,
static inline void dump_bio(struct drbd_conf *mdev,
struct bio *bio, int complete, struct drbd_request *r)
{
- MTRACE(TraceTypeRq, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_RQ, TRACE_LVL_SUMMARY,
_dump_bio("Rq", mdev, bio, complete, r);
);
}
static inline void dump_internal_bio(const char *pfx, struct drbd_conf *mdev, struct bio *bio, int complete)
{
- MTRACE(TraceTypeIntRq, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_INT_RQ, TRACE_LVL_SUMMARY,
_dump_bio(pfx, mdev, bio, complete, NULL);
);
}
/* Packet dumping support */
extern void _dump_packet(struct drbd_conf *mdev, struct socket *sock,
- int recv, union Drbd_Polymorph_Packet *p,
+ int recv, union p_polymorph *p,
char *file, int line);
static inline void
dump_packet(struct drbd_conf *mdev, struct socket *sock,
- int recv, union Drbd_Polymorph_Packet *p, char *file, int line)
+ int recv, union p_polymorph *p, char *file, int line)
{
- MTRACE(TraceTypePacket, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_PACKET, TRACE_LVL_SUMMARY,
_dump_packet(mdev, sock, recv, p, file, line);
);
}
@@ -1472,8 +1469,8 @@ extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *,
struct drbd_backing_dev *);
-enum determin_dev_size_enum { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determin_dev_size_enum drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
@@ -1482,7 +1479,7 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
/* drbd_worker.c */
-extern int drbd_worker(struct Drbd_thread *thi);
+extern int drbd_worker(struct drbd_thread *thi);
extern void drbd_alter_sa(struct drbd_conf *mdev, int na);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
@@ -1529,12 +1526,12 @@ extern void resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
-extern struct Tl_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
+extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
u64 id,
sector_t sector,
unsigned int data_size,
gfp_t gfp_mask) __must_hold(local);
-extern void drbd_free_ee(struct drbd_conf *mdev, struct Tl_epoch_entry *e);
+extern void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e);
extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
struct list_head *head);
extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
@@ -1620,12 +1617,12 @@ extern void drbd_al_shrink(struct drbd_conf *mdev);
void drbd_nl_cleanup(void);
int __init drbd_nl_init(void);
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state_t);
+void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
void drbd_bcast_sync_progress(struct drbd_conf *mdev);
void drbd_bcast_ee(struct drbd_conf *mdev,
const char *reason, const int dgs,
const char* seen_hash, const char* calc_hash,
- const struct Tl_epoch_entry* e);
+ const struct drbd_epoch_entry* e);
/** DRBD State macros:
@@ -1640,37 +1637,40 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
* Besides the basic forms NS() and _NS() additional _?NS[23] are defined
* to express state changes that affect more than one aspect of the state.
*
- * E.g. NS2(conn, Connected, peer, Secondary)
+ * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
* Means that the network connection was established and that the peer
* is in secondary role.
*/
-#define peer_mask role_mask
-#define pdsk_mask disk_mask
-#define susp_mask 1
-#define user_isp_mask 1
-#define aftr_isp_mask 1
+#define role_MASK R_MASK
+#define peer_MASK R_MASK
+#define disk_MASK D_MASK
+#define pdsk_MASK D_MASK
+#define conn_MASK C_MASK
+#define susp_MASK 1
+#define user_isp_MASK 1
+#define aftr_isp_MASK 1
#define NS(T, S) \
- ({ union drbd_state_t mask; mask.i = 0; mask.T = T##_mask; mask; }), \
- ({ union drbd_state_t val; val.i = 0; val.T = (S); val; })
+ ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T = (S); val; })
#define NS2(T1, S1, T2, S2) \
- ({ union drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
- mask.T2 = T2##_mask; mask; }), \
- ({ union drbd_state_t val; val.i = 0; val.T1 = (S1); \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
val.T2 = (S2); val; })
#define NS3(T1, S1, T2, S2, T3, S3) \
- ({ union drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
- mask.T2 = T2##_mask; mask.T3 = T3##_mask; mask; }), \
- ({ union drbd_state_t val; val.i = 0; val.T1 = (S1); \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
val.T2 = (S2); val.T3 = (S3); val; })
#define _NS(D, T, S) \
- D, ({ union drbd_state_t __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
+ D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
#define _NS2(D, T1, S1, T2, S2) \
- D, ({ union drbd_state_t __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
+ D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
__ns.T2 = (S2); __ns; })
#define _NS3(D, T1, S1, T2, S2, T3, S3) \
- D, ({ union drbd_state_t __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
+ D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
__ns.T2 = (S2); __ns.T3 = (S3); __ns; })
/*
@@ -1690,7 +1690,7 @@ static inline void drbd_state_unlock(struct drbd_conf *mdev)
}
static inline int _drbd_set_state(struct drbd_conf *mdev,
- union drbd_state_t ns, enum chg_state_flags flags,
+ union drbd_state ns, enum chg_state_flags flags,
struct completion *done)
{
int rv;
@@ -1703,10 +1703,10 @@ static inline int _drbd_set_state(struct drbd_conf *mdev,
}
static inline int drbd_request_state(struct drbd_conf *mdev,
- union drbd_state_t mask,
- union drbd_state_t val)
+ union drbd_state mask,
+ union drbd_state val)
{
- return _drbd_request_state(mdev, mask, val, ChgStateVerbose + ChgOrdered);
+ return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
}
/**
@@ -1716,17 +1716,17 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
static inline void __drbd_chk_io_error(struct drbd_conf *mdev, int forcedetach)
{
switch (mdev->bc->dc.on_io_error) {
- case PassOn:
+ case EP_PASS_ON:
if (!forcedetach) {
if (printk_ratelimit())
dev_err(DEV, "Local IO failed. Passing error on...\n");
break;
}
/* NOTE fall through to detach case if forcedetach set */
- case Detach:
- case CallIOEHelper:
- if (mdev->state.disk > Failed) {
- _drbd_set_state(_NS(mdev, disk, Failed), ChgStateHard, NULL);
+ case EP_DETACH:
+ case EP_CALL_HELPER:
+ if (mdev->state.disk > D_FAILED) {
+ _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
dev_err(DEV, "Local IO failed. Detaching...\n");
}
break;
@@ -1878,35 +1878,35 @@ static inline void request_ping(struct drbd_conf *mdev)
}
static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
- enum Drbd_Packet_Cmd cmd)
+ enum drbd_packets cmd)
{
- struct Drbd_Header h;
+ struct p_header h;
return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
}
static inline int drbd_send_ping(struct drbd_conf *mdev)
{
- struct Drbd_Header h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, Ping, &h, sizeof(h));
+ struct p_header h;
+ return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
}
static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
{
- struct Drbd_Header h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, PingAck, &h, sizeof(h));
+ struct p_header h;
+ return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
}
-static inline void drbd_thread_stop(struct Drbd_thread *thi)
+static inline void drbd_thread_stop(struct drbd_thread *thi)
{
_drbd_thread_stop(thi, FALSE, TRUE);
}
-static inline void drbd_thread_stop_nowait(struct Drbd_thread *thi)
+static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
_drbd_thread_stop(thi, FALSE, FALSE);
}
-static inline void drbd_thread_restart_nowait(struct Drbd_thread *thi)
+static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
_drbd_thread_stop(thi, TRUE, FALSE);
}
@@ -1925,7 +1925,7 @@ static inline void drbd_thread_restart_nowait(struct Drbd_thread *thi)
* _req_mod(req, data_received)
* [from receive_DataReply]
* _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
- * [from got_BlockAck (WriteAck, RecvAck)]
+ * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
* for some reason it is NOT decreased in got_NegAck,
* but in the resulting cleanup code from report_params.
* we should try to remember the reason for that...
@@ -1952,9 +1952,9 @@ static inline void inc_ap_pending(struct drbd_conf *mdev)
/* counts how many resync-related answers we still expect from the peer
* increase decrease
- * SyncTarget sends RSDataRequest (and expects RSDataReply)
- * SyncSource sends RSDataReply (and expects WriteAck whith ID_SYNCER)
- * (or NegAck with ID_SYNCER)
+ * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
+ * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
+ * (or P_NEG_ACK with ID_SYNCER)
*/
static inline void inc_rs_pending(struct drbd_conf *mdev)
{
@@ -1969,11 +1969,11 @@ static inline void inc_rs_pending(struct drbd_conf *mdev)
/* counts how many answers we still need to send to the peer.
* increased on
* receive_Data unless protocol A;
- * we need to send a RecvAck (proto B)
- * or WriteAck (proto C)
- * receive_RSDataReply (recv_resync_read) we need to send a WriteAck
- * receive_DataRequest (receive_RSDataRequest) we need to send back Data
- * receive_Barrier_* we need to send a BarrierAck
+ * we need to send a P_RECV_ACK (proto B)
+ * or P_WRITE_ACK (proto C)
+ * receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
+ * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
+ * receive_Barrier_* we need to send a P_BARRIER_ACK
*/
static inline void inc_unacked(struct drbd_conf *mdev)
{
@@ -2006,7 +2006,7 @@ static inline int inc_net(struct drbd_conf *mdev)
int have_net_conf;
atomic_inc(&mdev->net_cnt);
- have_net_conf = mdev->state.conn >= Unconnected;
+ have_net_conf = mdev->state.conn >= C_UNCONNECTED;
if (!have_net_conf)
dec_net(mdev);
return have_net_conf;
@@ -2017,7 +2017,7 @@ static inline int inc_net(struct drbd_conf *mdev)
* TRUE you should call dec_local() after IO is completed.
*/
#define inc_local_if_state(M,MINS) __cond_lock(local, _inc_local_if_state(M,MINS))
-#define inc_local(M) __cond_lock(local, _inc_local_if_state(M,Inconsistent))
+#define inc_local(M) __cond_lock(local, _inc_local_if_state(M,D_INCONSISTENT))
static inline void dec_local(struct drbd_conf *mdev)
{
@@ -2093,7 +2093,7 @@ static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
return mxb;
}
-static inline int drbd_state_is_stable(union drbd_state_t s)
+static inline int drbd_state_is_stable(union drbd_state s)
{
/* DO NOT add a default clause, we want the compiler to warn us
@@ -2101,54 +2101,54 @@ static inline int drbd_state_is_stable(union drbd_state_t s)
switch ((enum drbd_conns)s.conn) {
/* new io only accepted when there is no connection, ... */
- case StandAlone:
- case WFConnection:
+ case C_STANDALONE:
+ case C_WF_CONNECTION:
/* ... or there is a well established connection. */
- case Connected:
- case SyncSource:
- case SyncTarget:
- case VerifyS:
- case VerifyT:
- case PausedSyncS:
- case PausedSyncT:
+ case C_CONNECTED:
+ case C_SYNC_SOURCE:
+ case C_SYNC_TARGET:
+ case C_VERIFY_S:
+ case C_VERIFY_T:
+ case C_PAUSED_SYNC_S:
+ case C_PAUSED_SYNC_T:
/* maybe stable, look at the disk state */
break;
/* no new io accepted during tansitional states
* like handshake or teardown */
- case Disconnecting:
- case Unconnected:
- case Timeout:
- case BrokenPipe:
- case NetworkFailure:
- case ProtocolError:
- case TearDown:
- case WFReportParams:
- case StartingSyncS:
- case StartingSyncT:
- case WFBitMapS:
- case WFBitMapT:
- case WFSyncUUID:
- case conn_mask:
+ case C_DISCONNECTING:
+ case C_UNCONNECTED:
+ case C_TIMEOUT:
+ case C_BROKEN_PIPE:
+ case C_NETWORK_FAILURE:
+ case C_PROTOCOL_ERROR:
+ case C_TEAR_DOWN:
+ case C_WF_REPORT_PARAMS:
+ case C_STARTING_SYNC_S:
+ case C_STARTING_SYNC_T:
+ case C_WF_BITMAP_S:
+ case C_WF_BITMAP_T:
+ case C_WF_SYNC_UUID:
+ case C_MASK:
/* not "stable" */
return 0;
}
switch ((enum drbd_disk_state)s.disk) {
- case Diskless:
- case Inconsistent:
- case Outdated:
- case Consistent:
- case UpToDate:
+ case D_DISKLESS:
+ case D_INCONSISTENT:
+ case D_OUTDATED:
+ case D_CONSISTENT:
+ case D_UP_TO_DATE:
/* disk state is stable as well. */
break;
/* no new io accepted during tansitional states */
- case Attaching:
- case Failed:
- case Negotiating:
- case DUnknown:
- case disk_mask:
+ case D_ATTACHING:
+ case D_FAILED:
+ case D_NEGOTIATING:
+ case D_UNKNOWN:
+ case D_MASK:
/* not "stable" */
return 0;
}
@@ -2188,7 +2188,7 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
{
/* compare with after_state_ch,
- * os.conn != WFBitMapS && ns.conn == WFBitMapS */
+ * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
DEFINE_WAIT(wait);
/* we wait here
@@ -2232,7 +2232,7 @@ static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
mdev->ed_uuid = val;
- MTRACE(TraceTypeUuid, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_METRICS,
dev_info(DEV, " exposed data uuid now %016llX\n",
(unsigned long long)val);
);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index dfb48e2c1bc5..4c84365aeeef 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -59,22 +59,22 @@
struct after_state_chg_work {
struct drbd_work w;
- union drbd_state_t os;
- union drbd_state_t ns;
+ union drbd_state os;
+ union drbd_state ns;
enum chg_state_flags flags;
struct completion *done;
};
-int drbdd_init(struct Drbd_thread *);
-int drbd_worker(struct Drbd_thread *);
-int drbd_asender(struct Drbd_thread *);
+int drbdd_init(struct drbd_thread *);
+int drbd_worker(struct drbd_thread *);
+int drbd_asender(struct drbd_thread *);
int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
STATIC int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
- union drbd_state_t ns, enum chg_state_flags flags);
+STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags);
STATIC int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
STATIC void md_sync_timer_fn(unsigned long data);
STATIC int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
@@ -116,9 +116,9 @@ int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
#ifdef ENABLE_DYNAMIC_TRACE
-int trace_type; /* Bitmap of trace types to enable */
-int trace_level; /* Current trace level */
-int trace_devs; /* Bitmap of devices to trace */
+int trace_type; /* Bitmap of trace types to enable */
+int trace_level; /* Current trace level */
+int trace_devs; /* Bitmap of devices to trace */
int proc_details; /* Detail level in proc drbd*/
module_param(trace_level, int, 0644);
@@ -186,9 +186,9 @@ int _inc_local_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
/************************* The transfer log start */
STATIC int tl_init(struct drbd_conf *mdev)
{
- struct drbd_barrier *b;
+ struct drbd_tl_epoch *b;
- b = kmalloc(sizeof(struct drbd_barrier), GFP_KERNEL);
+ b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
if (!b)
return 0;
INIT_LIST_HEAD(&b->requests);
@@ -198,8 +198,8 @@ STATIC int tl_init(struct drbd_conf *mdev)
b->n_req = 0;
b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
- mdev->oldest_barrier = b;
- mdev->newest_barrier = b;
+ mdev->oldest_tle = b;
+ mdev->newest_tle = b;
INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
mdev->tl_hash = NULL;
@@ -210,12 +210,12 @@ STATIC int tl_init(struct drbd_conf *mdev)
STATIC void tl_cleanup(struct drbd_conf *mdev)
{
- D_ASSERT(mdev->oldest_barrier == mdev->newest_barrier);
+ D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
- kfree(mdev->oldest_barrier);
- mdev->oldest_barrier = NULL;
- kfree(mdev->unused_spare_barrier);
- mdev->unused_spare_barrier = NULL;
+ kfree(mdev->oldest_tle);
+ mdev->oldest_tle = NULL;
+ kfree(mdev->unused_spare_tle);
+ mdev->unused_spare_tle = NULL;
kfree(mdev->tl_hash);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
@@ -224,9 +224,9 @@ STATIC void tl_cleanup(struct drbd_conf *mdev)
/**
* _tl_add_barrier: Adds a barrier to the TL.
*/
-void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_barrier *new)
+void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
- struct drbd_barrier *newest_before;
+ struct drbd_tl_epoch *newest_before;
INIT_LIST_HEAD(&new->requests);
INIT_LIST_HEAD(&new->w.list);
@@ -234,13 +234,13 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_barrier *new)
new->next = NULL;
new->n_req = 0;
- newest_before = mdev->newest_barrier;
+ newest_before = mdev->newest_tle;
/* never send a barrier number == 0, because that is special-cased
* when using TCQ for our write ordering code */
new->br_number = (newest_before->br_number+1) ?: 1;
- if (mdev->newest_barrier != new) {
- mdev->newest_barrier->next = new;
- mdev->newest_barrier = new;
+ if (mdev->newest_tle != new) {
+ mdev->newest_tle->next = new;
+ mdev->newest_tle = new;
}
}
@@ -248,13 +248,13 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_barrier *new)
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
unsigned int set_size)
{
- struct drbd_barrier *b, *nob; /* next old barrier */
+ struct drbd_tl_epoch *b, *nob; /* next old barrier */
struct list_head *le, *tle;
struct drbd_request *r;
spin_lock_irq(&mdev->req_lock);
- b = mdev->oldest_barrier;
+ b = mdev->oldest_tle;
/* first some paranoia code */
if (b == NULL) {
@@ -297,12 +297,12 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
_tl_add_barrier(mdev, b);
if (nob)
- mdev->oldest_barrier = nob;
+ mdev->oldest_tle = nob;
/* if nob == NULL b was the only barrier, and becomes the new
- barrer. Threfore mdev->oldest_barrier points already to b */
+ barrier. Therefore mdev->oldest_tle points already to b */
} else {
D_ASSERT(nob != NULL);
- mdev->oldest_barrier = nob;
+ mdev->oldest_tle = nob;
kfree(b);
}
@@ -313,7 +313,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
bail:
spin_unlock_irq(&mdev->req_lock);
- drbd_force_state(mdev, NS(conn, ProtocolError));
+ drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}
@@ -321,14 +321,14 @@ bail:
* or from some after_state_ch */
void tl_clear(struct drbd_conf *mdev)
{
- struct drbd_barrier *b, *tmp;
+ struct drbd_tl_epoch *b, *tmp;
struct list_head *le, *tle;
struct drbd_request *r;
int new_initial_bnr = net_random();
spin_lock_irq(&mdev->req_lock);
- b = mdev->oldest_barrier;
+ b = mdev->oldest_tle;
while (b) {
list_for_each_safe(le, tle, &b->requests) {
r = list_entry(le, struct drbd_request, tl_requests);
@@ -346,7 +346,7 @@ void tl_clear(struct drbd_conf *mdev)
if (b->w.cb != NULL)
dec_ap_pending(mdev);
- if (b == mdev->newest_barrier) {
+ if (b == mdev->newest_tle) {
/* recycle, but reinit! */
D_ASSERT(tmp == NULL);
INIT_LIST_HEAD(&b->requests);
@@ -355,7 +355,7 @@ void tl_clear(struct drbd_conf *mdev)
b->br_number = new_initial_bnr;
b->n_req = 0;
- mdev->oldest_barrier = b;
+ mdev->oldest_tle = b;
break;
}
kfree(b);
@@ -382,37 +382,37 @@ void tl_clear(struct drbd_conf *mdev)
* unlikely(!drbd_bio_uptodate(e->bio)) case from kernel thread context.
* See also drbd_chk_io_error
*
- * NOTE: we set ourselves FAILED here if on_io_error is Detach or Panic OR
+ * NOTE: we set ourselves FAILED here if on_io_error is EP_DETACH or Panic OR
* if the forcedetach flag is set. This flag is set when failures
* occur writing the meta data portion of the disk as they are
* not recoverable.
*/
int drbd_io_error(struct drbd_conf *mdev, int forcedetach)
{
- enum io_error_handler eh;
+ enum drbd_io_error_p eh;
unsigned long flags;
int send;
int ok = 1;
- eh = PassOn;
- if (inc_local_if_state(mdev, Failed)) {
+ eh = EP_PASS_ON;
+ if (inc_local_if_state(mdev, D_FAILED)) {
eh = mdev->bc->dc.on_io_error;
dec_local(mdev);
}
- if (!forcedetach && eh == PassOn)
+ if (!forcedetach && eh == EP_PASS_ON)
return 1;
spin_lock_irqsave(&mdev->req_lock, flags);
- send = (mdev->state.disk == Failed);
+ send = (mdev->state.disk == D_FAILED);
if (send)
- _drbd_set_state(_NS(mdev, disk, Diskless), ChgStateHard, NULL);
+ _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
spin_unlock_irqrestore(&mdev->req_lock, flags);
if (!send)
return ok;
- if (mdev->state.conn >= Connected) {
+ if (mdev->state.conn >= C_CONNECTED) {
ok = drbd_send_state(mdev);
if (ok)
dev_warn(DEV, "Notified peer that my disk is broken.\n");
@@ -429,7 +429,7 @@ int drbd_io_error(struct drbd_conf *mdev, int forcedetach)
/* Releasing the backing device is done in after_state_ch() */
- if (eh == CallIOEHelper)
+ if (eh == EP_CALL_HELPER)
drbd_khelper(mdev, "local-io-error");
return ok;
@@ -441,22 +441,22 @@ int drbd_io_error(struct drbd_conf *mdev, int forcedetach)
* transaction. Of course it returns 0 as soon as the connection is lost.
*/
STATIC int cl_wide_st_chg(struct drbd_conf *mdev,
- union drbd_state_t os, union drbd_state_t ns)
+ union drbd_state os, union drbd_state ns)
{
- return (os.conn >= Connected && ns.conn >= Connected &&
- ((os.role != Primary && ns.role == Primary) ||
- (os.conn != StartingSyncT && ns.conn == StartingSyncT) ||
- (os.conn != StartingSyncS && ns.conn == StartingSyncS) ||
- (os.disk != Diskless && ns.disk == Diskless))) ||
- (os.conn >= Connected && ns.conn == Disconnecting) ||
- (os.conn == Connected && ns.conn == VerifyS);
+ return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
+ ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
+ (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
+ (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+ (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
+ (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
- union drbd_state_t mask, union drbd_state_t val)
+ union drbd_state mask, union drbd_state val)
{
unsigned long flags;
- union drbd_state_t os, ns;
+ union drbd_state os, ns;
int rv;
spin_lock_irqsave(&mdev->req_lock, flags);
@@ -470,41 +470,41 @@ int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
}
void drbd_force_state(struct drbd_conf *mdev,
- union drbd_state_t mask, union drbd_state_t val)
+ union drbd_state mask, union drbd_state val)
{
- drbd_change_state(mdev, ChgStateHard, mask, val);
+ drbd_change_state(mdev, CS_HARD, mask, val);
}
-int is_valid_state(struct drbd_conf *mdev, union drbd_state_t ns);
+int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
int is_valid_state_transition(struct drbd_conf *,
- union drbd_state_t, union drbd_state_t);
+ union drbd_state, union drbd_state);
int drbd_send_state_req(struct drbd_conf *,
- union drbd_state_t, union drbd_state_t);
+ union drbd_state, union drbd_state);
-STATIC enum set_st_err _req_st_cond(struct drbd_conf *mdev,
- union drbd_state_t mask, union drbd_state_t val)
+STATIC enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
+ union drbd_state mask, union drbd_state val)
{
- union drbd_state_t os, ns;
+ union drbd_state os, ns;
unsigned long flags;
int rv;
if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
- return SS_CW_Success;
+ return SS_CW_SUCCESS;
if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
- return SS_CW_FailedByPeer;
+ return SS_CW_FAILED_BY_PEER;
rv = 0;
spin_lock_irqsave(&mdev->req_lock, flags);
os = mdev->state;
ns.i = (os.i & ~mask.i) | val.i;
if (!cl_wide_st_chg(mdev, os, ns))
- rv = SS_CW_NoNeed;
+ rv = SS_CW_NO_NEED;
if (!rv) {
rv = is_valid_state(mdev, ns);
- if (rv == SS_Success) {
+ if (rv == SS_SUCCESS) {
rv = is_valid_state_transition(mdev, ns, os);
- if (rv == SS_Success)
+ if (rv == SS_SUCCESS)
rv = 0; /* cont waiting, otherwise fail. */
}
}
@@ -520,17 +520,17 @@ STATIC enum set_st_err _req_st_cond(struct drbd_conf *mdev,
* It has a cousin named drbd_request_state(), which is always verbose.
*/
STATIC int drbd_req_state(struct drbd_conf *mdev,
- union drbd_state_t mask, union drbd_state_t val,
+ union drbd_state mask, union drbd_state val,
enum chg_state_flags f)
{
struct completion done;
unsigned long flags;
- union drbd_state_t os, ns;
+ union drbd_state os, ns;
int rv;
init_completion(&done);
- if (f & ChgSerialize)
+ if (f & CS_SERIALIZE)
mutex_lock(&mdev->state_mutex);
spin_lock_irqsave(&mdev->req_lock, flags);
@@ -539,12 +539,12 @@ STATIC int drbd_req_state(struct drbd_conf *mdev,
if (cl_wide_st_chg(mdev, os, ns)) {
rv = is_valid_state(mdev, ns);
- if (rv == SS_Success)
+ if (rv == SS_SUCCESS)
rv = is_valid_state_transition(mdev, ns, os);
spin_unlock_irqrestore(&mdev->req_lock, flags);
- if (rv < SS_Success) {
- if (f & ChgStateVerbose)
+ if (rv < SS_SUCCESS) {
+ if (f & CS_VERBOSE)
print_st_err(mdev, os, ns, rv);
goto abort;
}
@@ -552,8 +552,8 @@ STATIC int drbd_req_state(struct drbd_conf *mdev,
drbd_state_lock(mdev);
if (!drbd_send_state_req(mdev, mask, val)) {
drbd_state_unlock(mdev);
- rv = SS_CW_FailedByPeer;
- if (f & ChgStateVerbose)
+ rv = SS_CW_FAILED_BY_PEER;
+ if (f & CS_VERBOSE)
print_st_err(mdev, os, ns, rv);
goto abort;
}
@@ -561,10 +561,10 @@ STATIC int drbd_req_state(struct drbd_conf *mdev,
wait_event(mdev->state_wait,
(rv = _req_st_cond(mdev, mask, val)));
- if (rv < SS_Success) {
+ if (rv < SS_SUCCESS) {
/* nearly dead code. */
drbd_state_unlock(mdev);
- if (f & ChgStateVerbose)
+ if (f & CS_VERBOSE)
print_st_err(mdev, os, ns, rv);
goto abort;
}
@@ -579,13 +579,13 @@ STATIC int drbd_req_state(struct drbd_conf *mdev,
spin_unlock_irqrestore(&mdev->req_lock, flags);
- if (f & ChgWaitComplete && rv == SS_Success) {
+ if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
D_ASSERT(current != mdev->worker.task);
wait_for_completion(&done);
}
abort:
- if (f & ChgSerialize)
+ if (f & CS_SERIALIZE)
mutex_unlock(&mdev->state_mutex);
return rv;
@@ -597,18 +597,18 @@ abort:
* transition this function even does a cluster wide transaction.
* It has a cousin named drbd_request_state(), which is always verbose.
*/
-int _drbd_request_state(struct drbd_conf *mdev, union drbd_state_t mask,
- union drbd_state_t val, enum chg_state_flags f)
+int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
{
int rv;
wait_event(mdev->state_wait,
- (rv = drbd_req_state(mdev, mask, val, f)) != SS_InTransientState);
+ (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
return rv;
}
-STATIC void print_st(struct drbd_conf *mdev, char *name, union drbd_state_t ns)
+STATIC void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
name,
@@ -625,9 +625,9 @@ STATIC void print_st(struct drbd_conf *mdev, char *name, union drbd_state_t ns)
}
void print_st_err(struct drbd_conf *mdev,
- union drbd_state_t os, union drbd_state_t ns, int err)
+ union drbd_state os, union drbd_state ns, int err)
{
- if (err == SS_InTransientState)
+ if (err == SS_IN_TRANSIENT_STATE)
return;
dev_err(DEV, "State change failed: %s\n", set_st_err_name(err));
print_st(mdev, " state", os);
@@ -650,14 +650,14 @@ void print_st_err(struct drbd_conf *mdev,
A##s_to_name(ns.A)); \
} })
-int is_valid_state(struct drbd_conf *mdev, union drbd_state_t ns)
+int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
- enum fencing_policy fp;
- int rv = SS_Success;
+ enum drbd_fencing_p fp;
+ int rv = SS_SUCCESS;
- fp = DontCare;
+ fp = FP_DONT_CARE;
if (inc_local(mdev)) {
fp = mdev->bc->dc.fencing;
dec_local(mdev);
@@ -665,105 +665,105 @@ int is_valid_state(struct drbd_conf *mdev, union drbd_state_t ns)
if (inc_net(mdev)) {
if (!mdev->net_conf->two_primaries &&
- ns.role == Primary && ns.peer == Primary)
- rv = SS_TwoPrimaries;
+ ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
+ rv = SS_TWO_PRIMARIES;
dec_net(mdev);
}
if (rv <= 0)
/* already found a reason to abort */;
- else if (ns.role == Secondary && mdev->open_cnt)
- rv = SS_DeviceInUse;
+ else if (ns.role == R_SECONDARY && mdev->open_cnt)
+ rv = SS_DEVICE_IN_USE;
- else if (ns.role == Primary && ns.conn < Connected && ns.disk < UpToDate)
- rv = SS_NoUpToDateDisk;
+ else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
- else if (fp >= Resource &&
- ns.role == Primary && ns.conn < Connected && ns.pdsk >= DUnknown)
- rv = SS_PrimaryNOP;
+ else if (fp >= FP_RESOURCE &&
+ ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
+ rv = SS_PRIMARY_NOP;
- else if (ns.role == Primary && ns.disk <= Inconsistent && ns.pdsk <= Inconsistent)
- rv = SS_NoUpToDateDisk;
+ else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
+ rv = SS_NO_UP_TO_DATE_DISK;
- else if (ns.conn > Connected && ns.disk < UpToDate && ns.pdsk < UpToDate)
- rv = SS_BothInconsistent;
+ else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_BOTH_INCONSISTENT;
- else if (ns.conn > Connected && (ns.disk == Diskless || ns.pdsk == Diskless))
- rv = SS_SyncingDiskless;
+ else if (ns.conn > C_CONNECTED && (ns.disk == D_DISKLESS || ns.pdsk == D_DISKLESS))
+ rv = SS_SYNCING_DISKLESS;
- else if ((ns.conn == Connected ||
- ns.conn == WFBitMapS ||
- ns.conn == SyncSource ||
- ns.conn == PausedSyncS) &&
- ns.disk == Outdated)
- rv = SS_ConnectedOutdates;
+ else if ((ns.conn == C_CONNECTED ||
+ ns.conn == C_WF_BITMAP_S ||
+ ns.conn == C_SYNC_SOURCE ||
+ ns.conn == C_PAUSED_SYNC_S) &&
+ ns.disk == D_OUTDATED)
+ rv = SS_CONNECTED_OUTDATES;
- else if ((ns.conn == VerifyS || ns.conn == VerifyT) &&
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(mdev->sync_conf.verify_alg[0] == 0))
- rv = SS_NoVerifyAlg;
+ rv = SS_NO_VERIFY_ALG;
- else if ((ns.conn == VerifyS || ns.conn == VerifyT) &&
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
mdev->agreed_pro_version < 88)
- rv = SS_NotSupported;
+ rv = SS_NOT_SUPPORTED;
return rv;
}
int is_valid_state_transition(struct drbd_conf *mdev,
- union drbd_state_t ns, union drbd_state_t os)
+ union drbd_state ns, union drbd_state os)
{
- int rv = SS_Success;
+ int rv = SS_SUCCESS;
- if ((ns.conn == StartingSyncT || ns.conn == StartingSyncS) &&
- os.conn > Connected)
- rv = SS_ResyncRunning;
+ if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
+ os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
- if (ns.conn == Disconnecting && os.conn == StandAlone)
- rv = SS_AlreadyStandAlone;
+ if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
+ rv = SS_ALREADY_STANDALONE;
- if (ns.disk > Attaching && os.disk == Diskless)
- rv = SS_IsDiskLess;
+ if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
+ rv = SS_IS_DISKLESS;
- if (ns.conn == WFConnection && os.conn < Unconnected)
- rv = SS_NoNetConfig;
+ if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
+ rv = SS_NO_NET_CONFIG;
- if (ns.disk == Outdated && os.disk < Outdated && os.disk != Attaching)
- rv = SS_LowerThanOutdated;
+ if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
+ rv = SS_LOWER_THAN_OUTDATED;
- if (ns.conn == Disconnecting && os.conn == Unconnected)
- rv = SS_InTransientState;
+ if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
+ rv = SS_IN_TRANSIENT_STATE;
- if (ns.conn == os.conn && ns.conn == WFReportParams)
- rv = SS_InTransientState;
+ if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
+ rv = SS_IN_TRANSIENT_STATE;
- if ((ns.conn == VerifyS || ns.conn == VerifyT) && os.conn < Connected)
- rv = SS_NeedConnection;
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
- if ((ns.conn == VerifyS || ns.conn == VerifyT) &&
- ns.conn != os.conn && os.conn > Connected)
- rv = SS_ResyncRunning;
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ ns.conn != os.conn && os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
- if ((ns.conn == StartingSyncS || ns.conn == StartingSyncT) &&
- os.conn < Connected)
- rv = SS_NeedConnection;
+ if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+ os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
return rv;
}
int __drbd_set_state(struct drbd_conf *mdev,
- union drbd_state_t ns, enum chg_state_flags flags,
+ union drbd_state ns, enum chg_state_flags flags,
struct completion *done)
{
- union drbd_state_t os;
- int rv = SS_Success;
+ union drbd_state os;
+ int rv = SS_SUCCESS;
int warn_sync_abort = 0;
- enum fencing_policy fp;
+ enum drbd_fencing_p fp;
struct after_state_chg_work *ascw;
os = mdev->state;
- fp = DontCare;
+ fp = FP_DONT_CARE;
if (inc_local(mdev)) {
fp = mdev->bc->dc.fencing;
dec_local(mdev);
@@ -772,125 +772,125 @@ int __drbd_set_state(struct drbd_conf *mdev,
/* Early state sanitising. */
/* Dissalow Network errors to configure a device's network part */
- if ((ns.conn >= Timeout && ns.conn <= TearDown) &&
- os.conn <= Disconnecting)
+ if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
+ os.conn <= C_DISCONNECTING)
ns.conn = os.conn;
- /* After a network error (+TearDown) only Unconnected or Disconnecting can follow */
- if (os.conn >= Timeout && os.conn <= TearDown &&
- ns.conn != Unconnected && ns.conn != Disconnecting)
+ /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */
+ if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
+ ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING)
ns.conn = os.conn;
- /* After Disconnecting only StandAlone may follow */
- if (os.conn == Disconnecting && ns.conn != StandAlone)
+ /* After C_DISCONNECTING only C_STANDALONE may follow */
+ if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
ns.conn = os.conn;
- if (ns.conn < Connected) {
+ if (ns.conn < C_CONNECTED) {
ns.peer_isp = 0;
- ns.peer = Unknown;
- if (ns.pdsk > DUnknown || ns.pdsk < Inconsistent)
- ns.pdsk = DUnknown;
+ ns.peer = R_UNKNOWN;
+ if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
+ ns.pdsk = D_UNKNOWN;
}
/* Clear the aftr_isp when becomming Unconfigured */
- if (ns.conn == StandAlone && ns.disk == Diskless && ns.role == Secondary)
+ if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
ns.aftr_isp = 0;
- if (ns.conn <= Disconnecting && ns.disk == Diskless)
- ns.pdsk = DUnknown;
+ if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS)
+ ns.pdsk = D_UNKNOWN;
- if (os.conn > Connected && ns.conn > Connected &&
- (ns.disk <= Failed || ns.pdsk <= Failed)) {
+ if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
+ (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
warn_sync_abort = 1;
- ns.conn = Connected;
+ ns.conn = C_CONNECTED;
}
- if (ns.conn >= Connected &&
- ((ns.disk == Consistent || ns.disk == Outdated) ||
- (ns.disk == Negotiating && ns.conn == WFBitMapT))) {
+ if (ns.conn >= C_CONNECTED &&
+ ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
+ (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
switch (ns.conn) {
- case WFBitMapT:
- case PausedSyncT:
- ns.disk = Outdated;
+ case C_WF_BITMAP_T:
+ case C_PAUSED_SYNC_T:
+ ns.disk = D_OUTDATED;
break;
- case Connected:
- case WFBitMapS:
- case SyncSource:
- case PausedSyncS:
- ns.disk = UpToDate;
+ case C_CONNECTED:
+ case C_WF_BITMAP_S:
+ case C_SYNC_SOURCE:
+ case C_PAUSED_SYNC_S:
+ ns.disk = D_UP_TO_DATE;
break;
- case SyncTarget:
- ns.disk = Inconsistent;
+ case C_SYNC_TARGET:
+ ns.disk = D_INCONSISTENT;
dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
break;
}
- if (os.disk == Outdated && ns.disk == UpToDate)
+ if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
}
- if (ns.conn >= Connected &&
- (ns.pdsk == Consistent || ns.pdsk == Outdated)) {
+ if (ns.conn >= C_CONNECTED &&
+ (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
switch (ns.conn) {
- case Connected:
- case WFBitMapT:
- case PausedSyncT:
- case SyncTarget:
- ns.pdsk = UpToDate;
+ case C_CONNECTED:
+ case C_WF_BITMAP_T:
+ case C_PAUSED_SYNC_T:
+ case C_SYNC_TARGET:
+ ns.pdsk = D_UP_TO_DATE;
break;
- case WFBitMapS:
- case PausedSyncS:
- ns.pdsk = Outdated;
+ case C_WF_BITMAP_S:
+ case C_PAUSED_SYNC_S:
+ ns.pdsk = D_OUTDATED;
break;
- case SyncSource:
- ns.pdsk = Inconsistent;
+ case C_SYNC_SOURCE:
+ ns.pdsk = D_INCONSISTENT;
dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
break;
}
- if (os.pdsk == Outdated && ns.pdsk == UpToDate)
+ if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
}
/* Connection breaks down before we finished "Negotiating" */
- if (ns.conn < Connected && ns.disk == Negotiating &&
- inc_local_if_state(mdev, Negotiating)) {
- if (mdev->ed_uuid == mdev->bc->md.uuid[Current]) {
+ if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
+ inc_local_if_state(mdev, D_NEGOTIATING)) {
+ if (mdev->ed_uuid == mdev->bc->md.uuid[UI_CURRENT]) {
ns.disk = mdev->new_state_tmp.disk;
ns.pdsk = mdev->new_state_tmp.pdsk;
} else {
dev_alert(DEV, "Connection lost while negotiating, no data!\n");
- ns.disk = Diskless;
- ns.pdsk = DUnknown;
+ ns.disk = D_DISKLESS;
+ ns.pdsk = D_UNKNOWN;
}
dec_local(mdev);
}
- if (fp == Stonith &&
- (ns.role == Primary &&
- ns.conn < Connected &&
- ns.pdsk > Outdated))
+ if (fp == FP_STONITH &&
+ (ns.role == R_PRIMARY &&
+ ns.conn < C_CONNECTED &&
+ ns.pdsk > D_OUTDATED))
ns.susp = 1;
if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
- if (ns.conn == SyncSource)
- ns.conn = PausedSyncS;
- if (ns.conn == SyncTarget)
- ns.conn = PausedSyncT;
+ if (ns.conn == C_SYNC_SOURCE)
+ ns.conn = C_PAUSED_SYNC_S;
+ if (ns.conn == C_SYNC_TARGET)
+ ns.conn = C_PAUSED_SYNC_T;
} else {
- if (ns.conn == PausedSyncS)
- ns.conn = SyncSource;
- if (ns.conn == PausedSyncT)
- ns.conn = SyncTarget;
+ if (ns.conn == C_PAUSED_SYNC_S)
+ ns.conn = C_SYNC_SOURCE;
+ if (ns.conn == C_PAUSED_SYNC_T)
+ ns.conn = C_SYNC_TARGET;
}
if (ns.i == os.i)
- return SS_NothingToDo;
+ return SS_NOTHING_TO_DO;
- if (!(flags & ChgStateHard)) {
+ if (!(flags & CS_HARD)) {
/* pre-state-change checks ; only look at ns */
/* See drbd_state_sw_errors in drbd_strings.c */
rv = is_valid_state(mdev, ns);
- if (rv < SS_Success) {
+ if (rv < SS_SUCCESS) {
/* If the old state was illegal as well, then let
this happen...*/
@@ -906,8 +906,8 @@ int __drbd_set_state(struct drbd_conf *mdev,
rv = is_valid_state_transition(mdev, ns, os);
}
- if (rv < SS_Success) {
- if (flags & ChgStateVerbose)
+ if (rv < SS_SUCCESS) {
+ if (flags & CS_VERBOSE)
print_st_err(mdev, os, ns, rv);
return rv;
}
@@ -936,16 +936,16 @@ int __drbd_set_state(struct drbd_conf *mdev,
wake_up(&mdev->state_wait);
/** post-state-change actions **/
- if (os.conn >= SyncSource && ns.conn <= Connected) {
+ if (os.conn >= C_SYNC_SOURCE && ns.conn <= C_CONNECTED) {
set_bit(STOP_SYNC_TIMER, &mdev->flags);
mod_timer(&mdev->resync_timer, jiffies);
}
- if ((os.conn == PausedSyncT || os.conn == PausedSyncS) &&
- (ns.conn == SyncTarget || ns.conn == SyncSource)) {
+ if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
dev_info(DEV, "Syncer continues.\n");
mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
- if (ns.conn == SyncTarget) {
+ if (ns.conn == C_SYNC_TARGET) {
if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
mod_timer(&mdev->resync_timer, jiffies);
/* This if (!test_bit) is only needed for the case
@@ -955,16 +955,16 @@ int __drbd_set_state(struct drbd_conf *mdev,
}
}
- if ((os.conn == SyncTarget || os.conn == SyncSource) &&
- (ns.conn == PausedSyncT || ns.conn == PausedSyncS)) {
+ if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
+ (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
dev_info(DEV, "Resync suspended\n");
mdev->rs_mark_time = jiffies;
- if (ns.conn == PausedSyncT)
+ if (ns.conn == C_PAUSED_SYNC_T)
set_bit(STOP_SYNC_TIMER, &mdev->flags);
}
- if (os.conn == Connected &&
- (ns.conn == VerifyS || ns.conn == VerifyT)) {
+ if (os.conn == C_CONNECTED &&
+ (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
mdev->ov_position = 0;
mdev->ov_left =
mdev->rs_total =
@@ -974,53 +974,53 @@ int __drbd_set_state(struct drbd_conf *mdev,
mdev->ov_last_oos_size = 0;
mdev->ov_last_oos_start = 0;
- if (ns.conn == VerifyS)
+ if (ns.conn == C_VERIFY_S)
mod_timer(&mdev->resync_timer, jiffies);
}
if (inc_local(mdev)) {
- u32 mdf = mdev->bc->md.flags & ~(MDF_Consistent|MDF_PrimaryInd|
- MDF_ConnectedInd|MDF_WasUpToDate|
- MDF_PeerOutDated|MDF_CrashedPrimary);
+ u32 mdf = mdev->bc->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
+ MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
+ MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
if (test_bit(CRASHED_PRIMARY, &mdev->flags))
- mdf |= MDF_CrashedPrimary;
- if (mdev->state.role == Primary ||
- (mdev->state.pdsk < Inconsistent && mdev->state.peer == Primary))
- mdf |= MDF_PrimaryInd;
- if (mdev->state.conn > WFReportParams)
- mdf |= MDF_ConnectedInd;
- if (mdev->state.disk > Inconsistent)
- mdf |= MDF_Consistent;
- if (mdev->state.disk > Outdated)
- mdf |= MDF_WasUpToDate;
- if (mdev->state.pdsk <= Outdated && mdev->state.pdsk >= Inconsistent)
- mdf |= MDF_PeerOutDated;
+ mdf |= MDF_CRASHED_PRIMARY;
+ if (mdev->state.role == R_PRIMARY ||
+ (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
+ mdf |= MDF_PRIMARY_IND;
+ if (mdev->state.conn > C_WF_REPORT_PARAMS)
+ mdf |= MDF_CONNECTED_IND;
+ if (mdev->state.disk > D_INCONSISTENT)
+ mdf |= MDF_CONSISTENT;
+ if (mdev->state.disk > D_OUTDATED)
+ mdf |= MDF_WAS_UP_TO_DATE;
+ if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
+ mdf |= MDF_PEER_OUT_DATED;
if (mdf != mdev->bc->md.flags) {
mdev->bc->md.flags = mdf;
drbd_md_mark_dirty(mdev);
}
- if (os.disk < Consistent && ns.disk >= Consistent)
- drbd_set_ed_uuid(mdev, mdev->bc->md.uuid[Current]);
+ if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
+ drbd_set_ed_uuid(mdev, mdev->bc->md.uuid[UI_CURRENT]);
dec_local(mdev);
}
- /* Peer was forced UpToDate & Primary, consider to resync */
- if (os.disk == Inconsistent && os.pdsk == Inconsistent &&
- os.peer == Secondary && ns.peer == Primary)
+ /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
+ if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
+ os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
set_bit(CONSIDER_RESYNC, &mdev->flags);
/* Receiver should clean up itself */
- if (os.conn != Disconnecting && ns.conn == Disconnecting)
+ if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
drbd_thread_stop_nowait(&mdev->receiver);
/* Now the receiver finished cleaning up itself, it should die */
- if (os.conn != StandAlone && ns.conn == StandAlone)
+ if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
drbd_thread_stop_nowait(&mdev->receiver);
/* Upon network failure, we need to restart the receiver. */
- if (os.conn > TearDown &&
- ns.conn <= TearDown && ns.conn >= Timeout)
+ if (os.conn > C_TEAR_DOWN &&
+ ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&mdev->receiver);
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
@@ -1044,7 +1044,7 @@ STATIC int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unu
ascw = (struct after_state_chg_work *) w;
after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
- if (ascw->flags & ChgWaitComplete) {
+ if (ascw->flags & CS_WAIT_COMPLETE) {
D_ASSERT(ascw->done != NULL);
complete(ascw->done);
}
@@ -1057,32 +1057,32 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
if (rv) {
dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
- _drbd_request_state(mdev, NS(conn, Connected), ChgStateVerbose);
+ _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
return;
}
switch (mdev->state.conn) {
- case StartingSyncT:
- _drbd_request_state(mdev, NS(conn, WFSyncUUID), ChgStateVerbose);
+ case C_STARTING_SYNC_T:
+ _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
break;
- case StartingSyncS:
- drbd_start_resync(mdev, SyncSource);
+ case C_STARTING_SYNC_S:
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
break;
}
}
-STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
- union drbd_state_t ns, enum chg_state_flags flags)
+STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags)
{
- enum fencing_policy fp;
+ enum drbd_fencing_p fp;
- if (os.conn != Connected && ns.conn == Connected) {
+ if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
clear_bit(CRASHED_PRIMARY, &mdev->flags);
if (mdev->p_uuid)
- mdev->p_uuid[UUID_FLAGS] &= ~((u64)2);
+ mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
}
- fp = DontCare;
+ fp = FP_DONT_CARE;
if (inc_local(mdev)) {
fp = mdev->bc->dc.fencing;
dec_local(mdev);
@@ -1091,44 +1091,44 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
/* Inform userspace about the change... */
drbd_bcast_state(mdev, ns);
- if (!(os.role == Primary && os.disk < UpToDate && os.pdsk < UpToDate) &&
- (ns.role == Primary && ns.disk < UpToDate && ns.pdsk < UpToDate))
+ if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
drbd_khelper(mdev, "pri-on-incon-degr");
/* Here we have the actions that are performed after a
state change. This function might sleep */
- if (fp == Stonith && ns.susp) {
+ if (fp == FP_STONITH && ns.susp) {
/* case1: The outdate peer handler is successfull:
* case2: The connection was established again: */
- if ((os.pdsk > Outdated && ns.pdsk <= Outdated) ||
- (os.conn < Connected && ns.conn >= Connected)) {
+ if ((os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) ||
+ (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) {
tl_clear(mdev);
spin_lock_irq(&mdev->req_lock);
- _drbd_set_state(_NS(mdev, susp, 0), ChgStateVerbose, NULL);
+ _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
spin_unlock_irq(&mdev->req_lock);
}
}
/* Do not change the order of the if above and the two below... */
- if (os.pdsk == Diskless && ns.pdsk > Diskless) { /* attach on the peer */
+ if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
drbd_send_uuids(mdev);
drbd_send_state(mdev);
}
- if (os.conn != WFBitMapS && ns.conn == WFBitMapS)
+ if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
/* Lost contact to peer's copy of the data */
- if ((os.pdsk >= Inconsistent &&
- os.pdsk != DUnknown &&
- os.pdsk != Outdated)
- && (ns.pdsk < Inconsistent ||
- ns.pdsk == DUnknown ||
- ns.pdsk == Outdated)) {
+ if ((os.pdsk >= D_INCONSISTENT &&
+ os.pdsk != D_UNKNOWN &&
+ os.pdsk != D_OUTDATED)
+ && (ns.pdsk < D_INCONSISTENT ||
+ ns.pdsk == D_UNKNOWN ||
+ ns.pdsk == D_OUTDATED)) {
kfree(mdev->p_uuid);
mdev->p_uuid = NULL;
if (inc_local(mdev)) {
- if ((ns.role == Primary || ns.peer == Primary) &&
- mdev->bc->md.uuid[Bitmap] == 0 && ns.disk >= UpToDate) {
+ if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
+ mdev->bc->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
}
@@ -1136,19 +1136,19 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
}
}
- if (ns.pdsk < Inconsistent && inc_local(mdev)) {
- if (ns.peer == Primary && mdev->bc->md.uuid[Bitmap] == 0)
+ if (ns.pdsk < D_INCONSISTENT && inc_local(mdev)) {
+ if (ns.peer == R_PRIMARY && mdev->bc->md.uuid[UI_BITMAP] == 0)
drbd_uuid_new_current(mdev);
- /* Diskless Peer becomes secondary */
- if (os.peer == Primary && ns.peer == Secondary)
+ /* D_DISKLESS Peer becomes secondary */
+ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
drbd_al_to_on_disk_bm(mdev);
dec_local(mdev);
}
/* Last part of the attaching process ... */
- if (ns.conn >= Connected &&
- os.disk == Attaching && ns.disk == Negotiating) {
+ if (ns.conn >= C_CONNECTED &&
+ os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
drbd_send_sizes(mdev); /* to start sync... */
@@ -1157,7 +1157,7 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
}
/* We want to pause/continue resync, tell peer. */
- if (ns.conn >= Connected &&
+ if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
drbd_send_state(mdev);
@@ -1169,22 +1169,22 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
/* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */
- if (os.conn == WFReportParams && ns.conn >= Connected)
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
drbd_send_state(mdev);
/* We are in the progress to start a full sync... */
- if ((os.conn != StartingSyncT && ns.conn == StartingSyncT) ||
- (os.conn != StartingSyncS && ns.conn == StartingSyncS))
+ if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
/* We are invalidating our self... */
- if (os.conn < Connected && ns.conn < Connected &&
- os.disk > Inconsistent && ns.disk == Inconsistent)
+ if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
+ os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
- if (os.disk > Diskless && ns.disk == Diskless) {
- /* since inc_local() only works as long as disk>=Inconsistent,
- and it is Diskless here, local_cnt can only go down, it can
+ if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* since inc_local() only works as long as disk>=D_INCONSISTENT,
+ and it is D_DISKLESS here, local_cnt can only go down, it can
not increase... It will reach zero */
wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
@@ -1201,25 +1201,25 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
}
/* Disks got bigger while they were detached */
- if (ns.disk > Negotiating && ns.pdsk > Negotiating &&
+ if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
- if (ns.conn == Connected)
+ if (ns.conn == C_CONNECTED)
resync_after_online_grow(mdev);
}
/* A resync finished or aborted, wake paused devices... */
- if ((os.conn > Connected && ns.conn <= Connected) ||
+ if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
(os.peer_isp && !ns.peer_isp) ||
(os.user_isp && !ns.user_isp))
resume_next_sg(mdev);
/* Upon network connection, we need to start the received */
- if (os.conn == StandAlone && ns.conn == Unconnected)
+ if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
drbd_thread_start(&mdev->receiver);
/* Terminate worker thread if we are unconfigured - it will be
restarted as needed... */
- if (ns.disk == Diskless && ns.conn == StandAlone && ns.role == Secondary) {
+ if (ns.disk == D_DISKLESS && ns.conn == C_STANDALONE && ns.role == R_SECONDARY) {
if (os.aftr_isp != ns.aftr_isp)
resume_next_sg(mdev);
drbd_thread_stop_nowait(&mdev->worker);
@@ -1231,7 +1231,7 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state_t os,
STATIC int drbd_thread_setup(void *arg)
{
- struct Drbd_thread *thi = (struct Drbd_thread *) arg;
+ struct drbd_thread *thi = (struct drbd_thread *) arg;
struct drbd_conf *mdev = thi->mdev;
int retval;
@@ -1242,7 +1242,7 @@ restart:
/* if the receiver has been "Exiting", the last thing it did
* was set the conn state to "StandAlone",
- * if now a re-connect request comes in, conn state goes Unconnected,
+ * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
* and receiver thread will be "started".
* drbd_thread_start needs to set "Restarting" in that case.
* t_state check and assignement needs to be within the same spinlock,
@@ -1270,8 +1270,8 @@ restart:
return retval;
}
-STATIC void drbd_thread_init(struct drbd_conf *mdev, struct Drbd_thread *thi,
- int (*func) (struct Drbd_thread *))
+STATIC void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
+ int (*func) (struct drbd_thread *))
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
@@ -1280,7 +1280,7 @@ STATIC void drbd_thread_init(struct drbd_conf *mdev, struct Drbd_thread *thi,
thi->mdev = mdev;
}
-int drbd_thread_start(struct Drbd_thread *thi)
+int drbd_thread_start(struct drbd_thread *thi)
{
struct drbd_conf *mdev = thi->mdev;
struct task_struct *nt;
@@ -1338,9 +1338,9 @@ int drbd_thread_start(struct Drbd_thread *thi)
}
-void _drbd_thread_stop(struct Drbd_thread *thi, int restart, int wait)
+void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
- enum Drbd_thread_state ns = restart ? Restarting : Exiting;
+ enum drbd_thread_state ns = restart ? Restarting : Exiting;
spin_lock(&thi->t_lock);
@@ -1405,7 +1405,7 @@ cpumask_t drbd_calc_cpu_mask(struct drbd_conf *mdev)
void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
{
struct task_struct *p = current;
- struct Drbd_thread *thi =
+ struct drbd_thread *thi =
p == mdev->asender.task ? &mdev->asender :
p == mdev->receiver.task ? &mdev->receiver :
p == mdev->worker.task ? &mdev->worker :
@@ -1425,7 +1425,7 @@ void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
/* the appropriate socket mutex must be held already */
int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h,
+ enum drbd_packets cmd, struct p_header *h,
size_t size, unsigned msg_flags)
{
int sent, ok;
@@ -1435,7 +1435,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
h->magic = BE_DRBD_MAGIC;
h->command = cpu_to_be16(cmd);
- h->length = cpu_to_be16(size-sizeof(struct Drbd_Header));
+ h->length = cpu_to_be16(size-sizeof(struct p_header));
dump_packet(mdev, sock, 0, (void *)h, __FILE__, __LINE__);
sent = drbd_send(mdev, sock, h, size, msg_flags);
@@ -1451,7 +1451,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
* when we hold the appropriate socket mutex.
*/
int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum Drbd_Packet_Cmd cmd, struct Drbd_Header *h, size_t size)
+ enum drbd_packets cmd, struct p_header *h, size_t size)
{
int ok = 0;
struct socket *sock;
@@ -1476,10 +1476,10 @@ int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
return ok;
}
-int drbd_send_cmd2(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd, char *data,
+int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
size_t size)
{
- struct Drbd_Header h;
+ struct p_header h;
int ok;
h.magic = BE_DRBD_MAGIC;
@@ -1503,15 +1503,15 @@ int drbd_send_cmd2(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd, char *data,
int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
{
- struct Drbd_SyncParam89_Packet *p;
+ struct p_rs_param_89 *p;
struct socket *sock;
int size, rv;
const int apv = mdev->agreed_pro_version;
- size = apv <= 87 ? sizeof(struct Drbd_SyncParam_Packet)
- : apv == 88 ? sizeof(struct Drbd_SyncParam_Packet)
+ size = apv <= 87 ? sizeof(struct p_rs_param)
+ : apv == 88 ? sizeof(struct p_rs_param)
+ strlen(mdev->sync_conf.verify_alg) + 1
- : /* 89 */ sizeof(struct Drbd_SyncParam89_Packet);
+ : /* 89 */ sizeof(struct p_rs_param_89);
/* used from admin command context and receiver/worker context.
* to avoid kmalloc, grab the socket right here,
@@ -1520,9 +1520,9 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
sock = mdev->data.socket;
if (likely(sock != NULL)) {
- enum Drbd_Packet_Cmd cmd = apv >= 89 ? SyncParam89 : SyncParam;
+ enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
- p = &mdev->data.sbuf.SyncParam89;
+ p = &mdev->data.sbuf.rs_param_89;
/* initialize verify_alg and csums_alg */
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
@@ -1545,10 +1545,10 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
int drbd_send_protocol(struct drbd_conf *mdev)
{
- struct Drbd_Protocol_Packet *p;
+ struct p_protocol *p;
int size, rv;
- size = sizeof(struct Drbd_Protocol_Packet);
+ size = sizeof(struct p_protocol);
if (mdev->agreed_pro_version >= 87)
size += strlen(mdev->net_conf->integrity_alg) + 1;
@@ -1567,34 +1567,34 @@ int drbd_send_protocol(struct drbd_conf *mdev)
if (mdev->agreed_pro_version >= 87)
strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
- rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportProtocol,
- (struct Drbd_Header *)p, size);
+ rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
+ (struct p_header *)p, size);
kfree(p);
return rv;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
- struct Drbd_GenCnt_Packet p;
+ struct p_uuids p;
int i;
- if (!inc_local_if_state(mdev, Negotiating))
+ if (!inc_local_if_state(mdev, D_NEGOTIATING))
return 1;
- for (i = Current; i < UUID_SIZE; i++)
+ for (i = UI_CURRENT; i < UI_SIZE; i++)
p.uuid[i] = mdev->bc ? cpu_to_be64(mdev->bc->md.uuid[i]) : 0;
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
- p.uuid[UUID_SIZE] = cpu_to_be64(mdev->comm_bm_set);
+ p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
- uuid_flags |= mdev->new_state_tmp.disk == Inconsistent ? 4 : 0;
- p.uuid[UUID_FLAGS] = cpu_to_be64(uuid_flags);
+ uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
+ p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
dec_local(mdev);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportUUIDs,
- (struct Drbd_Header *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
+ (struct p_header *)&p, sizeof(p));
}
int drbd_send_uuids(struct drbd_conf *mdev)
@@ -1610,22 +1610,22 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
{
- struct Drbd_SyncUUID_Packet p;
+ struct p_rs_uuid p;
p.uuid = cpu_to_be64(val);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportSyncUUID,
- (struct Drbd_Header *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
+ (struct p_header *)&p, sizeof(p));
}
int drbd_send_sizes(struct drbd_conf *mdev)
{
- struct Drbd_Sizes_Packet p;
+ struct p_sizes p;
sector_t d_size, u_size;
int q_order_type;
int ok;
- if (inc_local_if_state(mdev, Negotiating)) {
+ if (inc_local_if_state(mdev, D_NEGOTIATING)) {
D_ASSERT(mdev->bc->backing_bdev);
d_size = drbd_get_max_capacity(mdev->bc);
u_size = mdev->bc->dc.disk_size;
@@ -1644,22 +1644,22 @@ int drbd_send_sizes(struct drbd_conf *mdev)
p.max_segment_size = cpu_to_be32(mdev->rq_queue->max_segment_size);
p.queue_order_type = cpu_to_be32(q_order_type);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportSizes,
- (struct Drbd_Header *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
+ (struct p_header *)&p, sizeof(p));
return ok;
}
/**
* drbd_send_state:
* Informs the peer about our state. Only call it when
- * mdev->state.conn >= Connected (I.e. you may not call it while in
+ * mdev->state.conn >= C_CONNECTED (I.e. you may not call it while in
* WFReportParams. Though there is one valid and necessary exception,
* drbd_connect() calls drbd_send_state() while in it WFReportParams.
*/
int drbd_send_state(struct drbd_conf *mdev)
{
struct socket *sock;
- struct Drbd_State_Packet p;
+ struct p_state p;
int ok = 0;
/* Grab state lock so we wont send state if we're in the middle
@@ -1672,8 +1672,8 @@ int drbd_send_state(struct drbd_conf *mdev)
sock = mdev->data.socket;
if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, ReportState,
- (struct Drbd_Header *)&p, sizeof(p), 0);
+ ok = _drbd_send_cmd(mdev, sock, P_STATE,
+ (struct p_header *)&p, sizeof(p), 0);
}
mutex_unlock(&mdev->data.mutex);
@@ -1683,32 +1683,32 @@ int drbd_send_state(struct drbd_conf *mdev)
}
int drbd_send_state_req(struct drbd_conf *mdev,
- union drbd_state_t mask, union drbd_state_t val)
+ union drbd_state mask, union drbd_state val)
{
- struct Drbd_Req_State_Packet p;
+ struct p_req_state p;
p.mask = cpu_to_be32(mask.i);
p.val = cpu_to_be32(val.i);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, StateChgRequest,
- (struct Drbd_Header *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
+ (struct p_header *)&p, sizeof(p));
}
int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
{
- struct Drbd_RqS_Reply_Packet p;
+ struct p_req_state_reply p;
p.retcode = cpu_to_be32(retcode);
- return drbd_send_cmd(mdev, USE_META_SOCKET, StateChgReply,
- (struct Drbd_Header *)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
+ (struct p_header *)&p, sizeof(p));
}
/* returns
* positive: number of payload bytes needed in this packet.
* zero: incompressible. */
int fill_bitmap_rle_bytes(struct drbd_conf *mdev,
- struct Drbd_Compressed_Bitmap_Packet *p,
+ struct p_compressed_bm *p,
struct bm_xfer_ctx *c)
{
unsigned long plain_bits;
@@ -1801,7 +1801,7 @@ int fill_bitmap_rle_bytes(struct drbd_conf *mdev,
}
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
- struct Drbd_Compressed_Bitmap_Packet *p,
+ struct p_compressed_bm *p,
struct bm_xfer_ctx *c)
{
struct bitstream bs;
@@ -1897,9 +1897,9 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
enum { OK, FAILED, DONE }
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
- struct Drbd_Header *h, struct bm_xfer_ctx *c)
+ struct p_header *h, struct bm_xfer_ctx *c)
{
- struct Drbd_Compressed_Bitmap_Packet *p = (void*)h;
+ struct p_compressed_bm *p = (void*)h;
unsigned long num_words;
int len;
int ok;
@@ -1913,7 +1913,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
return FAILED;
if (len) {
DCBP_set_code(p, 0 ? RLE_VLI_Bytes : RLE_VLI_BitsFibD_3_5);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, ReportCBitMap, h,
+ ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
sizeof(*p) + len, 0);
c->packets[0]++;
@@ -1928,13 +1928,13 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
len = num_words * sizeof(long);
if (len)
drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, ReportBitMap,
- h, sizeof(struct Drbd_Header) + len, 0);
+ ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
+ h, sizeof(struct p_header) + len, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
c->packets[1]++;
- c->bytes[1] += sizeof(struct Drbd_Header) + len;
+ c->bytes[1] += sizeof(struct p_header) + len;
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
@@ -1950,30 +1950,30 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
int _drbd_send_bitmap(struct drbd_conf *mdev)
{
struct bm_xfer_ctx c;
- struct Drbd_Header *p;
+ struct p_header *p;
int ret;
ERR_IF(!mdev->bitmap) return FALSE;
/* maybe we should use some per thread scratch page,
* and allocate that during initial device creation? */
- p = (struct Drbd_Header *) __get_free_page(GFP_NOIO);
+ p = (struct p_header *) __get_free_page(GFP_NOIO);
if (!p) {
dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
return FALSE;
}
if (inc_local(mdev)) {
- if (drbd_md_test_flag(mdev->bc, MDF_FullSync)) {
+ if (drbd_md_test_flag(mdev->bc, MDF_FULL_SYNC)) {
dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
drbd_bm_set_all(mdev);
if (drbd_bm_write(mdev)) {
- /* write_bm did fail! Leave full sync flag set in Meta Data
+ /* write_bm did fail! Leave full sync flag set in Meta P_DATA
* but otherwise process as per normal - need to tell other
* side that a full resync is required! */
dev_err(DEV, "Failed to write bitmap to disk!\n");
} else {
- drbd_md_clear_flag(mdev, MDF_FullSync);
+ drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
drbd_md_sync(mdev);
}
}
@@ -2007,15 +2007,15 @@ int drbd_send_bitmap(struct drbd_conf *mdev)
int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
int ok;
- struct Drbd_BarrierAck_Packet p;
+ struct p_barrier_ack p;
p.barrier = barrier_nr;
p.set_size = cpu_to_be32(set_size);
- if (mdev->state.conn < Connected)
+ if (mdev->state.conn < C_CONNECTED)
return FALSE;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, BarrierAck,
- (struct Drbd_Header *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
+ (struct p_header *)&p, sizeof(p));
return ok;
}
@@ -2024,45 +2024,45 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
* This helper function expects the sector and block_id parameter already
* in big endian!
*/
-STATIC int _drbd_send_ack(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
+STATIC int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
u64 sector,
u32 blksize,
u64 block_id)
{
int ok;
- struct Drbd_BlockAck_Packet p;
+ struct p_block_ack p;
p.sector = sector;
p.block_id = block_id;
p.blksize = blksize;
p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
- if (!mdev->meta.socket || mdev->state.conn < Connected)
+ if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
return FALSE;
ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
- (struct Drbd_Header *)&p, sizeof(p));
+ (struct p_header *)&p, sizeof(p));
return ok;
}
-int drbd_send_ack_dp(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
- struct Drbd_Data_Packet *dp)
+int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
+ struct p_data *dp)
{
- const int header_size = sizeof(struct Drbd_Data_Packet)
- - sizeof(struct Drbd_Header);
- int data_size = ((struct Drbd_Header *)dp)->length - header_size;
+ const int header_size = sizeof(struct p_data)
+ - sizeof(struct p_header);
+ int data_size = ((struct p_header *)dp)->length - header_size;
return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
-int drbd_send_ack_rp(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
- struct Drbd_BlockRequest_Packet *rp)
+int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
+ struct p_block_req *rp)
{
return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
int drbd_send_ack(struct drbd_conf *mdev,
- enum Drbd_Packet_Cmd cmd, struct Tl_epoch_entry *e)
+ enum drbd_packets cmd, struct drbd_epoch_entry *e)
{
return _drbd_send_ack(mdev, cmd,
cpu_to_be64(e->sector),
@@ -2072,7 +2072,7 @@ int drbd_send_ack(struct drbd_conf *mdev,
/* This function misuses the block_id field to signal if the blocks
* are is sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
+int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
sector_t sector, int blksize, u64 block_id)
{
return _drbd_send_ack(mdev, cmd,
@@ -2085,24 +2085,24 @@ int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id)
{
int ok;
- struct Drbd_BlockRequest_Packet p;
+ struct p_block_req p;
p.sector = cpu_to_be64(sector);
p.block_id = block_id;
p.blksize = cpu_to_be32(size);
ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
- (struct Drbd_Header *)&p, sizeof(p));
+ (struct p_header *)&p, sizeof(p));
return ok;
}
int drbd_send_drequest_csum(struct drbd_conf *mdev,
sector_t sector, int size,
void *digest, int digest_size,
- enum Drbd_Packet_Cmd cmd)
+ enum drbd_packets cmd)
{
int ok;
- struct Drbd_BlockRequest_Packet p;
+ struct p_block_req p;
p.sector = cpu_to_be64(sector);
p.block_id = BE_DRBD_MAGIC + 0xbeef;
@@ -2110,7 +2110,7 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev,
p.head.magic = BE_DRBD_MAGIC;
p.head.command = cpu_to_be16(cmd);
- p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct Drbd_Header) + digest_size);
+ p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size);
mutex_lock(&mdev->data.mutex);
@@ -2125,14 +2125,14 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev,
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
int ok;
- struct Drbd_BlockRequest_Packet p;
+ struct p_block_req p;
p.sector = cpu_to_be64(sector);
p.block_id = BE_DRBD_MAGIC + 0xbabe;
p.blksize = cpu_to_be32(size);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, OVRequest,
- (struct Drbd_Header *)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
+ (struct p_header *)&p, sizeof(p));
return ok;
}
@@ -2148,7 +2148,7 @@ STATIC int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
drop_it = mdev->meta.socket == sock
|| !mdev->asender.task
|| get_t_state(&mdev->asender) != Running
- || mdev->state.conn < Connected;
+ || mdev->state.conn < C_CONNECTED;
if (drop_it)
return TRUE;
@@ -2160,7 +2160,7 @@ STATIC int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
request_ping(mdev);
}
- return drop_it; /* && (mdev->state == Primary) */;
+ return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
/* The idea of sendpage seems to be to put some kind of reference
@@ -2237,7 +2237,7 @@ int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
}
len -= sent;
offset += sent;
- } while (len > 0 /* THINK && mdev->cstate >= Connected*/);
+ } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
set_fs(oldfs);
clear_bit(NET_CONGESTED, &mdev->flags);
@@ -2274,12 +2274,12 @@ static inline int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
}
/* Used to send write requests
- * Primary -> Peer (Data)
+ * R_PRIMARY -> Peer (P_DATA)
*/
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
int ok = 1;
- struct Drbd_Data_Packet p;
+ struct p_data p;
unsigned int dp_flags = 0;
void *dgb;
int dgs;
@@ -2291,9 +2291,9 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
p.head.magic = BE_DRBD_MAGIC;
- p.head.command = cpu_to_be16(Data);
+ p.head.command = cpu_to_be16(P_DATA);
p.head.length =
- cpu_to_be16(sizeof(p) - sizeof(struct Drbd_Header) + dgs + req->size);
+ cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size);
p.sector = cpu_to_be64(req->sector);
p.block_id = (unsigned long)req;
@@ -2308,8 +2308,8 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
dp_flags |= DP_HARDBARRIER;
if (bio_sync(req->master_bio))
dp_flags |= DP_RW_SYNC;
- if (mdev->state.conn >= SyncSource &&
- mdev->state.conn <= PausedSyncT)
+ if (mdev->state.conn >= C_SYNC_SOURCE &&
+ mdev->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
p.dp_flags = cpu_to_be32(dp_flags);
@@ -2334,14 +2334,14 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
}
/* answer packet, used to send data back for read requests:
- * Peer -> (diskless) Primary (DataReply)
- * SyncSource -> SyncTarget (RSDataReply)
+ * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
+ * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
-int drbd_send_block(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
- struct Tl_epoch_entry *e)
+int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
+ struct drbd_epoch_entry *e)
{
int ok;
- struct Drbd_Data_Packet p;
+ struct p_data p;
void *dgb;
int dgs;
@@ -2351,7 +2351,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum Drbd_Packet_Cmd cmd,
p.head.magic = BE_DRBD_MAGIC;
p.head.command = cpu_to_be16(cmd);
p.head.length =
- cpu_to_be16(sizeof(p) - sizeof(struct Drbd_Header) + dgs + e->size);
+ cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size);
p.sector = cpu_to_be64(e->sector);
p.block_id = e->block_id;
@@ -2457,9 +2457,9 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
dev_err(DEV, "%s_sendmsg returned %d\n",
sock == mdev->meta.socket ? "msock" : "sock",
rv);
- drbd_force_state(mdev, NS(conn, BrokenPipe));
+ drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
} else
- drbd_force_state(mdev, NS(conn, Timeout));
+ drbd_force_state(mdev, NS(conn, C_TIMEOUT));
}
return sent;
@@ -2475,7 +2475,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
/* to have a stable mdev->state.role
* and no race with updating open_cnt */
- if (mdev->state.role != Primary) {
+ if (mdev->state.role != R_PRIMARY) {
if (mode & FMODE_WRITE)
rv = -EROFS;
else if (!allow_oos)
@@ -2500,7 +2500,7 @@ STATIC void drbd_unplug_fn(struct request_queue *q)
{
struct drbd_conf *mdev = q->queuedata;
- MTRACE(TraceTypeUnplug, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_UNPLUG, TRACE_LVL_SUMMARY,
dev_info(DEV, "got unplugged ap_bio_count=%d\n",
atomic_read(&mdev->ap_bio_cnt));
);
@@ -2512,8 +2512,8 @@ STATIC void drbd_unplug_fn(struct request_queue *q)
/* only if connected */
spin_lock_irq(&mdev->req_lock);
- if (mdev->state.pdsk >= Inconsistent && mdev->state.conn >= Connected) {
- D_ASSERT(mdev->state.role == Primary);
+ if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
+ D_ASSERT(mdev->state.role == R_PRIMARY);
if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
/* add to the data.work queue,
* unless already queued.
@@ -2526,7 +2526,7 @@ STATIC void drbd_unplug_fn(struct request_queue *q)
}
spin_unlock_irq(&mdev->req_lock);
- if (mdev->state.disk >= Inconsistent)
+ if (mdev->state.disk >= D_INCONSISTENT)
drbd_kick_lo(mdev);
}
@@ -2535,12 +2535,12 @@ STATIC void drbd_set_defaults(struct drbd_conf *mdev)
mdev->sync_conf.after = DRBD_AFTER_DEF;
mdev->sync_conf.rate = DRBD_RATE_DEF;
mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
- mdev->state = (union drbd_state_t) {
- { .role = Secondary,
- .peer = Unknown,
- .conn = StandAlone,
- .disk = Diskless,
- .pdsk = DUnknown,
+ mdev->state = (union drbd_state) {
+ { .role = R_SECONDARY,
+ .peer = R_UNKNOWN,
+ .conn = C_STANDALONE,
+ .disk = D_DISKLESS,
+ .pdsk = D_UNKNOWN,
.susp = 0
} };
}
@@ -2715,7 +2715,7 @@ STATIC int drbd_create_mempools(void)
goto Enomem;
drbd_ee_cache = kmem_cache_create(
- "drbd_ee_cache", sizeof(struct Tl_epoch_entry), 0, 0, NULL);
+ "drbd_ee_cache", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
if (drbd_ee_cache == NULL)
goto Enomem;
@@ -3019,7 +3019,7 @@ int __init drbd_init(void)
{
int err;
- if (sizeof(struct Drbd_HandShake_Packet) != 80) {
+ if (sizeof(struct p_handshake) != 80) {
printk(KERN_ERR
"drbd: never change the size or layout "
"of the HandShake packet.\n");
@@ -3147,7 +3147,7 @@ void drbd_free_resources(struct drbd_conf *mdev)
struct meta_data_on_disk {
u64 la_size; /* last agreed size. */
- u64 uuid[UUID_SIZE]; /* UUIDs. */
+ u64 uuid[UI_SIZE]; /* UUIDs. */
u64 device_uuid;
u64 reserved_u64_1;
u32 flags; /* MDF */
@@ -3176,12 +3176,12 @@ void drbd_md_sync(struct drbd_conf *mdev)
return;
del_timer(&mdev->md_sync_timer);
- /* We use here Failed and not Attaching because we try to write
+ /* We use here D_FAILED and not D_ATTACHING because we try to write
* metadata even if we detach due to a disk failure! */
- if (!inc_local_if_state(mdev, Failed))
+ if (!inc_local_if_state(mdev, D_FAILED))
return;
- MTRACE(TraceTypeMDIO, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_MD_IO, TRACE_LVL_SUMMARY,
dev_info(DEV, "Writing meta data super block now.\n");
);
@@ -3190,7 +3190,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
memset(buffer, 0, 512);
buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
- for (i = Current; i < UUID_SIZE; i++)
+ for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(mdev->bc->md.uuid[i]);
buffer->flags = cpu_to_be32(mdev->bc->md.flags);
buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
@@ -3227,17 +3227,17 @@ void drbd_md_sync(struct drbd_conf *mdev)
/**
* drbd_md_read:
* @bdev: describes the backing storage and the meta-data storage
- * Reads the meta data from bdev. Return 0 (NoError) on success, and an
- * enum ret_codes in case something goes wrong.
- * Currently only: MDIOError, MDInvalid.
+ * Reads the meta data from bdev. Return 0 (NO_ERROR) on success, and an
+ * enum drbd_ret_codes in case something goes wrong.
+ * Currently only: ERR_IO_MD_DISK, MDInvalid.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
- int i, rv = NoError;
+ int i, rv = NO_ERROR;
- if (!inc_local_if_state(mdev, Attaching))
- return MDIOError;
+ if (!inc_local_if_state(mdev, D_ATTACHING))
+ return ERR_IO_MD_DISK;
mutex_lock(&mdev->md_io_mutex);
buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
@@ -3246,43 +3246,43 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
/* NOTE: cant do normal error processing here as this is
called BEFORE disk is attached */
dev_err(DEV, "Error while reading metadata.\n");
- rv = MDIOError;
+ rv = ERR_IO_MD_DISK;
goto err;
}
if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
dev_err(DEV, "Error while reading metadata, magic not found.\n");
- rv = MDInvalid;
+ rv = ERR_MD_INVALID;
goto err;
}
if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
- rv = MDInvalid;
+ rv = ERR_MD_INVALID;
goto err;
}
if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
- rv = MDInvalid;
+ rv = ERR_MD_INVALID;
goto err;
}
if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
- rv = MDInvalid;
+ rv = ERR_MD_INVALID;
goto err;
}
if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
- rv = MDInvalid;
+ rv = ERR_MD_INVALID;
goto err;
}
bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
- for (i = Current; i < UUID_SIZE; i++)
+ for (i = UI_CURRENT; i < UI_SIZE; i++)
bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
bdev->md.flags = be32_to_cpu(buffer->flags);
mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
@@ -3315,10 +3315,10 @@ STATIC void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
int i;
- for (i = History_start; i < History_end; i++) {
+ for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
mdev->bc->md.uuid[i+1] = mdev->bc->md.uuid[i];
- MTRACE(TraceTypeUuid, TraceLvlAll,
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_ALL,
drbd_print_uuid(mdev, i+1);
);
}
@@ -3326,8 +3326,8 @@ STATIC void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
- if (idx == Current) {
- if (mdev->state.role == Primary)
+ if (idx == UI_CURRENT) {
+ if (mdev->state.role == R_PRIMARY)
val |= 1;
else
val &= ~((u64)1);
@@ -3337,7 +3337,7 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
mdev->bc->md.uuid[idx] = val;
- MTRACE(TraceTypeUuid, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_SUMMARY,
drbd_print_uuid(mdev, idx);
);
@@ -3349,9 +3349,9 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
if (mdev->bc->md.uuid[idx]) {
drbd_uuid_move_history(mdev);
- mdev->bc->md.uuid[History_start] = mdev->bc->md.uuid[idx];
- MTRACE(TraceTypeUuid, TraceLvlMetrics,
- drbd_print_uuid(mdev, History_start);
+ mdev->bc->md.uuid[UI_HISTORY_START] = mdev->bc->md.uuid[idx];
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_METRICS,
+ drbd_print_uuid(mdev, UI_HISTORY_START);
);
}
_drbd_uuid_set(mdev, idx, val);
@@ -3367,39 +3367,39 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
u64 val;
dev_info(DEV, "Creating new current UUID\n");
- D_ASSERT(mdev->bc->md.uuid[Bitmap] == 0);
- mdev->bc->md.uuid[Bitmap] = mdev->bc->md.uuid[Current];
- MTRACE(TraceTypeUuid, TraceLvlMetrics,
- drbd_print_uuid(mdev, Bitmap);
+ D_ASSERT(mdev->bc->md.uuid[UI_BITMAP] == 0);
+ mdev->bc->md.uuid[UI_BITMAP] = mdev->bc->md.uuid[UI_CURRENT];
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_METRICS,
+ drbd_print_uuid(mdev, UI_BITMAP);
);
get_random_bytes(&val, sizeof(u64));
- _drbd_uuid_set(mdev, Current, val);
+ _drbd_uuid_set(mdev, UI_CURRENT, val);
}
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
- if (mdev->bc->md.uuid[Bitmap] == 0 && val == 0)
+ if (mdev->bc->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
if (val == 0) {
drbd_uuid_move_history(mdev);
- mdev->bc->md.uuid[History_start] = mdev->bc->md.uuid[Bitmap];
- mdev->bc->md.uuid[Bitmap] = 0;
+ mdev->bc->md.uuid[UI_HISTORY_START] = mdev->bc->md.uuid[UI_BITMAP];
+ mdev->bc->md.uuid[UI_BITMAP] = 0;
- MTRACE(TraceTypeUuid, TraceLvlMetrics,
- drbd_print_uuid(mdev, History_start);
- drbd_print_uuid(mdev, Bitmap);
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_METRICS,
+ drbd_print_uuid(mdev, UI_HISTORY_START);
+ drbd_print_uuid(mdev, UI_BITMAP);
);
} else {
- if (mdev->bc->md.uuid[Bitmap])
+ if (mdev->bc->md.uuid[UI_BITMAP])
dev_warn(DEV, "bm UUID already set");
- mdev->bc->md.uuid[Bitmap] = val;
- mdev->bc->md.uuid[Bitmap] &= ~((u64)1);
+ mdev->bc->md.uuid[UI_BITMAP] = val;
+ mdev->bc->md.uuid[UI_BITMAP] &= ~((u64)1);
- MTRACE(TraceTypeUuid, TraceLvlMetrics,
- drbd_print_uuid(mdev, Bitmap);
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_METRICS,
+ drbd_print_uuid(mdev, UI_BITMAP);
);
}
drbd_md_mark_dirty(mdev);
@@ -3414,15 +3414,15 @@ int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
int rv = -EIO;
- if (inc_local_if_state(mdev, Attaching)) {
- drbd_md_set_flag(mdev, MDF_FullSync);
+ if (inc_local_if_state(mdev, D_ATTACHING)) {
+ drbd_md_set_flag(mdev, MDF_FULL_SYNC);
drbd_md_sync(mdev);
drbd_bm_set_all(mdev);
rv = drbd_bm_write(mdev);
if (!rv) {
- drbd_md_clear_flag(mdev, MDF_FullSync);
+ drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
drbd_md_sync(mdev);
}
@@ -3441,7 +3441,7 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
int rv = -EIO;
- if (inc_local_if_state(mdev, Attaching)) {
+ if (inc_local_if_state(mdev, D_ATTACHING)) {
drbd_bm_clear_all(mdev);
rv = drbd_bm_write(mdev);
dec_local(mdev);
@@ -3649,7 +3649,7 @@ STATIC char *_drbd_uuid_str(unsigned int idx)
"UUID_FLAGS",
};
- return (idx < EXT_UUID_SIZE) ? uuid_str[idx] : "*Unknown UUID index*";
+ return (idx < UI_EXTENDED_SIZE) ? uuid_str[idx] : "*Unknown UUID index*";
}
/* Pretty print a UUID value */
@@ -3814,7 +3814,7 @@ do { \
} \
} while (0)
-STATIC char *dump_st(char *p, int len, union drbd_state_t mask, union drbd_state_t val)
+STATIC char *dump_st(char *p, int len, union drbd_state mask, union drbd_state val)
{
char *op = p;
*p = '\0';
@@ -3829,7 +3829,7 @@ STATIC char *dump_st(char *p, int len, union drbd_state_t mask, union drbd_state
#define INFOP(fmt, args...) \
do { \
- if (trace_level >= TraceLvlAll) { \
+ if (trace_level >= TRACE_LVL_ALL) { \
dev_info(DEV, "%s:%d: %s [%d] %s %s " fmt , \
file, line, current->comm, current->pid, \
sockname, recv ? "<<<" : ">>>" , \
@@ -3853,123 +3853,123 @@ STATIC char *_dump_block_id(u64 block_id, char *buff)
void
_dump_packet(struct drbd_conf *mdev, struct socket *sock,
- int recv, union Drbd_Polymorph_Packet *p, char *file, int line)
+ int recv, union p_polymorph *p, char *file, int line)
{
char *sockname = sock == mdev->meta.socket ? "meta" : "data";
- int cmd = (recv == 2) ? p->head.command : be16_to_cpu(p->head.command);
+ int cmd = (recv == 2) ? p->header.command : be16_to_cpu(p->header.command);
char tmp[300];
- union drbd_state_t m, v;
+ union drbd_state m, v;
switch (cmd) {
- case HandShake:
+ case P_HAND_SHAKE:
INFOP("%s (protocol %u-%u)\n", cmdname(cmd),
- be32_to_cpu(p->HandShake.protocol_min),
- be32_to_cpu(p->HandShake.protocol_max));
+ be32_to_cpu(p->handshake.protocol_min),
+ be32_to_cpu(p->handshake.protocol_max));
break;
- case ReportBitMap: /* don't report this */
- case ReportCBitMap: /* don't report this */
+ case P_BITMAP: /* don't report this */
+ case P_COMPRESSED_BITMAP: /* don't report this */
break;
- case Data:
+ case P_DATA:
INFOP("%s (sector %llus, id %s, seq %u, f %x)\n", cmdname(cmd),
- (unsigned long long)be64_to_cpu(p->Data.sector),
- _dump_block_id(p->Data.block_id, tmp),
- be32_to_cpu(p->Data.seq_num),
- be32_to_cpu(p->Data.dp_flags)
+ (unsigned long long)be64_to_cpu(p->data.sector),
+ _dump_block_id(p->data.block_id, tmp),
+ be32_to_cpu(p->data.seq_num),
+ be32_to_cpu(p->data.dp_flags)
);
break;
- case DataReply:
- case RSDataReply:
+ case P_DATA_REPLY:
+ case P_RS_DATA_REPLY:
INFOP("%s (sector %llus, id %s)\n", cmdname(cmd),
- (unsigned long long)be64_to_cpu(p->Data.sector),
- _dump_block_id(p->Data.block_id, tmp)
+ (unsigned long long)be64_to_cpu(p->data.sector),
+ _dump_block_id(p->data.block_id, tmp)
);
break;
- case RecvAck:
- case WriteAck:
- case RSWriteAck:
- case DiscardAck:
- case NegAck:
- case NegRSDReply:
+ case P_RECV_ACK:
+ case P_WRITE_ACK:
+ case P_RS_WRITE_ACK:
+ case P_DISCARD_ACK:
+ case P_NEG_ACK:
+ case P_NEG_RS_DREPLY:
INFOP("%s (sector %llus, size %u, id %s, seq %u)\n",
cmdname(cmd),
- (long long)be64_to_cpu(p->BlockAck.sector),
- be32_to_cpu(p->BlockAck.blksize),
- _dump_block_id(p->BlockAck.block_id, tmp),
- be32_to_cpu(p->BlockAck.seq_num)
+ (long long)be64_to_cpu(p->block_ack.sector),
+ be32_to_cpu(p->block_ack.blksize),
+ _dump_block_id(p->block_ack.block_id, tmp),
+ be32_to_cpu(p->block_ack.seq_num)
);
break;
- case DataRequest:
- case RSDataRequest:
+ case P_DATA_REQUEST:
+ case P_RS_DATA_REQUEST:
INFOP("%s (sector %llus, size %u, id %s)\n", cmdname(cmd),
- (long long)be64_to_cpu(p->BlockRequest.sector),
- be32_to_cpu(p->BlockRequest.blksize),
- _dump_block_id(p->BlockRequest.block_id, tmp)
+ (long long)be64_to_cpu(p->block_req.sector),
+ be32_to_cpu(p->block_req.blksize),
+ _dump_block_id(p->block_req.block_id, tmp)
);
break;
- case Barrier:
- case BarrierAck:
- INFOP("%s (barrier %u)\n", cmdname(cmd), p->Barrier.barrier);
+ case P_BARRIER:
+ case P_BARRIER_ACK:
+ INFOP("%s (barrier %u)\n", cmdname(cmd), p->barrier.barrier);
break;
- case SyncParam:
- case SyncParam89:
+ case P_SYNC_PARAM:
+ case P_SYNC_PARAM89:
INFOP("%s (rate %u, verify-alg \"%.64s\", csums-alg \"%.64s\")\n",
- cmdname(cmd), be32_to_cpu(p->SyncParam89.rate),
- p->SyncParam89.verify_alg, p->SyncParam89.csums_alg);
+ cmdname(cmd), be32_to_cpu(p->rs_param_89.rate),
+ p->rs_param_89.verify_alg, p->rs_param_89.csums_alg);
break;
- case ReportUUIDs:
+ case P_UUIDS:
INFOP("%s Curr:%016llX, Bitmap:%016llX, "
"HisSt:%016llX, HisEnd:%016llX\n",
cmdname(cmd),
- (unsigned long long)be64_to_cpu(p->GenCnt.uuid[Current]),
- (unsigned long long)be64_to_cpu(p->GenCnt.uuid[Bitmap]),
- (unsigned long long)be64_to_cpu(p->GenCnt.uuid[History_start]),
- (unsigned long long)be64_to_cpu(p->GenCnt.uuid[History_end]));
+ (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_CURRENT]),
+ (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_BITMAP]),
+ (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_HISTORY_START]),
+ (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_HISTORY_END]));
break;
- case ReportSizes:
+ case P_SIZES:
INFOP("%s (d %lluMiB, u %lluMiB, c %lldMiB, "
"max bio %x, q order %x)\n",
cmdname(cmd),
- (long long)(be64_to_cpu(p->Sizes.d_size)>>(20-9)),
- (long long)(be64_to_cpu(p->Sizes.u_size)>>(20-9)),
- (long long)(be64_to_cpu(p->Sizes.c_size)>>(20-9)),
- be32_to_cpu(p->Sizes.max_segment_size),
- be32_to_cpu(p->Sizes.queue_order_type));
+ (long long)(be64_to_cpu(p->sizes.d_size)>>(20-9)),
+ (long long)(be64_to_cpu(p->sizes.u_size)>>(20-9)),
+ (long long)(be64_to_cpu(p->sizes.c_size)>>(20-9)),
+ be32_to_cpu(p->sizes.max_segment_size),
+ be32_to_cpu(p->sizes.queue_order_type));
break;
- case ReportState:
- v.i = be32_to_cpu(p->State.state);
+ case P_STATE:
+ v.i = be32_to_cpu(p->state.state);
m.i = 0xffffffff;
dump_st(tmp, sizeof(tmp), m, v);
INFOP("%s (s %x {%s})\n", cmdname(cmd), v.i, tmp);
break;
- case StateChgRequest:
- m.i = be32_to_cpu(p->ReqState.mask);
- v.i = be32_to_cpu(p->ReqState.val);
+ case P_STATE_CHG_REQ:
+ m.i = be32_to_cpu(p->req_state.mask);
+ v.i = be32_to_cpu(p->req_state.val);
dump_st(tmp, sizeof(tmp), m, v);
INFOP("%s (m %x v %x {%s})\n", cmdname(cmd), m.i, v.i, tmp);
break;
- case StateChgReply:
+ case P_STATE_CHG_REPLY:
INFOP("%s (ret %x)\n", cmdname(cmd),
- be32_to_cpu(p->RqSReply.retcode));
+ be32_to_cpu(p->req_state_reply.retcode));
break;
- case Ping:
- case PingAck:
+ case P_PING:
+ case P_PING_ACK:
/*
* Dont trace pings at summary level
*/
- if (trace_level < TraceLvlAll)
+ if (trace_level < TRACE_LVL_ALL)
break;
/* fall through... */
default:
@@ -4015,14 +4015,14 @@ void _dump_bio(const char *pfx, struct drbd_conf *mdev, struct bio *bio, int com
bio->bi_sector << SECTOR_SHIFT,
bio->bi_size);
- if (trace_level >= TraceLvlMetrics &&
+ if (trace_level >= TRACE_LVL_METRICS &&
((biorw == WRITE) ^ complete)) {
printk(KERN_DEBUG " ind page offset length\n");
__bio_for_each_segment(bvec, bio, segno, 0) {
printk(KERN_DEBUG " [%d] %p %8.8x %8.8x\n", segno,
bvec->bv_page, bvec->bv_offset, bvec->bv_len);
- if (trace_level >= TraceLvlAll) {
+ if (trace_level >= TRACE_LVL_ALL) {
char *bvec_buf;
unsigned long flags;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 020c66741bd1..3b46a934c2d6 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -206,11 +206,11 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
char *ex_to_string;
int r;
enum drbd_disk_state nps;
- enum fencing_policy fp;
+ enum drbd_fencing_p fp;
- D_ASSERT(mdev->state.pdsk == DUnknown);
+ D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
- if (inc_local_if_state(mdev, Consistent)) {
+ if (inc_local_if_state(mdev, D_CONSISTENT)) {
fp = mdev->bc->dc.fencing;
dec_local(mdev);
} else {
@@ -218,42 +218,42 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
return mdev->state.pdsk;
}
- if (fp == Stonith)
- _drbd_request_state(mdev, NS(susp, 1), ChgWaitComplete);
+ if (fp == FP_STONITH)
+ _drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE);
r = drbd_khelper(mdev, "fence-peer");
switch ((r>>8) & 0xff) {
case 3: /* peer is inconsistent */
ex_to_string = "peer is inconsistent or worse";
- nps = Inconsistent;
+ nps = D_INCONSISTENT;
break;
case 4:
ex_to_string = "peer is outdated";
- nps = Outdated;
+ nps = D_OUTDATED;
break;
case 5: /* peer was down, we will(have) create(d) a new UUID anyways... */
- /* If we would be more strict, we would return DUnknown here. */
+ /* If we were more strict, we would return D_UNKNOWN here. */
ex_to_string = "peer is unreachable, assumed to be dead";
- nps = Outdated;
+ nps = D_OUTDATED;
break;
case 6: /* Peer is primary, voluntarily outdate myself.
- * This is useful when an unconnected Secondary is asked to
- * become Primary, but findes the other peer being active. */
+ * This is useful when an unconnected R_SECONDARY is asked to
+ * become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
dev_warn(DEV, "Peer is primary, outdating myself.\n");
- nps = DUnknown;
- _drbd_request_state(mdev, NS(disk, Outdated), ChgWaitComplete);
+ nps = D_UNKNOWN;
+ _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
break;
case 7:
- if (fp != Stonith)
+ if (fp != FP_STONITH)
dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
- nps = Outdated;
+ nps = D_OUTDATED;
break;
default:
/* The script is broken ... */
- nps = DUnknown;
+ nps = D_UNKNOWN;
dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
return nps;
}
@@ -270,69 +270,69 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
int r = 0;
int try = 0;
int forced = 0;
- union drbd_state_t mask, val;
+ union drbd_state mask, val;
enum drbd_disk_state nps;
- if (new_role == Primary)
+ if (new_role == R_PRIMARY)
request_ping(mdev); /* Detect a dead peer ASAP */
mutex_lock(&mdev->state_mutex);
- mask.i = 0; mask.role = role_mask;
+ mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
while (try++ < max_tries) {
- r = _drbd_request_state(mdev, mask, val, ChgWaitComplete);
+ r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
/* in case we first succeeded to outdate,
* but now suddenly could establish a connection */
- if (r == SS_CW_FailedByPeer && mask.pdsk != 0) {
+ if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
val.pdsk = 0;
mask.pdsk = 0;
continue;
}
- if (r == SS_NoUpToDateDisk && force &&
- (mdev->state.disk == Inconsistent ||
- mdev->state.disk == Outdated)) {
- mask.disk = disk_mask;
- val.disk = UpToDate;
+ if (r == SS_NO_UP_TO_DATE_DISK && force &&
+ (mdev->state.disk == D_INCONSISTENT ||
+ mdev->state.disk == D_OUTDATED)) {
+ mask.disk = D_MASK;
+ val.disk = D_UP_TO_DATE;
forced = 1;
continue;
}
- if (r == SS_NoUpToDateDisk &&
- mdev->state.disk == Consistent) {
- D_ASSERT(mdev->state.pdsk == DUnknown);
+ if (r == SS_NO_UP_TO_DATE_DISK &&
+ mdev->state.disk == D_CONSISTENT) {
+ D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
nps = drbd_try_outdate_peer(mdev);
- if (nps == Outdated) {
- val.disk = UpToDate;
- mask.disk = disk_mask;
+ if (nps == D_OUTDATED) {
+ val.disk = D_UP_TO_DATE;
+ mask.disk = D_MASK;
}
val.pdsk = nps;
- mask.pdsk = disk_mask;
+ mask.pdsk = D_MASK;
continue;
}
- if (r == SS_NothingToDo)
+ if (r == SS_NOTHING_TO_DO)
goto fail;
- if (r == SS_PrimaryNOP) {
+ if (r == SS_PRIMARY_NOP) {
nps = drbd_try_outdate_peer(mdev);
- if (force && nps > Outdated) {
+ if (force && nps > D_OUTDATED) {
dev_warn(DEV, "Forced into split brain situation!\n");
- nps = Outdated;
+ nps = D_OUTDATED;
}
- mask.pdsk = disk_mask;
+ mask.pdsk = D_MASK;
val.pdsk = nps;
continue;
}
- if (r == SS_TwoPrimaries) {
+ if (r == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
__set_current_state(TASK_INTERRUPTIBLE);
@@ -341,10 +341,10 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
try = max_tries - 1;
continue;
}
- if (r < SS_Success) {
+ if (r < SS_SUCCESS) {
r = _drbd_request_state(mdev, mask, val,
- ChgStateVerbose + ChgWaitComplete);
- if (r < SS_Success)
+ CS_VERBOSE + CS_WAIT_COMPLETE);
+ if (r < SS_SUCCESS)
goto fail;
}
break;
@@ -358,10 +358,10 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
/* Wait until nothing is on the fly :) */
wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
- if (new_role == Secondary) {
+ if (new_role == R_SECONDARY) {
set_disk_ro(mdev->vdisk, TRUE);
if (inc_local(mdev)) {
- mdev->bc->md.uuid[Current] &= ~(u64)1;
+ mdev->bc->md.uuid[UI_CURRENT] &= ~(u64)1;
dec_local(mdev);
}
} else {
@@ -371,22 +371,22 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
}
set_disk_ro(mdev->vdisk, FALSE);
if (inc_local(mdev)) {
- if (((mdev->state.conn < Connected ||
- mdev->state.pdsk <= Failed)
- && mdev->bc->md.uuid[Bitmap] == 0) || forced)
+ if (((mdev->state.conn < C_CONNECTED ||
+ mdev->state.pdsk <= D_FAILED)
+ && mdev->bc->md.uuid[UI_BITMAP] == 0) || forced)
drbd_uuid_new_current(mdev);
- mdev->bc->md.uuid[Current] |= (u64)1;
+ mdev->bc->md.uuid[UI_CURRENT] |= (u64)1;
dec_local(mdev);
}
}
- if ((new_role == Secondary) && inc_local(mdev)) {
+ if ((new_role == R_SECONDARY) && inc_local(mdev)) {
drbd_al_to_on_disk_bm(mdev);
dec_local(mdev);
}
- if (mdev->state.conn >= WFReportParams) {
+ if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
/* if this was forced, we should consider sync */
if (forced)
drbd_send_uuids(mdev);
@@ -409,12 +409,12 @@ STATIC int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
memset(&primary_args, 0, sizeof(struct primary));
if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
- reply->ret_code = UnknownMandatoryTag;
+ reply->ret_code = ERR_MANDATORY_TAG;
return 0;
}
reply->ret_code =
- drbd_set_role(mdev, Primary, primary_args.overwrite_peer);
+ drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer);
return 0;
}
@@ -422,7 +422,7 @@ STATIC int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
STATIC int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- reply->ret_code = drbd_set_role(mdev, Secondary, 0);
+ reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);
return 0;
}
@@ -486,16 +486,16 @@ char *ppsize(char *buf, unsigned long long size)
}
/* there is still a theoretical deadlock when called from receiver
- * on an Inconsistent Primary:
+ * on a D_INCONSISTENT R_PRIMARY:
* remote READ does inc_ap_bio, receiver would need to receive answer
* packet from remote to dec_ap_bio again.
* receiver receive_sizes(), comes here,
* waits for ap_bio_cnt == 0. -> deadlock.
* but this cannot happen, actually, because:
- * Primary Inconsistent, and peer's disk is unreachable
+ * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
* (not connected, * or bad/no disk on peer):
* see drbd_fail_request_early, ap_bio_cnt is zero.
- * Primary Inconsistent, and SyncTarget:
+ * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
* peer may not initiate a resize.
*/
void drbd_suspend_io(struct drbd_conf *mdev)
@@ -520,7 +520,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
* indicate success.
* You should call drbd_md_sync() after calling this function.
*/
-enum determin_dev_size_enum drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size;
@@ -528,7 +528,7 @@ enum determin_dev_size_enum drbd_determin_dev_size(struct drbd_conf *mdev) __mus
char ppb[10];
int md_moved, la_size_changed;
- enum determin_dev_size_enum rv = unchanged;
+ enum determine_dev_size rv = unchanged;
/* race:
* application request passes inc_ap_bio,
@@ -717,7 +717,7 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
max_seg_s = min(b->max_sectors * b->hardsect_size, max_seg_s);
- MTRACE(TraceTypeRq, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_RQ, TRACE_LVL_SUMMARY,
DUMPI(b->max_sectors);
DUMPI(b->max_phys_segments);
DUMPI(b->max_hw_segments);
@@ -739,15 +739,7 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
q->seg_boundary_mask = PAGE_SIZE-1;
blk_queue_stack_limits(q, b);
- /* KERNEL BUG. in ll_rw_blk.c ??
- * t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
- * should be
- * t->max_segment_size = min_not_zero(...,...)
- * workaround here: */
- if (q->max_segment_size == 0)
- q->max_segment_size = max_seg_s;
-
- MTRACE(TraceTypeRq, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_RQ, TRACE_LVL_SUMMARY,
DUMPI(q->max_sectors);
DUMPI(q->max_phys_segments);
DUMPI(q->max_hw_segments);
@@ -774,21 +766,21 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- enum ret_codes retcode;
- enum determin_dev_size_enum dd;
+ enum drbd_ret_codes retcode;
+ enum determine_dev_size dd;
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
struct inode *inode, *inode2;
struct lru_cache *resync_lru = NULL;
- union drbd_state_t ns, os;
+ union drbd_state ns, os;
int rv, ntries = 0;
int cp_discovered = 0;
int hardsect;
/* if you want to reconfigure, please tear down first */
- if (mdev->state.disk > Diskless) {
- retcode = HaveDiskConfig;
+ if (mdev->state.disk > D_DISKLESS) {
+ retcode = ERR_DISK_CONFIGURED;
goto fail;
}
@@ -802,7 +794,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
break;
if (ntries++ >= 5) {
dev_warn(DEV, "drbd_nl_disk_conf: mdev->bc not NULL.\n");
- retcode = HaveDiskConfig;
+ retcode = ERR_DISK_CONFIGURED;
goto fail;
}
__set_current_state(TASK_INTERRUPTIBLE);
@@ -811,7 +803,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc = kmalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
if (!nbc) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
@@ -829,7 +821,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
- retcode = UnknownMandatoryTag;
+ retcode = ERR_MANDATORY_TAG;
goto fail;
}
@@ -837,7 +829,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc->md_file = NULL;
if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
- retcode = LDMDInvalid;
+ retcode = ERR_MD_IDX_INVALID;
goto fail;
}
@@ -846,14 +838,14 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
PTR_ERR(nbc->lo_file));
nbc->lo_file = NULL;
- retcode = LDNameInvalid;
+ retcode = ERR_OPEN_DISK;
goto fail;
}
inode = nbc->lo_file->f_dentry->d_inode;
if (!S_ISBLK(inode->i_mode)) {
- retcode = LDNoBlockDev;
+ retcode = ERR_DISK_NOT_BDEV;
goto fail;
}
@@ -862,14 +854,14 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
PTR_ERR(nbc->md_file));
nbc->md_file = NULL;
- retcode = MDNameInvalid;
+ retcode = ERR_OPEN_MD_DISK;
goto fail;
}
inode2 = nbc->md_file->f_dentry->d_inode;
if (!S_ISBLK(inode2->i_mode)) {
- retcode = MDNoBlockDev;
+ retcode = ERR_MD_NOT_BDEV;
goto fail;
}
@@ -880,19 +872,19 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc->backing_bdev->bd_holder,
nbc->backing_bdev->bd_contains->bd_holder,
nbc->backing_bdev->bd_holders);
- retcode = LDMounted;
+ retcode = ERR_BDCLAIM_DISK;
goto fail;
}
resync_lru = lc_alloc("resync", 61, sizeof(struct bm_extent), mdev);
if (!resync_lru) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto release_bdev_fail;
}
if (!mdev->bitmap) {
if (drbd_bm_init(mdev)) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto release_bdev_fail;
}
}
@@ -902,14 +894,14 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
(nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT) ?
(void *)mdev : (void *) drbd_m_holder)) {
- retcode = MDMounted;
+ retcode = ERR_BDCLAIM_MD_DISK;
goto release_bdev_fail;
}
if ((nbc->backing_bdev == nbc->md_bdev) !=
(nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
- retcode = LDMDInvalid;
+ retcode = ERR_MD_IDX_INVALID;
goto release_bdev2_fail;
}
@@ -920,7 +912,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
- retcode = LDDeviceTooSmall;
+ retcode = ERR_DISK_TO_SMALL;
goto release_bdev2_fail;
}
@@ -939,7 +931,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
(unsigned long long) max_possible_sectors);
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
- retcode = MDDeviceTooSmall;
+ retcode = ERR_MD_DISK_TO_SMALL;
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
@@ -947,10 +939,10 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
/* Make sure the new disk is big enough
- * (we may currently be Primary with no local disk...) */
+ * (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
- retcode = LDDeviceTooSmall;
+ retcode = ERR_DISK_TO_SMALL;
goto release_bdev2_fail;
}
@@ -958,46 +950,46 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt));
- retcode = _drbd_request_state(mdev, NS(disk, Attaching), ChgStateVerbose);
+ retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
drbd_resume_io(mdev);
- if (retcode < SS_Success)
+ if (retcode < SS_SUCCESS)
goto release_bdev2_fail;
- if (!inc_local_if_state(mdev, Attaching))
+ if (!inc_local_if_state(mdev, D_ATTACHING))
goto force_diskless;
drbd_thread_start(&mdev->worker);
drbd_md_set_sector_offsets(mdev, nbc);
retcode = drbd_md_read(mdev, nbc);
- if (retcode != NoError)
+ if (retcode != NO_ERROR)
goto force_diskless_dec;
- if (mdev->state.conn < Connected &&
- mdev->state.role == Primary &&
- (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[Current] & ~((u64)1))) {
+ if (mdev->state.conn < C_CONNECTED &&
+ mdev->state.role == R_PRIMARY &&
+ (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
(unsigned long long)mdev->ed_uuid);
- retcode = DataOfWrongCurrent;
+ retcode = ERR_DATA_NOT_CURRENT;
goto force_diskless_dec;
}
/* Since we are diskless, fix the AL first... */
if (drbd_check_al_size(mdev)) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto force_diskless_dec;
}
/* Prevent shrinking of consistent devices ! */
- if (drbd_md_test_flag(nbc, MDF_Consistent) &&
+ if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
- retcode = LDDeviceTooSmall;
+ retcode = ERR_DISK_TO_SMALL;
goto force_diskless_dec;
}
if (!drbd_al_read_log(mdev, nbc)) {
- retcode = MDIOError;
+ retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
@@ -1040,12 +1032,12 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
mdev->write_ordering = WO_bio_barrier;
drbd_bump_write_ordering(mdev, WO_bio_barrier);
- if (drbd_md_test_flag(mdev->bc, MDF_CrashedPrimary))
+ if (drbd_md_test_flag(mdev->bc, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &mdev->flags);
else
clear_bit(CRASHED_PRIMARY, &mdev->flags);
- if (drbd_md_test_flag(mdev->bc, MDF_PrimaryInd)) {
+ if (drbd_md_test_flag(mdev->bc, MDF_PRIMARY_IND)) {
set_bit(CRASHED_PRIMARY, &mdev->flags);
cp_discovered = 1;
}
@@ -1057,13 +1049,13 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);
- /* If I am currently not Primary,
+ /* If I am currently not R_PRIMARY,
* but meta data primary indicator is set,
* I just now recover from a hard crash,
- * and have been Primary before that crash.
+ * and have been R_PRIMARY before that crash.
*
* Now, if I had no connection before that crash
- * (have been degraded Primary), chances are that
+ * (have been degraded R_PRIMARY), chances are that
* I won't find my peer now either.
*
* In that case, and _only_ in that case,
@@ -1072,28 +1064,28 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* degraded but active "cluster" after a certain timeout.
*/
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- if (mdev->state.role != Primary &&
- drbd_md_test_flag(mdev->bc, MDF_PrimaryInd) &&
- !drbd_md_test_flag(mdev->bc, MDF_ConnectedInd))
+ if (mdev->state.role != R_PRIMARY &&
+ drbd_md_test_flag(mdev->bc, MDF_PRIMARY_IND) &&
+ !drbd_md_test_flag(mdev->bc, MDF_CONNECTED_IND))
set_bit(USE_DEGR_WFC_T, &mdev->flags);
dd = drbd_determin_dev_size(mdev);
if (dd == dev_size_error) {
- retcode = VMallocFailed;
+ retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
} else if (dd == grew)
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
- if (drbd_md_test_flag(mdev->bc, MDF_FullSync)) {
+ if (drbd_md_test_flag(mdev->bc, MDF_FULL_SYNC)) {
dev_info(DEV, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
- retcode = MDIOError;
+ retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
} else {
if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
- retcode = MDIOError;
+ retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
}
@@ -1106,51 +1098,51 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
spin_lock_irq(&mdev->req_lock);
os = mdev->state;
ns.i = os.i;
- /* If MDF_Consistent is not set go into inconsistent state,
+ /* If MDF_CONSISTENT is not set go into inconsistent state,
otherwise investige MDF_WasUpToDate...
- If MDF_WasUpToDate is not set go into Outdated disk state,
- otherwise into Consistent state.
+ If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
+ otherwise into D_CONSISTENT state.
*/
- if (drbd_md_test_flag(mdev->bc, MDF_Consistent)) {
- if (drbd_md_test_flag(mdev->bc, MDF_WasUpToDate))
- ns.disk = Consistent;
+ if (drbd_md_test_flag(mdev->bc, MDF_CONSISTENT)) {
+ if (drbd_md_test_flag(mdev->bc, MDF_WAS_UP_TO_DATE))
+ ns.disk = D_CONSISTENT;
else
- ns.disk = Outdated;
+ ns.disk = D_OUTDATED;
} else {
- ns.disk = Inconsistent;
+ ns.disk = D_INCONSISTENT;
}
- if (drbd_md_test_flag(mdev->bc, MDF_PeerOutDated))
- ns.pdsk = Outdated;
+ if (drbd_md_test_flag(mdev->bc, MDF_PEER_OUT_DATED))
+ ns.pdsk = D_OUTDATED;
- if ( ns.disk == Consistent &&
- (ns.pdsk == Outdated || mdev->bc->dc.fencing == DontCare))
- ns.disk = UpToDate;
+ if ( ns.disk == D_CONSISTENT &&
+ (ns.pdsk == D_OUTDATED || mdev->bc->dc.fencing == FP_DONT_CARE))
+ ns.disk = D_UP_TO_DATE;
- /* All tests on MDF_PrimaryInd, MDF_ConnectedInd,
- MDF_Consistent and MDF_WasUpToDate must happen before
+ /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
+ MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
this point, because drbd_request_state() modifies these
flags. */
- /* In case we are Connected postpone any desicion on the new disk
+ /* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
- if (mdev->state.conn == Connected) {
+ if (mdev->state.conn == C_CONNECTED) {
mdev->new_state_tmp.i = ns.i;
ns.i = os.i;
- ns.disk = Negotiating;
+ ns.disk = D_NEGOTIATING;
}
- rv = _drbd_set_state(mdev, ns, ChgStateVerbose, NULL);
+ rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
ns = mdev->state;
spin_unlock_irq(&mdev->req_lock);
- if (rv < SS_Success)
+ if (rv < SS_SUCCESS)
goto force_diskless_dec;
- if (mdev->state.role == Primary)
- mdev->bc->md.uuid[Current] |= (u64)1;
+ if (mdev->state.role == R_PRIMARY)
+ mdev->bc->md.uuid[UI_CURRENT] |= (u64)1;
else
- mdev->bc->md.uuid[Current] &= ~(u64)1;
+ mdev->bc->md.uuid[UI_CURRENT] &= ~(u64)1;
drbd_md_mark_dirty(mdev);
drbd_md_sync(mdev);
@@ -1163,7 +1155,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
force_diskless_dec:
dec_local(mdev);
force_diskless:
- drbd_force_state(mdev, NS(disk, Diskless));
+ drbd_force_state(mdev, NS(disk, D_DISKLESS));
drbd_md_sync(mdev);
release_bdev2_fail:
if (nbc)
@@ -1190,7 +1182,7 @@ STATIC int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
fsync_bdev(mdev->this_bdev);
- reply->ret_code = drbd_request_state(mdev, NS(disk, Diskless));
+ reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/20); /* 50ms; Time for worker to finally terminate */
@@ -1202,7 +1194,7 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
int i, ns;
- enum ret_codes retcode;
+ enum drbd_ret_codes retcode;
struct net_conf *new_conf = NULL;
struct crypto_hash *tfm = NULL;
struct crypto_hash *integrity_w_tfm = NULL;
@@ -1214,15 +1206,16 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
void *int_dig_out = NULL;
void *int_dig_in = NULL;
void *int_dig_vv = NULL;
+ struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
- if (mdev->state.conn > StandAlone) {
- retcode = HaveNetConfig;
+ if (mdev->state.conn > C_STANDALONE) {
+ retcode = ERR_NET_CONFIGURED;
goto fail;
}
new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
@@ -1250,48 +1243,45 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
- retcode = UnknownMandatoryTag;
+ retcode = ERR_MANDATORY_TAG;
goto fail;
}
if (new_conf->two_primaries
&& (new_conf->wire_protocol != DRBD_PROT_C)) {
- retcode = ProtocolCRequired;
+ retcode = ERR_NOT_PROTO_C;
goto fail;
};
- if (mdev->state.role == Primary && new_conf->want_lose) {
- retcode = DiscardNotAllowed;
+ if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
+ retcode = ERR_DISCARD;
goto fail;
}
-#define M_ADDR(A) (((struct sockaddr_in *)&A->my_addr)->sin_addr.s_addr)
-#define M_PORT(A) (((struct sockaddr_in *)&A->my_addr)->sin_port)
-#define O_ADDR(A) (((struct sockaddr_in *)&A->peer_addr)->sin_addr.s_addr)
-#define O_PORT(A) (((struct sockaddr_in *)&A->peer_addr)->sin_port)
- retcode = NoError;
+ retcode = NO_ERROR;
+
+ new_my_addr = (struct sockaddr *)&new_conf->my_addr;
+ new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
for (i = 0; i < minor_count; i++) {
odev = minor_to_mdev(i);
if (!odev || odev == mdev)
continue;
if (inc_net(odev)) {
- if (M_ADDR(new_conf) == M_ADDR(odev->net_conf) &&
- M_PORT(new_conf) == M_PORT(odev->net_conf))
- retcode = LAAlreadyInUse;
+ taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
+ if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
+ !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
+ retcode = ERR_LOCAL_ADDR;
- if (O_ADDR(new_conf) == O_ADDR(odev->net_conf) &&
- O_PORT(new_conf) == O_PORT(odev->net_conf))
- retcode = OAAlreadyInUse;
+ taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
+ if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
+ !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
+ retcode = ERR_PEER_ADDR;
dec_net(odev);
- if (retcode != NoError)
+ if (retcode != NO_ERROR)
goto fail;
}
}
-#undef M_ADDR
-#undef M_PORT
-#undef O_ADDR
-#undef O_PORT
if (new_conf->cram_hmac_alg[0] != 0) {
snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
@@ -1299,13 +1289,13 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
tfm = NULL;
- retcode = CRAMAlgNotAvail;
+ retcode = ERR_AUTH_ALG;
goto fail;
}
if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
!= CRYPTO_ALG_TYPE_HASH) {
- retcode = CRAMAlgNotDigest;
+ retcode = ERR_AUTH_ALG_ND;
goto fail;
}
}
@@ -1314,19 +1304,19 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(integrity_w_tfm)) {
integrity_w_tfm = NULL;
- retcode=IntegrityAlgNotAvail;
+ retcode=ERR_INTEGRITY_ALG;
goto fail;
}
if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
- retcode=IntegrityAlgNotDigest;
+ retcode=ERR_INTEGRITY_ALG_ND;
goto fail;
}
integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(integrity_r_tfm)) {
integrity_r_tfm = NULL;
- retcode=IntegrityAlgNotAvail;
+ retcode=ERR_INTEGRITY_ALG;
goto fail;
}
}
@@ -1335,7 +1325,7 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
if (mdev->tl_hash_s != ns) {
new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
if (!new_tl_hash) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
}
@@ -1344,59 +1334,35 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
if (!new_ee_hash) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
}
((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
-#if 0
- /* for the connection loss logic in drbd_recv
- * I _need_ the resulting timeo in jiffies to be
- * non-zero and different
- *
- * XXX maybe rather store the value scaled to jiffies?
- * Note: MAX_SCHEDULE_TIMEOUT/HZ*HZ != MAX_SCHEDULE_TIMEOUT
- * and HZ > 10; which is unlikely to change...
- * Thus, if interrupted by a signal,
- * sock_{send,recv}msg returns -EINTR,
- * if the timeout expires, -EAGAIN.
- */
- /* unlikely: someone disabled the timeouts ...
- * just put some huge values in there. */
- if (!new_conf->ping_int)
- new_conf->ping_int = MAX_SCHEDULE_TIMEOUT/HZ;
- if (!new_conf->timeout)
- new_conf->timeout = MAX_SCHEDULE_TIMEOUT/HZ*10;
- if (new_conf->ping_int*10 < new_conf->timeout)
- new_conf->timeout = new_conf->ping_int*10/6;
- if (new_conf->ping_int*10 == new_conf->timeout)
- new_conf->ping_int = new_conf->ping_int+1;
-#endif
-
if (integrity_w_tfm) {
i = crypto_hash_digestsize(integrity_w_tfm);
int_dig_out = kmalloc(i, GFP_KERNEL);
if (!int_dig_out) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
int_dig_in = kmalloc(i, GFP_KERNEL);
if (!int_dig_in) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
int_dig_vv = kmalloc(i, GFP_KERNEL);
if (!int_dig_vv) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
}
if (!mdev->bitmap) {
if(drbd_bm_init(mdev)) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
}
@@ -1435,8 +1401,8 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
mdev->int_dig_in=int_dig_in;
mdev->int_dig_vv=int_dig_vv;
- retcode = _drbd_request_state(mdev, NS(conn, Unconnected), ChgStateVerbose);
- if (retcode >= SS_Success)
+ retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+ if (retcode >= SS_SUCCESS)
drbd_thread_start(&mdev->worker);
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
@@ -1463,40 +1429,40 @@ STATIC int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
{
int retcode;
- retcode = _drbd_request_state(mdev, NS(conn, Disconnecting), ChgOrdered);
+ retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
- if (retcode == SS_NothingToDo)
+ if (retcode == SS_NOTHING_TO_DO)
goto done;
- else if (retcode == SS_AlreadyStandAlone)
+ else if (retcode == SS_ALREADY_STANDALONE)
goto done;
- else if (retcode == SS_PrimaryNOP) {
+ else if (retcode == SS_PRIMARY_NOP) {
/* Our statche checking code wants to see the peer outdated. */
- retcode = drbd_request_state(mdev, NS2(conn, Disconnecting,
- pdsk, Outdated));
- } else if (retcode == SS_CW_FailedByPeer) {
+ retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
+ pdsk, D_OUTDATED));
+ } else if (retcode == SS_CW_FAILED_BY_PEER) {
/* The peer probabely wants to see us outdated. */
- retcode = _drbd_request_state(mdev, NS2(conn, Disconnecting,
- disk, Outdated),
- ChgOrdered);
- if (retcode == SS_IsDiskLess || retcode == SS_LowerThanOutdated) {
- drbd_force_state(mdev, NS(conn, Disconnecting));
- retcode = SS_Success;
+ retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
+ disk, D_OUTDATED),
+ CS_ORDERED);
+ if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ retcode = SS_SUCCESS;
}
}
- if (retcode < SS_Success)
+ if (retcode < SS_SUCCESS)
goto fail;
if (wait_event_interruptible(mdev->state_wait,
- mdev->state.conn != Disconnecting)) {
- /* Do not test for mdev->state.conn == StandAlone, since
+ mdev->state.conn != C_DISCONNECTING)) {
+ /* Do not test for mdev->state.conn == C_STANDALONE, since
someone else might connect us in the mean time! */
- retcode = GotSignal;
+ retcode = ERR_INTR;
goto fail;
}
done:
- retcode = NoError;
+ retcode = NO_ERROR;
fail:
drbd_md_sync(mdev);
reply->ret_code = retcode;
@@ -1509,43 +1475,43 @@ void resync_after_online_grow(struct drbd_conf *mdev)
dev_info(DEV, "Resync of new storage after online grow\n");
if (mdev->state.role != mdev->state.peer)
- iass = (mdev->state.role == Primary);
+ iass = (mdev->state.role == R_PRIMARY);
else
iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
if (iass)
- drbd_start_resync(mdev, SyncSource);
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
else
- _drbd_request_state(mdev, NS(conn, WFSyncUUID), ChgStateVerbose + ChgSerialize);
+ _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
STATIC int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
struct resize rs;
- int retcode = NoError;
+ int retcode = NO_ERROR;
int ldsc = 0; /* local disk size changed */
- enum determin_dev_size_enum dd;
+ enum determine_dev_size dd;
memset(&rs, 0, sizeof(struct resize));
if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
- retcode = UnknownMandatoryTag;
+ retcode = ERR_MANDATORY_TAG;
goto fail;
}
- if (mdev->state.conn > Connected) {
- retcode = NoResizeDuringResync;
+ if (mdev->state.conn > C_CONNECTED) {
+ retcode = ERR_RESIZE_RESYNC;
goto fail;
}
- if (mdev->state.role == Secondary &&
- mdev->state.peer == Secondary) {
- retcode = APrimaryNodeNeeded;
+ if (mdev->state.role == R_SECONDARY &&
+ mdev->state.peer == R_SECONDARY) {
+ retcode = ERR_NO_PRIMARY;
goto fail;
}
if (!inc_local(mdev)) {
- retcode = HaveNoDiskConfig;
+ retcode = ERR_NO_DISK;
goto fail;
}
@@ -1559,11 +1525,11 @@ STATIC int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
drbd_md_sync(mdev);
dec_local(mdev);
if (dd == dev_size_error) {
- retcode = VMallocFailed;
+ retcode = ERR_NOMEM_BITMAP;
goto fail;
}
- if (mdev->state.conn == Connected && (dd != unchanged || ldsc)) {
+ if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
drbd_send_uuids(mdev);
drbd_send_sizes(mdev);
if (dd == grew)
@@ -1578,7 +1544,7 @@ STATIC int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- int retcode = NoError;
+ int retcode = NO_ERROR;
int err;
int ovr; /* online verify running */
int rsr; /* re-sync running */
@@ -1598,19 +1564,19 @@ STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
}
if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
- retcode = UnknownMandatoryTag;
+ retcode = ERR_MANDATORY_TAG;
goto fail;
}
if (sc.after != -1) {
if (sc.after < -1 || minor_to_mdev(sc.after) == NULL) {
- retcode = SyncAfterInvalid;
+ retcode = ERR_SYNC_AFTER;
goto fail;
}
odev = minor_to_mdev(sc.after); /* check against loops in */
while (1) {
if (odev == mdev) {
- retcode = SyncAfterCycle;
+ retcode = ERR_SYNC_AFTER_CYCLE;
goto fail;
}
if (odev->sync_conf.after == -1)
@@ -1620,13 +1586,13 @@ STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
}
/* re-sync running */
- rsr = ( mdev->state.conn == SyncSource ||
- mdev->state.conn == SyncTarget ||
- mdev->state.conn == PausedSyncS ||
- mdev->state.conn == PausedSyncT );
+ rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
+ mdev->state.conn == C_SYNC_TARGET ||
+ mdev->state.conn == C_PAUSED_SYNC_S ||
+ mdev->state.conn == C_PAUSED_SYNC_T );
if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
- retcode = CSUMSResyncRunning;
+ retcode = ERR_CSUMS_RESYNC_RUNNING;
goto fail;
}
@@ -1634,22 +1600,22 @@ STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(csums_tfm)) {
csums_tfm = NULL;
- retcode = CSUMSAlgNotAvail;
+ retcode = ERR_CSUMS_ALG;
goto fail;
}
if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
- retcode = CSUMSAlgNotDigest;
+ retcode = ERR_CSUMS_ALG_ND;
goto fail;
}
}
/* online verify running */
- ovr = (mdev->state.conn == VerifyS || mdev->state.conn == VerifyT);
+ ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
if (ovr) {
if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
- retcode = VERIFYIsRunning;
+ retcode = ERR_VERIFY_RUNNING;
goto fail;
}
}
@@ -1658,12 +1624,12 @@ STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(verify_tfm)) {
verify_tfm = NULL;
- retcode = VERIFYAlgNotAvail;
+ retcode = ERR_VERIFY_ALG;
goto fail;
}
if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
- retcode = VERIFYAlgNotDigest;
+ retcode = ERR_VERIFY_ALG_ND;
goto fail;
}
}
@@ -1672,7 +1638,7 @@ STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
err = __bitmap_parse(sc.cpu_mask, 32, 0, (unsigned long *)&n_cpu_mask, NR_CPUS);
if (err) {
dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
- retcode = CPUMaskParseFailed;
+ retcode = ERR_CPU_MASK_PARSE;
goto fail;
}
}
@@ -1714,12 +1680,12 @@ STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
drbd_md_sync(mdev);
if (err) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
}
- if (mdev->state.conn >= Connected)
+ if (mdev->state.conn >= C_CONNECTED)
drbd_send_sync_param(mdev, &sc);
drbd_alter_sa(mdev, sc.after);
@@ -1745,21 +1711,21 @@ STATIC int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
{
int retcode;
- retcode = _drbd_request_state(mdev, NS(conn, StartingSyncT), ChgOrdered);
+ retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
- if (retcode < SS_Success && retcode != SS_NeedConnection)
- retcode = drbd_request_state(mdev, NS(conn, StartingSyncT));
+ if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
+ retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
- while (retcode == SS_NeedConnection) {
+ while (retcode == SS_NEED_CONNECTION) {
spin_lock_irq(&mdev->req_lock);
- if (mdev->state.conn < Connected)
- retcode = _drbd_set_state(_NS(mdev, disk, Inconsistent), ChgStateVerbose, NULL);
+ if (mdev->state.conn < C_CONNECTED)
+ retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
spin_unlock_irq(&mdev->req_lock);
- if (retcode != SS_NeedConnection)
+ if (retcode != SS_NEED_CONNECTION)
break;
- retcode = drbd_request_state(mdev, NS(conn, StartingSyncT));
+ retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
}
reply->ret_code = retcode;
@@ -1770,7 +1736,7 @@ STATIC int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
struct drbd_nl_cfg_reply *reply)
{
- reply->ret_code = drbd_request_state(mdev, NS(conn, StartingSyncS));
+ reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
return 0;
}
@@ -1778,10 +1744,10 @@ STATIC int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
STATIC int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- int retcode = NoError;
+ int retcode = NO_ERROR;
- if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NothingToDo)
- retcode = PauseFlagAlreadySet;
+ if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+ retcode = ERR_PAUSE_IS_SET;
reply->ret_code = retcode;
return 0;
@@ -1790,10 +1756,10 @@ STATIC int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
STATIC int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- int retcode = NoError;
+ int retcode = NO_ERROR;
- if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NothingToDo)
- retcode = PauseFlagAlreadyClear;
+ if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
+ retcode = ERR_PAUSE_IS_CLEAR;
reply->ret_code = retcode;
return 0;
@@ -1817,7 +1783,7 @@ STATIC int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
STATIC int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- reply->ret_code = drbd_request_state(mdev, NS(disk, Outdated));
+ reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
return 0;
}
@@ -1848,14 +1814,14 @@ STATIC int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
struct drbd_nl_cfg_reply *reply)
{
unsigned short *tl = reply->tag_list;
- union drbd_state_t s = mdev->state;
+ union drbd_state s = mdev->state;
unsigned long rs_left;
unsigned int res;
tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
/* no local ref, no bitmap, no syncer progress. */
- if (s.conn >= SyncSource && s.conn <= PausedSyncT) {
+ if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
if (inc_local(mdev)) {
drbd_get_syncer_progress(mdev, &rs_left, &res);
*tl++ = T_sync_progress;
@@ -1880,9 +1846,9 @@ STATIC int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (inc_local(mdev)) {
/* This is a hand crafted add tag ;) */
*tl++ = T_uuids;
- *tl++ = UUID_SIZE*sizeof(u64);
- memcpy(tl, mdev->bc->md.uuid, UUID_SIZE*sizeof(u64));
- tl = (unsigned short *)((char *)tl + UUID_SIZE*sizeof(u64));
+ *tl++ = UI_SIZE*sizeof(u64);
+ memcpy(tl, mdev->bc->md.uuid, UI_SIZE*sizeof(u64));
+ tl = (unsigned short *)((char *)tl + UI_SIZE*sizeof(u64));
*tl++ = T_uuids_flags;
*tl++ = sizeof(int);
memcpy(tl, &mdev->bc->md.flags, sizeof(int));
@@ -1906,8 +1872,8 @@ STATIC int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_r
tl = reply->tag_list;
- rv = mdev->state.pdsk == Outdated ? UT_PeerOutdated :
- test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_Degraded : UT_Default;
+ rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+ test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
/* This is a hand crafted add tag ;) */
*tl++ = T_use_degraded;
@@ -1922,7 +1888,7 @@ STATIC int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_r
STATIC int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- reply->ret_code = drbd_request_state(mdev,NS(conn,VerifyS));
+ reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
return 0;
}
@@ -1931,7 +1897,7 @@ STATIC int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
STATIC int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- int retcode = NoError;
+ int retcode = NO_ERROR;
int skip_initial_sync = 0;
int err;
@@ -1939,41 +1905,41 @@ STATIC int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
memset(&args, 0, sizeof(struct new_c_uuid));
if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
- reply->ret_code = UnknownMandatoryTag;
+ reply->ret_code = ERR_MANDATORY_TAG;
return 0;
}
mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
if (!inc_local(mdev)) {
- retcode = HaveNoDiskConfig;
+ retcode = ERR_NO_DISK;
goto out;
}
/* this is "skip initial sync", assume to be clean */
- if (mdev->state.conn == Connected && mdev->agreed_pro_version >= 90 &&
- mdev->bc->md.uuid[Current] == UUID_JUST_CREATED && args.clear_bm) {
+ if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
+ mdev->bc->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
dev_info(DEV, "Preparing to skip initial sync\n");
skip_initial_sync = 1;
- } else if (mdev->state.conn >= Connected) {
- retcode = MayNotBeConnected;
+ } else if (mdev->state.conn >= C_CONNECTED) {
+ retcode = ERR_CONNECTED;
goto out_dec;
}
- drbd_uuid_set(mdev, Bitmap, 0); /* Rotate Bitmap to History 1, etc... */
- drbd_uuid_new_current(mdev); /* New current, previous to Bitmap */
+ drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
+ drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
if (args.clear_bm) {
err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
if (err) {
dev_err(DEV, "Writing bitmap failed with %d\n",err);
- retcode = MDIOError;
+ retcode = ERR_IO_MD_DISK;
}
if (skip_initial_sync) {
drbd_send_uuids_skip_initial_sync(mdev);
- _drbd_uuid_set(mdev, Bitmap, 0);
- _drbd_set_state(_NS2(mdev, disk, UpToDate, pdsk, UpToDate),
- ChgStateVerbose, NULL);
+ _drbd_uuid_set(mdev, UI_BITMAP, 0);
+ _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
+ CS_VERBOSE, NULL);
}
}
@@ -2079,14 +2045,14 @@ STATIC void drbd_connector_callback(void *data)
mdev = ensure_mdev(nlp);
if (!mdev) {
- retcode = MinorNotKnown;
+ retcode = ERR_MINOR_INVALID;
goto fail;
}
- TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_packet(data););
+ TRACE(TRACE_TYPE_NL, TRACE_LVL_SUMMARY, nl_trace_packet(data););
if (nlp->packet_type >= P_nl_after_last_packet) {
- retcode = UnknownNetLinkPacket;
+ retcode = ERR_PACKET_NR;
goto fail;
}
@@ -2094,7 +2060,7 @@ STATIC void drbd_connector_callback(void *data)
/* This may happen if packet number is 0: */
if (cm->function == NULL) {
- retcode = UnknownNetLinkPacket;
+ retcode = ERR_PACKET_NR;
goto fail;
}
@@ -2102,7 +2068,7 @@ STATIC void drbd_connector_callback(void *data)
cn_reply = kmalloc(reply_size, GFP_KERNEL);
if (!cn_reply) {
- retcode = KMallocFailed;
+ retcode = ERR_NOMEM;
goto fail;
}
reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
@@ -2110,7 +2076,7 @@ STATIC void drbd_connector_callback(void *data)
reply->packet_type =
cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
reply->minor = nlp->drbd_minor;
- reply->ret_code = NoError; /* Might by modified by cm->function. */
+ reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
/* reply->tag_list; might be modified by cm->function. */
rr = cm->function(mdev, nlp, reply);
@@ -2121,7 +2087,7 @@ STATIC void drbd_connector_callback(void *data)
cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
cn_reply->flags = 0;
- TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+ TRACE(TRACE_TYPE_NL, TRACE_LVL_SUMMARY, nl_trace_reply(cn_reply););
rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
if (rr && rr != -ESRCH)
@@ -2192,7 +2158,7 @@ tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
return tl;
}
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state_t state)
+void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
char buffer[sizeof(struct cn_msg)+
sizeof(struct drbd_nl_cfg_reply)+
@@ -2219,9 +2185,9 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state_t state)
reply->packet_type = P_get_state;
reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NoError;
+ reply->ret_code = NO_ERROR;
- TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+ TRACE(TRACE_TYPE_NL, TRACE_LVL_SUMMARY, nl_trace_reply(cn_reply););
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
}
@@ -2258,9 +2224,9 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
reply->packet_type = P_call_helper;
reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NoError;
+ reply->ret_code = NO_ERROR;
- TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+ TRACE(TRACE_TYPE_NL, TRACE_LVL_SUMMARY, nl_trace_reply(cn_reply););
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
}
@@ -2268,7 +2234,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
void drbd_bcast_ee(struct drbd_conf *mdev,
const char *reason, const int dgs,
const char* seen_hash, const char* calc_hash,
- const struct Tl_epoch_entry* e)
+ const struct drbd_epoch_entry* e)
{
struct cn_msg *cn_reply;
struct drbd_nl_cfg_reply *reply;
@@ -2328,9 +2294,9 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
reply->packet_type = P_dump_ee;
reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NoError;
+ reply->ret_code = NO_ERROR;
- TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+ TRACE(TRACE_TYPE_NL, TRACE_LVL_SUMMARY, nl_trace_reply(cn_reply););
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
kfree(cn_reply);
@@ -2372,9 +2338,9 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
reply->packet_type = P_sync_progress;
reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NoError;
+ reply->ret_code = NO_ERROR;
- TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+ TRACE(TRACE_TYPE_NL, TRACE_LVL_SUMMARY, nl_trace_reply(cn_reply););
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
}
@@ -2429,7 +2395,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
reply->ret_code = ret_code;
- TRACE(TraceTypeNl, TraceLvlSummary, nl_trace_reply(cn_reply););
+ TRACE(TRACE_TYPE_NL, TRACE_LVL_SUMMARY, nl_trace_reply(cn_reply););
rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
if (rr && rr != -ESRCH)
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index b209da0ff4ea..76b512180606 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -178,7 +178,7 @@ STATIC int drbd_seq_show(struct seq_file *seq, void *v)
pe .. pending (waiting for ack or data reply)
ua .. unack'd (still need to send ack or data reply)
ap .. application requests accepted, but not yet completed
- ep .. number of epochs currently "on the fly", BarrierAck pending
+ ep .. number of epochs currently "on the fly", P_BARRIER_ACK pending
wo .. write ordering mode currently in use
oos .. known out-of-sync kB
*/
@@ -196,9 +196,9 @@ STATIC int drbd_seq_show(struct seq_file *seq, void *v)
sn = conns_to_name(mdev->state.conn);
- if (mdev->state.conn == StandAlone &&
- mdev->state.disk == Diskless &&
- mdev->state.role == Secondary) {
+ if (mdev->state.conn == C_STANDALONE &&
+ mdev->state.disk == D_DISKLESS &&
+ mdev->state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
seq_printf(seq,
@@ -234,11 +234,11 @@ STATIC int drbd_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " oos:%lu\n",
Bit2KB(drbd_bm_total_weight(mdev)));
}
- if (mdev->state.conn == SyncSource ||
- mdev->state.conn == SyncTarget)
+ if (mdev->state.conn == C_SYNC_SOURCE ||
+ mdev->state.conn == C_SYNC_TARGET)
drbd_syncer_progress(mdev, seq);
- if (mdev->state.conn == VerifyS || mdev->state.conn == VerifyT)
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
seq_printf(seq, "\t%3d%% %lu/%lu\n",
(int)((mdev->rs_total-mdev->ov_left) /
(mdev->rs_total/100+1)),
@@ -246,7 +246,7 @@ STATIC int drbd_seq_show(struct seq_file *seq, void *v)
mdev->rs_total);
#ifdef ENABLE_DYNAMIC_TRACE
- if (proc_details >= 1 && inc_local_if_state(mdev, Failed)) {
+ if (proc_details >= 1 && inc_local_if_state(mdev, D_FAILED)) {
lc_printf_stats(seq, mdev->resync);
lc_printf_stats(seq, mdev->act_log);
dec_local(mdev);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 64408cdcab8d..26ac8fd0e1f4 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -59,17 +59,17 @@ struct flush_work {
};
enum epoch_event {
- EV_put,
- EV_got_barrier_nr,
- EV_barrier_done,
- EV_became_last,
- EV_cleanup = 32, /* used as flag */
+ EV_PUT,
+ EV_GOT_BARRIER_NR,
+ EV_BARRIER_DONE,
+ EV_BECAME_LAST,
+ EV_CLEANUP = 32, /* used as flag */
};
enum finish_epoch {
- FE_still_live,
- FE_destroyed,
- FE_recycled,
+ FE_STILL_LIVE,
+ FE_DESTROYED,
+ FE_RECYCLED,
};
STATIC int drbd_do_handshake(struct drbd_conf *mdev);
@@ -201,14 +201,14 @@ You must not have the req_lock:
drbd_wait_ee_list_empty()
*/
-struct Tl_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
+struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
u64 id,
sector_t sector,
unsigned int data_size,
gfp_t gfp_mask) __must_hold(local)
{
struct request_queue *q;
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct bio_vec *bvec;
struct page *page;
struct bio *bio;
@@ -284,7 +284,7 @@ struct Tl_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
e->epoch = NULL;
e->flags = 0;
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "allocated EE sec=%llus size=%u ee=%p\n",
(unsigned long long)sector, data_size, e);
);
@@ -302,13 +302,13 @@ struct Tl_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
return NULL;
}
-void drbd_free_ee(struct drbd_conf *mdev, struct Tl_epoch_entry *e)
+void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
struct bio *bio = e->private_bio;
struct bio_vec *bvec;
int i;
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "Free EE sec=%llus size=%u ee=%p\n",
(unsigned long long)e->sector, e->size, e);
);
@@ -328,14 +328,14 @@ void drbd_free_ee(struct drbd_conf *mdev, struct Tl_epoch_entry *e)
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
int count = 0;
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct list_head *le;
spin_lock_irq(&mdev->req_lock);
while (!list_empty(list)) {
le = list->next;
list_del(le);
- e = list_entry(le, struct Tl_epoch_entry, w.list);
+ e = list_entry(le, struct drbd_epoch_entry, w.list);
drbd_free_ee(mdev, e);
count++;
}
@@ -347,7 +347,7 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
STATIC void reclaim_net_ee(struct drbd_conf *mdev)
{
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct list_head *le, *tle;
/* The EEs are always appended to the end of the list. Since
@@ -356,7 +356,7 @@ STATIC void reclaim_net_ee(struct drbd_conf *mdev)
stop to examine the list... */
list_for_each_safe(le, tle, &mdev->net_ee) {
- e = list_entry(le, struct Tl_epoch_entry, w.list);
+ e = list_entry(le, struct drbd_epoch_entry, w.list);
if (drbd_bio_has_active_page(e->private_bio))
break;
list_del(le);
@@ -377,7 +377,7 @@ STATIC void reclaim_net_ee(struct drbd_conf *mdev)
STATIC int drbd_process_done_ee(struct drbd_conf *mdev)
{
LIST_HEAD(work_list);
- struct Tl_epoch_entry *e, *t;
+ struct drbd_epoch_entry *e, *t;
int ok = 1;
spin_lock_irq(&mdev->req_lock);
@@ -390,7 +390,7 @@ STATIC int drbd_process_done_ee(struct drbd_conf *mdev)
* all ignore the last argument.
*/
list_for_each_entry_safe(e, t, &work_list, w.list) {
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "Process EE on done_ee sec=%llus size=%u ee=%p\n",
(unsigned long long)e->sector, e->size, e);
);
@@ -410,7 +410,7 @@ STATIC int drbd_process_done_ee(struct drbd_conf *mdev)
void _drbd_clear_done_ee(struct drbd_conf *mdev)
{
struct list_head *le;
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct drbd_epoch *epoch;
int n = 0;
@@ -420,7 +420,7 @@ void _drbd_clear_done_ee(struct drbd_conf *mdev)
while (!list_empty(&mdev->done_ee)) {
le = mdev->done_ee.next;
list_del(le);
- e = list_entry(le, struct Tl_epoch_entry, w.list);
+ e = list_entry(le, struct drbd_epoch_entry, w.list);
if (mdev->net_conf->wire_protocol == DRBD_PROT_C
|| is_syncer_block_id(e->block_id))
++n;
@@ -432,9 +432,9 @@ void _drbd_clear_done_ee(struct drbd_conf *mdev)
if (e->flags & EE_IS_BARRIER) {
epoch = previous_epoch(mdev, e->epoch);
if (epoch)
- drbd_may_finish_epoch(mdev, epoch, EV_barrier_done + EV_cleanup);
+ drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + EV_CLEANUP);
}
- drbd_may_finish_epoch(mdev, e->epoch, EV_put + EV_cleanup);
+ drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
}
drbd_free_ee(mdev, e);
}
@@ -568,7 +568,7 @@ STATIC int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
set_fs(oldfs);
if (rv != size)
- drbd_force_state(mdev, NS(conn, BrokenPipe));
+ drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
return rv;
}
@@ -617,7 +617,7 @@ STATIC struct socket *drbd_try_connect(struct drbd_conf *mdev)
goto out;
/* connect may fail, peer not yet available.
- * stay WFConnection, don't go Disconnecting! */
+ * stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
err = sock->ops->connect(sock,
@@ -643,7 +643,7 @@ out:
dev_err(DEV, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
}
dec_net(mdev);
return sock;
@@ -688,7 +688,7 @@ out:
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
dev_err(DEV, "%s failed, err = %d\n", what, err);
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
}
}
dec_net(mdev);
@@ -697,16 +697,16 @@ out:
}
STATIC int drbd_send_fp(struct drbd_conf *mdev,
- struct socket *sock, enum Drbd_Packet_Cmd cmd)
+ struct socket *sock, enum drbd_packets cmd)
{
- struct Drbd_Header *h = (struct Drbd_Header *) &mdev->data.sbuf.head;
+ struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}
-STATIC enum Drbd_Packet_Cmd drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
+STATIC enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
- struct Drbd_Header *h = (struct Drbd_Header *) &mdev->data.sbuf.head;
+ struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
int rr;
rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
@@ -759,7 +759,7 @@ STATIC int drbd_connect(struct drbd_conf *mdev)
if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");
- if (drbd_request_state(mdev, NS(conn, WFConnection)) < SS_Success)
+ if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
return -2;
clear_bit(DISCARD_CONCURRENT, &mdev->flags);
@@ -780,11 +780,11 @@ STATIC int drbd_connect(struct drbd_conf *mdev)
if (s) {
if (!sock) {
- drbd_send_fp(mdev, s, HandShakeS);
+ drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
sock = s;
s = NULL;
} else if (!msock) {
- drbd_send_fp(mdev, s, HandShakeM);
+ drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
msock = s;
s = NULL;
} else {
@@ -809,14 +809,14 @@ retry:
drbd_socket_okay(mdev, &sock);
drbd_socket_okay(mdev, &msock);
switch (try) {
- case HandShakeS:
+ case P_HAND_SHAKE_S:
if (sock) {
dev_warn(DEV, "initial packet S crossed\n");
sock_release(sock);
}
sock = s;
break;
- case HandShakeM:
+ case P_HAND_SHAKE_M:
if (msock) {
dev_warn(DEV, "initial packet M crossed\n");
sock_release(msock);
@@ -832,7 +832,7 @@ retry:
}
}
- if (mdev->state.conn <= Disconnecting)
+ if (mdev->state.conn <= C_DISCONNECTING)
return -1;
if (signal_pending(current)) {
flush_signals(current);
@@ -872,7 +872,7 @@ retry:
/* NOT YET ...
* sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
* sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- * first set it to the HandShake timeout, wich is hardcoded for now: */
+ * first set it to the P_HAND_SHAKE timeout, which is hardcoded for now: */
sock->sk->sk_sndtimeo =
sock->sk->sk_rcvtimeo = 2*HZ;
@@ -902,7 +902,7 @@ retry:
}
}
- if (drbd_request_state(mdev, NS(conn, WFReportParams)) < SS_Success)
+ if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
return 0;
sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
@@ -923,7 +923,7 @@ retry:
return 1;
}
-STATIC int drbd_recv_header(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
{
int r;
@@ -962,7 +962,7 @@ STATIC enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
dec_local(mdev);
}
- return drbd_may_finish_epoch(mdev, epoch, EV_barrier_done);
+ return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}
/**
@@ -979,8 +979,8 @@ STATIC int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
drbd_flush_after_epoch(mdev, epoch);
- drbd_may_finish_epoch(mdev, epoch, EV_put |
- (mdev->state.conn < Connected ? EV_cleanup : 0));
+ drbd_may_finish_epoch(mdev, epoch, EV_PUT |
+ (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
return 1;
}
@@ -996,13 +996,13 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
int finish, epoch_size;
struct drbd_epoch *next_epoch;
int schedule_flush = 0;
- enum finish_epoch rv = FE_still_live;
+ enum finish_epoch rv = FE_STILL_LIVE;
static char *epoch_event_str[] = {
- [EV_put] = "put",
- [EV_got_barrier_nr] = "got_barrier_nr",
- [EV_barrier_done] = "barrier_done",
- [EV_became_last] = "became_last",
+ [EV_PUT] = "put",
+ [EV_GOT_BARRIER_NR] = "got_barrier_nr",
+ [EV_BARRIER_DONE] = "barrier_done",
+ [EV_BECAME_LAST] = "became_last",
};
spin_lock(&mdev->epoch_lock);
@@ -1012,11 +1012,11 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
epoch_size = atomic_read(&epoch->epoch_size);
- switch (ev & ~EV_cleanup) {
- case EV_put:
+ switch (ev & ~EV_CLEANUP) {
+ case EV_PUT:
atomic_dec(&epoch->active);
break;
- case EV_got_barrier_nr:
+ case EV_GOT_BARRIER_NR:
set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
/* Special case: If we just switched from WO_bio_barrier to
@@ -1026,15 +1026,15 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
epoch == mdev->current_epoch)
clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
break;
- case EV_barrier_done:
+ case EV_BARRIER_DONE:
set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
break;
- case EV_became_last:
+ case EV_BECAME_LAST:
/* nothing to do*/
break;
}
- MTRACE(TraceTypeEpochs, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EPOCHS, TRACE_LVL_ALL,
dev_info(DEV, "Update epoch %p/%d { size=%d active=%d %c%c n%c%c } ev=%s\n",
epoch, epoch->barrier_nr, epoch_size, atomic_read(&epoch->active),
test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) ? 'n' : '-',
@@ -1053,7 +1053,7 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
mdev->write_ordering == WO_none ||
(epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
- ev & EV_cleanup) {
+ ev & EV_CLEANUP) {
finish = 1;
set_bit(DE_IS_FINISHING, &epoch->flags);
} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
@@ -1063,7 +1063,7 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
}
}
if (finish) {
- if (!(ev & EV_cleanup)) {
+ if (!(ev & EV_CLEANUP)) {
spin_unlock(&mdev->epoch_lock);
drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
spin_lock(&mdev->epoch_lock);
@@ -1073,22 +1073,22 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
if (mdev->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
- ev = EV_became_last | (ev & EV_cleanup);
+ ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
mdev->epochs--;
- MTRACE(TraceTypeEpochs, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_EPOCHS, TRACE_LVL_SUMMARY,
dev_info(DEV, "Freeing epoch %p/%d { size=%d } nr_epochs=%d\n",
epoch, epoch->barrier_nr, epoch_size, mdev->epochs);
);
kfree(epoch);
- if (rv == FE_still_live)
- rv = FE_destroyed;
+ if (rv == FE_STILL_LIVE)
+ rv = FE_DESTROYED;
} else {
epoch->flags = 0;
atomic_set(&epoch->epoch_size, 0);
/* atomic_set(&epoch->active, 0); is already zero */
- if (rv == FE_still_live)
- rv = FE_recycled;
+ if (rv == FE_STILL_LIVE)
+ rv = FE_RECYCLED;
}
}
@@ -1104,7 +1104,7 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
struct flush_work *fw;
fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
if (fw) {
- MTRACE(TraceTypeEpochs, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_EPOCHS, TRACE_LVL_METRICS,
dev_info(DEV, "Schedul flush %p/%d { size=%d } nr_epochs=%d\n",
epoch, epoch->barrier_nr, epoch_size, mdev->epochs);
);
@@ -1115,8 +1115,8 @@ STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
/* That is not a recursion, only one level */
- drbd_may_finish_epoch(mdev, epoch, EV_barrier_done);
- drbd_may_finish_epoch(mdev, epoch, EV_put);
+ drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
+ drbd_may_finish_epoch(mdev, epoch, EV_PUT);
}
}
@@ -1156,7 +1156,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
*/
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
struct bio *bio = e->private_bio;
/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
@@ -1202,10 +1202,10 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
return 1;
}
-STATIC int receive_Barrier(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
{
int rv, issue_flush;
- struct Drbd_Barrier_Packet *p = (struct Drbd_Barrier_Packet *)h;
+ struct p_barrier *p = (struct p_barrier *)h;
struct drbd_epoch *epoch;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
@@ -1219,27 +1219,27 @@ STATIC int receive_Barrier(struct drbd_conf *mdev, struct Drbd_Header *h)
drbd_kick_lo(mdev);
mdev->current_epoch->barrier_nr = p->barrier;
- rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_got_barrier_nr);
+ rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
- /* BarrierAck may imply that the corresponding extent is dropped from
+ /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
- * Primary crashes now.
+ * R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
switch (mdev->write_ordering) {
case WO_bio_barrier:
case WO_none:
- if (rv == FE_recycled)
+ if (rv == FE_RECYCLED)
return TRUE;
break;
case WO_bdev_flush:
case WO_drain_io:
- D_ASSERT(rv == FE_still_live);
+ D_ASSERT(rv == FE_STILL_LIVE);
set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
- if (rv == FE_recycled)
+ if (rv == FE_RECYCLED)
return TRUE;
/* The asender will send all the ACKs and barrier ACKs out, since
@@ -1255,7 +1255,7 @@ STATIC int receive_Barrier(struct drbd_conf *mdev, struct Drbd_Header *h)
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
if (issue_flush) {
rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
- if (rv == FE_recycled)
+ if (rv == FE_RECYCLED)
return TRUE;
}
@@ -1273,7 +1273,7 @@ STATIC int receive_Barrier(struct drbd_conf *mdev, struct Drbd_Header *h)
list_add(&epoch->list, &mdev->current_epoch->list);
mdev->current_epoch = epoch;
mdev->epochs++;
- MTRACE(TraceTypeEpochs, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_EPOCHS, TRACE_LVL_METRICS,
dev_info(DEV, "Allocat epoch %p/xxxx { } nr_epochs=%d\n", epoch, mdev->epochs);
);
} else {
@@ -1287,10 +1287,10 @@ STATIC int receive_Barrier(struct drbd_conf *mdev, struct Drbd_Header *h)
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
-STATIC struct Tl_epoch_entry *
+STATIC struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct bio_vec *bvec;
struct page *page;
struct bio *bio;
@@ -1440,7 +1440,7 @@ STATIC int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
* drbd_process_done_ee() by asender only */
STATIC int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
sector_t sector = e->sector;
int ok;
@@ -1448,12 +1448,12 @@ STATIC int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
if (likely(drbd_bio_uptodate(e->private_bio))) {
drbd_set_in_sync(mdev, sector, e->size);
- ok = drbd_send_ack(mdev, RSWriteAck, e);
+ ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
} else {
/* Record failure to sync */
drbd_rs_failed_io(mdev, sector, e->size);
- ok = drbd_send_ack(mdev, NegAck, e);
+ ok = drbd_send_ack(mdev, P_NEG_ACK, e);
ok &= drbd_io_error(mdev, FALSE);
}
dec_unacked(mdev);
@@ -1463,7 +1463,7 @@ STATIC int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
STATIC int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
e = read_in_block(mdev, ID_SYNCER, sector, data_size);
if (!e) {
@@ -1485,7 +1485,7 @@ STATIC int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
list_add(&e->w.list, &mdev->sync_ee);
spin_unlock_irq(&mdev->req_lock);
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "submit EE (RS)WRITE sec=%llus size=%u ee=%p\n",
(unsigned long long)e->sector, e->size, e);
);
@@ -1497,13 +1497,13 @@ STATIC int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
return TRUE;
}
-STATIC int receive_DataReply(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
{
struct drbd_request *req;
sector_t sector;
unsigned int header_size, data_size;
int ok;
- struct Drbd_Data_Packet *p = (struct Drbd_Data_Packet *)h;
+ struct p_data *p = (struct p_data *)h;
header_size = sizeof(*p) - sizeof(*h);
data_size = h->length - header_size;
@@ -1537,12 +1537,12 @@ STATIC int receive_DataReply(struct drbd_conf *mdev, struct Drbd_Header *h)
return ok;
}
-STATIC int receive_RSDataReply(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
{
sector_t sector;
unsigned int header_size, data_size;
int ok;
- struct Drbd_Data_Packet *p = (struct Drbd_Data_Packet *)h;
+ struct p_data *p = (struct p_data *)h;
header_size = sizeof(*p) - sizeof(*h);
data_size = h->length - header_size;
@@ -1566,7 +1566,7 @@ STATIC int receive_RSDataReply(struct drbd_conf *mdev, struct Drbd_Header *h)
ok = drbd_drain_block(mdev, data_size);
- drbd_send_ack_dp(mdev, NegAck, p);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p);
}
return ok;
@@ -1577,7 +1577,7 @@ STATIC int receive_RSDataReply(struct drbd_conf *mdev, struct Drbd_Header *h)
*/
STATIC int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
sector_t sector = e->sector;
struct drbd_epoch *epoch;
int ok = 1, pcmd;
@@ -1585,20 +1585,20 @@ STATIC int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
if (e->flags & EE_IS_BARRIER) {
epoch = previous_epoch(mdev, e->epoch);
if (epoch)
- drbd_may_finish_epoch(mdev, epoch, EV_barrier_done);
+ drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}
if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
if (likely(drbd_bio_uptodate(e->private_bio))) {
- pcmd = (mdev->state.conn >= SyncSource &&
- mdev->state.conn <= PausedSyncT &&
+ pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
+ mdev->state.conn <= C_PAUSED_SYNC_T &&
e->flags & EE_MAY_SET_IN_SYNC) ?
- RSWriteAck : WriteAck;
+ P_RS_WRITE_ACK : P_WRITE_ACK;
ok &= drbd_send_ack(mdev, pcmd, e);
- if (pcmd == RSWriteAck)
+ if (pcmd == P_RS_WRITE_ACK)
drbd_set_in_sync(mdev, sector, e->size);
} else {
- ok = drbd_send_ack(mdev, NegAck, e);
+ ok = drbd_send_ack(mdev, P_NEG_ACK, e);
ok &= drbd_io_error(mdev, FALSE);
/* we expect it to be marked out of sync anyways...
* maybe assert this? */
@@ -1609,7 +1609,7 @@ STATIC int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
}
/* we delete from the conflict detection hash _after_ we sent out the
- * WriteAck / NegAck, to get the sequence number right. */
+ * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
if (mdev->net_conf->two_primaries) {
spin_lock_irq(&mdev->req_lock);
D_ASSERT(!hlist_unhashed(&e->colision));
@@ -1619,18 +1619,18 @@ STATIC int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
D_ASSERT(hlist_unhashed(&e->colision));
}
- drbd_may_finish_epoch(mdev, e->epoch, EV_put);
+ drbd_may_finish_epoch(mdev, e->epoch, EV_PUT);
return ok;
}
STATIC int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
int ok = 1;
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- ok = drbd_send_ack(mdev, DiscardAck, e);
+ ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
spin_lock_irq(&mdev->req_lock);
D_ASSERT(!hlist_unhashed(&e->colision));
@@ -1645,11 +1645,11 @@ STATIC int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
/* Called from receive_Data.
* Synchronize packets on sock with packets on msock.
*
- * This is here so even when a Data packet traveling via sock overtook an Ack
+ * This is here so even when a P_DATA packet traveling via sock overtook an Ack
* packet traveling on msock, they are still processed in the order they have
* been sent.
*
- * Note: we don't care for Ack packets overtaking Data packets.
+ * Note: we don't care for Ack packets overtaking P_DATA packets.
*
* In case packet_seq is larger than mdev->peer_seq number, there are
* outstanding packets on the msock. We wait for them to arrive.
@@ -1696,11 +1696,11 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
}
/* mirrored write */
-STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_Data(struct drbd_conf *mdev, struct p_header *h)
{
sector_t sector;
- struct Tl_epoch_entry *e;
- struct Drbd_Data_Packet *p = (struct Drbd_Data_Packet *)h;
+ struct drbd_epoch_entry *e;
+ struct p_data *p = (struct p_data *)h;
int header_size, data_size;
int rw = WRITE;
u32 dp_flags;
@@ -1725,7 +1725,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
mdev->peer_seq++;
spin_unlock(&mdev->peer_seq_lock);
- drbd_send_ack_dp(mdev, NegAck, p);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p);
atomic_inc(&mdev->current_epoch->epoch_size);
return drbd_drain_block(mdev, data_size);
}
@@ -1752,7 +1752,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
a Barrier. */
epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
if (epoch == e->epoch) {
- MTRACE(TraceTypeEpochs, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_EPOCHS, TRACE_LVL_METRICS,
dev_info(DEV, "Add barrier %p/%d\n",
epoch, epoch->barrier_nr);
);
@@ -1762,7 +1762,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
} else {
if (atomic_read(&epoch->epoch_size) > 1 ||
!test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
- MTRACE(TraceTypeEpochs, TraceLvlMetrics,
+ MTRACE(TRACE_TYPE_EPOCHS, TRACE_LVL_METRICS,
dev_info(DEV, "Add barrier %p/%d, setting bi in %p/%d\n",
e->epoch, e->epoch->barrier_nr,
epoch, epoch->barrier_nr);
@@ -1823,7 +1823,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
* if any conflicting request is found
* that has not yet been acked,
* AND I have the "discard concurrent writes" flag:
- * queue (via done_ee) the DiscardAck; OUT.
+ * queue (via done_ee) the P_DISCARD_ACK; OUT.
*
* if any conflicting request is found:
* block the receiver, waiting on misc_wait
@@ -1832,7 +1832,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
*
* we do not just write after local io completion of those
* requests, but only after req is done completely, i.e.
- * we wait for the DiscardAck to arrive!
+ * we wait for the P_DISCARD_ACK to arrive!
*
* then proceed normally, i.e. submit.
*/
@@ -1880,7 +1880,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
spin_unlock_irq(&mdev->req_lock);
- /* we could probably send that DiscardAck ourselves,
+ /* we could probably send that P_DISCARD_ACK ourselves,
* but I don't like the receiver using the msock */
dec_local(mdev);
@@ -1926,21 +1926,21 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct Drbd_Header *h)
case DRBD_PROT_B:
/* I really don't like it that the receiver thread
* sends on the msock, but anyways */
- drbd_send_ack(mdev, RecvAck, e);
+ drbd_send_ack(mdev, P_RECV_ACK, e);
break;
case DRBD_PROT_A:
/* nothing to do */
break;
}
- if (mdev->state.pdsk == Diskless) {
+ if (mdev->state.pdsk == D_DISKLESS) {
/* In case we have the only disk of the cluster, */
drbd_set_out_of_sync(mdev, e->sector, e->size);
e->flags |= EE_CALL_AL_COMPLETE_IO;
drbd_al_begin_io(mdev, e->sector);
}
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "submit EE (DATA)WRITE sec=%llus size=%u ee=%p\n",
(unsigned long long)e->sector, e->size, e);
);
@@ -1962,16 +1962,16 @@ out_interrupted:
return FALSE;
}
-STATIC int receive_DataRequest(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
{
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct digest_info *di;
int size, digest_size;
unsigned int fault_type;
- struct Drbd_BlockRequest_Packet *p =
- (struct Drbd_BlockRequest_Packet *)h;
+ struct p_block_req *p =
+ (struct p_block_req *)h;
const int brps = sizeof(*p)-sizeof(*h);
if (drbd_recv(mdev, h->payload, brps) != brps)
@@ -1991,12 +1991,12 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct Drbd_Header *h)
return FALSE;
}
- if (!inc_local_if_state(mdev, UpToDate)) {
+ if (!inc_local_if_state(mdev, D_UP_TO_DATE)) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not satisfy peer's read request, "
"no local data.\n");
- drbd_send_ack_rp(mdev, h->command == DataRequest ? NegDReply :
- NegRSDReply , p);
+ drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
+ P_NEG_RS_DREPLY , p);
return TRUE;
}
@@ -2010,11 +2010,11 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct Drbd_Header *h)
e->private_bio->bi_end_io = drbd_endio_read_sec;
switch (h->command) {
- case DataRequest:
+ case P_DATA_REQUEST:
e->w.cb = w_e_end_data_req;
fault_type = DRBD_FAULT_DT_RD;
break;
- case RSDataRequest:
+ case P_RS_DATA_REQUEST:
e->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
/* Eventually this should become asynchrously. Currently it
@@ -2032,8 +2032,8 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct Drbd_Header *h)
}
break;
- case OVReply:
- case CsumRSRequest:
+ case P_OV_REPLY:
+ case P_CSUM_RS_REQUEST:
fault_type = DRBD_FAULT_RS_RD;
digest_size = h->length - brps ;
di = kmalloc(sizeof(*di) + digest_size, GFP_KERNEL);
@@ -2054,10 +2054,10 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct Drbd_Header *h)
}
e->block_id = (u64)(unsigned long)di;
- if (h->command == CsumRSRequest) {
+ if (h->command == P_CSUM_RS_REQUEST) {
D_ASSERT(mdev->agreed_pro_version >= 89);
e->w.cb = w_e_end_csum_rs_req;
- } else if (h->command == OVReply) {
+ } else if (h->command == P_OV_REPLY) {
e->w.cb = w_e_end_ov_reply;
dec_rs_pending(mdev);
break;
@@ -2073,7 +2073,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct Drbd_Header *h)
}
break;
- case OVRequest:
+ case P_OV_REQUEST:
e->w.cb = w_e_end_ov_req;
fault_type = DRBD_FAULT_RS_RD;
/* Eventually this should become asynchrously. Currently it
@@ -2104,7 +2104,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct Drbd_Header *h)
inc_unacked(mdev);
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "submit EE READ sec=%llus size=%u ee=%p\n",
(unsigned long long)e->sector, e->size, e);
);
@@ -2121,21 +2121,21 @@ STATIC int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
- self = mdev->bc->md.uuid[Bitmap] & 1;
- peer = mdev->p_uuid[Bitmap] & 1;
+ self = mdev->bc->md.uuid[UI_BITMAP] & 1;
+ peer = mdev->p_uuid[UI_BITMAP] & 1;
- ch_peer = mdev->p_uuid[UUID_SIZE];
+ ch_peer = mdev->p_uuid[UI_SIZE];
ch_self = mdev->comm_bm_set;
switch (mdev->net_conf->after_sb_0p) {
- case Consensus:
- case DiscardSecondary:
- case CallHelper:
+ case ASB_CONSENSUS:
+ case ASB_DISCARD_SECONDARY:
+ case ASB_CALL_HELPER:
dev_err(DEV, "Configuration error.\n");
break;
- case Disconnect:
+ case ASB_DISCONNECT:
break;
- case DiscardYoungerPri:
+ case ASB_DISCARD_YOUNGER_PRI:
if (self == 0 && peer == 1) {
rv = -1;
break;
@@ -2145,7 +2145,7 @@ STATIC int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
break;
}
/* Else fall through to one of the other strategies... */
- case DiscardOlderPri:
+ case ASB_DISCARD_OLDER_PRI:
if (self == 0 && peer == 1) {
rv = 1;
break;
@@ -2157,7 +2157,7 @@ STATIC int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
/* Else fall through to one of the other strategies... */
dev_warn(DEV, "Discard younger/older primary did not found a decision\n"
"Using discard-least-changes instead\n");
- case DiscardZeroChg:
+ case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
? -1 : 1;
@@ -2166,9 +2166,9 @@ STATIC int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
if (ch_peer == 0) { rv = 1; break; }
if (ch_self == 0) { rv = -1; break; }
}
- if (mdev->net_conf->after_sb_0p == DiscardZeroChg)
+ if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
- case DiscardLeastChg:
+ case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
else if (ch_self > ch_peer)
@@ -2178,10 +2178,10 @@ STATIC int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
? -1 : 1;
break;
- case DiscardLocal:
+ case ASB_DISCARD_LOCAL:
rv = -1;
break;
- case DiscardRemote:
+ case ASB_DISCARD_REMOTE:
rv = 1;
}
@@ -2192,36 +2192,36 @@ STATIC int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
int self, peer, hg, rv = -100;
- self = mdev->bc->md.uuid[Bitmap] & 1;
- peer = mdev->p_uuid[Bitmap] & 1;
+ self = mdev->bc->md.uuid[UI_BITMAP] & 1;
+ peer = mdev->p_uuid[UI_BITMAP] & 1;
switch (mdev->net_conf->after_sb_1p) {
- case DiscardYoungerPri:
- case DiscardOlderPri:
- case DiscardLeastChg:
- case DiscardLocal:
- case DiscardRemote:
+ case ASB_DISCARD_YOUNGER_PRI:
+ case ASB_DISCARD_OLDER_PRI:
+ case ASB_DISCARD_LEAST_CHG:
+ case ASB_DISCARD_LOCAL:
+ case ASB_DISCARD_REMOTE:
dev_err(DEV, "Configuration error.\n");
break;
- case Disconnect:
+ case ASB_DISCONNECT:
break;
- case Consensus:
+ case ASB_CONSENSUS:
hg = drbd_asb_recover_0p(mdev);
- if (hg == -1 && mdev->state.role == Secondary)
+ if (hg == -1 && mdev->state.role == R_SECONDARY)
rv = hg;
- if (hg == 1 && mdev->state.role == Primary)
+ if (hg == 1 && mdev->state.role == R_PRIMARY)
rv = hg;
break;
- case Violently:
+ case ASB_VIOLENTLY:
rv = drbd_asb_recover_0p(mdev);
break;
- case DiscardSecondary:
- return mdev->state.role == Primary ? 1 : -1;
- case CallHelper:
+ case ASB_DISCARD_SECONDARY:
+ return mdev->state.role == R_PRIMARY ? 1 : -1;
+ case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(mdev);
- if (hg == -1 && mdev->state.role == Primary) {
- self = drbd_set_role(mdev, Secondary, 0);
- if (self != SS_Success) {
+ if (hg == -1 && mdev->state.role == R_PRIMARY) {
+ self = drbd_set_role(mdev, R_SECONDARY, 0);
+ if (self != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
dev_warn(DEV, "Sucessfully gave up primary role.\n");
@@ -2238,29 +2238,29 @@ STATIC int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
int self, peer, hg, rv = -100;
- self = mdev->bc->md.uuid[Bitmap] & 1;
- peer = mdev->p_uuid[Bitmap] & 1;
+ self = mdev->bc->md.uuid[UI_BITMAP] & 1;
+ peer = mdev->p_uuid[UI_BITMAP] & 1;
switch (mdev->net_conf->after_sb_2p) {
- case DiscardYoungerPri:
- case DiscardOlderPri:
- case DiscardLeastChg:
- case DiscardLocal:
- case DiscardRemote:
- case Consensus:
- case DiscardSecondary:
+ case ASB_DISCARD_YOUNGER_PRI:
+ case ASB_DISCARD_OLDER_PRI:
+ case ASB_DISCARD_LEAST_CHG:
+ case ASB_DISCARD_LOCAL:
+ case ASB_DISCARD_REMOTE:
+ case ASB_CONSENSUS:
+ case ASB_DISCARD_SECONDARY:
dev_err(DEV, "Configuration error.\n");
break;
- case Violently:
+ case ASB_VIOLENTLY:
rv = drbd_asb_recover_0p(mdev);
break;
- case Disconnect:
+ case ASB_DISCONNECT:
break;
- case CallHelper:
+ case ASB_CALL_HELPER:
hg = drbd_asb_recover_0p(mdev);
if (hg == -1) {
- self = drbd_set_role(mdev, Secondary, 0);
- if (self != SS_Success) {
+ self = drbd_set_role(mdev, R_SECONDARY, 0);
+ if (self != SS_SUCCESS) {
drbd_khelper(mdev, "pri-lost-after-sb");
} else {
dev_warn(DEV, "Sucessfully gave up primary role.\n");
@@ -2282,21 +2282,21 @@ STATIC void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
}
dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
text,
- (unsigned long long)uuid[Current],
- (unsigned long long)uuid[Bitmap],
- (unsigned long long)uuid[History_start],
- (unsigned long long)uuid[History_end],
+ (unsigned long long)uuid[UI_CURRENT],
+ (unsigned long long)uuid[UI_BITMAP],
+ (unsigned long long)uuid[UI_HISTORY_START],
+ (unsigned long long)uuid[UI_HISTORY_END],
(unsigned long long)bits,
(unsigned long long)flags);
}
/*
100 after split brain try auto recover
- 2 SyncSource set BitMap
- 1 SyncSource use BitMap
+ 2 C_SYNC_SOURCE set BitMap
+ 1 C_SYNC_SOURCE use BitMap
0 no Sync
- -1 SyncTarget use BitMap
- -2 SyncTarget set BitMap
+ -1 C_SYNC_TARGET use BitMap
+ -2 C_SYNC_TARGET set BitMap
-100 after split brain, disconnect
-1000 unrelated data
*/
@@ -2305,8 +2305,8 @@ STATIC int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
u64 self, peer;
int i, j;
- self = mdev->bc->md.uuid[Current] & ~((u64)1);
- peer = mdev->p_uuid[Current] & ~((u64)1);
+ self = mdev->bc->md.uuid[UI_CURRENT] & ~((u64)1);
+ peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
*rule_nr = 1;
if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
@@ -2327,11 +2327,11 @@ STATIC int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
int rct, dc; /* roles at crash time */
rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
- (mdev->p_uuid[UUID_FLAGS] & 2);
+ (mdev->p_uuid[UI_FLAGS] & 2);
/* lowest bit is set when we were primary,
* next bit (weight 2) is set when peer was primary */
- MTRACE(TraceTypeUuid, TraceLvlMetrics, DUMPI(rct););
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_METRICS, DUMPI(rct););
switch (rct) {
case 0: /* !self_pri && !peer_pri */ return 0;
@@ -2339,46 +2339,46 @@ STATIC int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
- MTRACE(TraceTypeUuid, TraceLvlMetrics, DUMPI(dc););
+ MTRACE(TRACE_TYPE_UUID, TRACE_LVL_METRICS, DUMPI(dc););
return dc ? -1 : 1;
}
}
*rule_nr = 5;
- peer = mdev->p_uuid[Bitmap] & ~((u64)1);
+ peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer)
return -1;
*rule_nr = 6;
- for (i = History_start; i <= History_end; i++) {
+ for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
peer = mdev->p_uuid[i] & ~((u64)1);
if (self == peer)
return -2;
}
*rule_nr = 7;
- self = mdev->bc->md.uuid[Bitmap] & ~((u64)1);
- peer = mdev->p_uuid[Current] & ~((u64)1);
+ self = mdev->bc->md.uuid[UI_BITMAP] & ~((u64)1);
+ peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
if (self == peer)
return 1;
*rule_nr = 8;
- for (i = History_start; i <= History_end; i++) {
+ for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
self = mdev->bc->md.uuid[i] & ~((u64)1);
if (self == peer)
return 2;
}
*rule_nr = 9;
- self = mdev->bc->md.uuid[Bitmap] & ~((u64)1);
- peer = mdev->p_uuid[Bitmap] & ~((u64)1);
+ self = mdev->bc->md.uuid[UI_BITMAP] & ~((u64)1);
+ peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer && self != ((u64)0))
return 100;
*rule_nr = 10;
- for (i = History_start; i <= History_end; i++) {
+ for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
self = mdev->p_uuid[i] & ~((u64)1);
- for (j = History_start; j <= History_end; j++) {
+ for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
peer = mdev->p_uuid[j] & ~((u64)1);
if (self == peer)
return -100;
@@ -2389,37 +2389,37 @@ STATIC int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
}
/* drbd_sync_handshake() returns the new conn state on success, or
- conn_mask (-1) on failure.
+ CONN_MASK (-1) on failure.
*/
STATIC enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
int hg, rule_nr;
- enum drbd_conns rv = conn_mask;
+ enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
mydisk = mdev->state.disk;
- if (mydisk == Negotiating)
+ if (mydisk == D_NEGOTIATING)
mydisk = mdev->new_state_tmp.disk;
hg = drbd_uuid_compare(mdev, &rule_nr);
dev_info(DEV, "drbd_sync_handshake:\n");
drbd_uuid_dump(mdev, "self", mdev->bc->md.uuid,
- mdev->state.disk >= Negotiating ? drbd_bm_total_weight(mdev) : 0, 0);
+ mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
- mdev->p_uuid[UUID_SIZE], mdev->p_uuid[UUID_FLAGS]);
+ mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
if (hg == -1000) {
dev_alert(DEV, "Unrelated data, aborting!\n");
- return conn_mask;
+ return C_MASK;
}
- if ((mydisk == Inconsistent && peer_disk > Inconsistent) ||
- (peer_disk == Inconsistent && mydisk > Inconsistent)) {
+ if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
+ (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
int f = (hg == -100) || abs(hg) == 2;
- hg = mydisk > Inconsistent ? 1 : -1;
+ hg = mydisk > D_INCONSISTENT ? 1 : -1;
if (f)
hg = hg*2;
dev_info(DEV, "Becoming sync %s due to disk states.\n",
@@ -2427,8 +2427,8 @@ STATIC enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
- int pcount = (mdev->state.role == Primary)
- + (peer_role == Primary);
+ int pcount = (mdev->state.role == R_PRIMARY)
+ + (peer_role == R_PRIMARY);
int forced = (hg == -100);
switch (pcount) {
@@ -2455,9 +2455,9 @@ STATIC enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
if (hg == -100) {
- if (mdev->net_conf->want_lose && !(mdev->p_uuid[UUID_FLAGS]&1))
+ if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!mdev->net_conf->want_lose && (mdev->p_uuid[UUID_FLAGS]&1))
+ if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
@@ -2469,24 +2469,24 @@ STATIC enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (hg == -100) {
dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
drbd_khelper(mdev, "split-brain");
- return conn_mask;
+ return C_MASK;
}
- if (hg > 0 && mydisk <= Inconsistent) {
+ if (hg > 0 && mydisk <= D_INCONSISTENT) {
dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
- return conn_mask;
+ return C_MASK;
}
if (hg < 0 && /* by intention we do not use mydisk here. */
- mdev->state.role == Primary && mdev->state.disk >= Consistent) {
+ mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
switch (mdev->net_conf->rr_conflict) {
- case CallHelper:
+ case ASB_CALL_HELPER:
drbd_khelper(mdev, "pri-lost");
/* fall through */
- case Disconnect:
+ case ASB_DISCONNECT:
dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
- return conn_mask;
- case Violently:
+ return C_MASK;
+ case ASB_VIOLENTLY:
dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
"assumption\n");
}
@@ -2495,15 +2495,15 @@ STATIC enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (abs(hg) >= 2) {
dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
- return conn_mask;
+ return C_MASK;
}
if (hg > 0) { /* become sync source. */
- rv = WFBitMapS;
+ rv = C_WF_BITMAP_S;
} else if (hg < 0) { /* become sync target */
- rv = WFBitMapT;
+ rv = C_WF_BITMAP_T;
} else {
- rv = Connected;
+ rv = C_CONNECTED;
if (drbd_bm_total_weight(mdev)) {
dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
drbd_bm_total_weight(mdev));
@@ -2516,16 +2516,16 @@ STATIC enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
/* returns 1 if invalid */
-STATIC int cmp_after_sb(enum after_sb_handler peer, enum after_sb_handler self)
+STATIC int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
- /* DiscardRemote - DiscardLocal is valid */
- if ((peer == DiscardRemote && self == DiscardLocal) ||
- (self == DiscardRemote && peer == DiscardLocal))
+ /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
+ if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
+ (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
return 0;
- /* any other things with DiscardRemote or DiscardLocal are invalid */
- if (peer == DiscardRemote || peer == DiscardLocal ||
- self == DiscardRemote || self == DiscardLocal)
+ /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
+ if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
+ self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
return 1;
/* everything else is valid if they are equal on both sides. */
@@ -2536,9 +2536,9 @@ STATIC int cmp_after_sb(enum after_sb_handler peer, enum after_sb_handler self)
return 1;
}
-STATIC int receive_protocol(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_Protocol_Packet *p = (struct Drbd_Protocol_Packet *)h;
+ struct p_protocol *p = (struct p_protocol *)h;
int header_size, data_size;
int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
int p_want_lose, p_two_primaries;
@@ -2605,7 +2605,7 @@ STATIC int receive_protocol(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
disconnect:
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
@@ -2636,19 +2636,19 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
return tfm;
}
-STATIC int receive_SyncParam(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
{
int ok = TRUE;
- struct Drbd_SyncParam89_Packet *p = (struct Drbd_SyncParam89_Packet *)h;
+ struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
const int apv = mdev->agreed_pro_version;
- exp_max_sz = apv <= 87 ? sizeof(struct Drbd_SyncParam_Packet)
- : apv == 88 ? sizeof(struct Drbd_SyncParam_Packet)
+ exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
+ : apv == 88 ? sizeof(struct p_rs_param)
+ SHARED_SECRET_MAX
- : /* 89 */ sizeof(struct Drbd_SyncParam89_Packet);
+ : /* 89 */ sizeof(struct p_rs_param_89);
if (h->length > exp_max_sz) {
dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
@@ -2657,10 +2657,10 @@ STATIC int receive_SyncParam(struct drbd_conf *mdev, struct Drbd_Header *h)
}
if (apv <= 88) {
- header_size = sizeof(struct Drbd_SyncParam_Packet) - sizeof(*h);
+ header_size = sizeof(struct p_rs_param) - sizeof(*h);
data_size = h->length - header_size;
} else /* apv >= 89 */ {
- header_size = sizeof(struct Drbd_SyncParam89_Packet) - sizeof(*h);
+ header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
data_size = h->length - header_size;
D_ASSERT(data_size == 0);
}
@@ -2700,7 +2700,7 @@ STATIC int receive_SyncParam(struct drbd_conf *mdev, struct Drbd_Header *h)
}
if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
- if (mdev->state.conn == WFReportParams) {
+ if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
mdev->sync_conf.verify_alg, p->verify_alg);
goto disconnect;
@@ -2712,7 +2712,7 @@ STATIC int receive_SyncParam(struct drbd_conf *mdev, struct Drbd_Header *h)
}
if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
- if (mdev->state.conn == WFReportParams) {
+ if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
mdev->sync_conf.csums_alg, p->csums_alg);
goto disconnect;
@@ -2746,7 +2746,7 @@ STATIC int receive_SyncParam(struct drbd_conf *mdev, struct Drbd_Header *h)
return ok;
disconnect:
crypto_free_hash(verify_tfm);
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
@@ -2769,10 +2769,10 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
(unsigned long long)a, (unsigned long long)b);
}
-STATIC int receive_sizes(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_Sizes_Packet *p = (struct Drbd_Sizes_Packet *)h;
- enum determin_dev_size_enum dd = unchanged;
+ struct p_sizes *p = (struct p_sizes *)h;
+ enum determine_dev_size dd = unchanged;
unsigned int max_seg_s;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
@@ -2785,9 +2785,9 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct Drbd_Header *h)
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
- if (p_size == 0 && mdev->state.disk == Diskless) {
+ if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
dev_err(DEV, "some backing storage is needed\n");
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
@@ -2804,7 +2804,7 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct Drbd_Header *h)
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
- if (mdev->state.conn == WFReportParams)
+ if (mdev->state.conn == C_WF_REPORT_PARAMS)
p_usize = min_not_zero((sector_t)mdev->bc->dc.disk_size,
p_usize);
@@ -2820,10 +2820,10 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct Drbd_Header *h)
But allow online shrinking if we are connected. */
if (drbd_new_dev_size(mdev, mdev->bc) <
drbd_get_capacity(mdev->this_bdev) &&
- mdev->state.disk >= Outdated &&
- mdev->state.conn < Connected) {
+ mdev->state.disk >= D_OUTDATED &&
+ mdev->state.conn < C_CONNECTED) {
dev_err(DEV, "The peer's disk size is too small!\n");
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
mdev->bc->dc.disk_size = my_usize;
dec_local(mdev);
return FALSE;
@@ -2843,18 +2843,18 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct Drbd_Header *h)
drbd_set_my_capacity(mdev, p_size);
}
- if (mdev->p_uuid && mdev->state.conn <= Connected && inc_local(mdev)) {
+ if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && inc_local(mdev)) {
nconn = drbd_sync_handshake(mdev,
mdev->state.peer, mdev->state.pdsk);
dec_local(mdev);
- if (nconn == conn_mask) {
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ if (nconn == C_MASK) {
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
- if (drbd_request_state(mdev, NS(conn, nconn)) < SS_Success) {
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
}
@@ -2873,16 +2873,16 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct Drbd_Header *h)
dec_local(mdev);
}
- if (mdev->state.conn > WFReportParams) {
+ if (mdev->state.conn > C_WF_REPORT_PARAMS) {
if (be64_to_cpu(p->c_size) !=
drbd_get_capacity(mdev->this_bdev) || ldsc) {
/* we have different sizes, probabely peer
* needs to know my new size... */
drbd_send_sizes(mdev);
}
- if (dd == grew && mdev->state.conn == Connected) {
- if (mdev->state.pdsk >= Inconsistent &&
- mdev->state.disk >= Inconsistent)
+ if (dd == grew && mdev->state.conn == C_CONNECTED) {
+ if (mdev->state.pdsk >= D_INCONSISTENT &&
+ mdev->state.disk >= D_INCONSISTENT)
resync_after_online_grow(mdev);
else
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
@@ -2892,9 +2892,9 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int receive_uuids(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_GenCnt_Packet *p = (struct Drbd_GenCnt_Packet *)h;
+ struct p_uuids *p = (struct p_uuids *)h;
u64 *p_uuid;
int i;
@@ -2902,38 +2902,38 @@ STATIC int receive_uuids(struct drbd_conf *mdev, struct Drbd_Header *h)
if (drbd_recv(mdev, h->payload, h->length) != h->length)
return FALSE;
- p_uuid = kmalloc(sizeof(u64)*EXT_UUID_SIZE, GFP_KERNEL);
+ p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_KERNEL);
- for (i = Current; i < EXT_UUID_SIZE; i++)
+ for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
kfree(mdev->p_uuid);
mdev->p_uuid = p_uuid;
- if (mdev->state.conn < Connected &&
- mdev->state.disk < Inconsistent &&
- mdev->state.role == Primary &&
- (mdev->ed_uuid & ~((u64)1)) != (p_uuid[Current] & ~((u64)1))) {
+ if (mdev->state.conn < C_CONNECTED &&
+ mdev->state.disk < D_INCONSISTENT &&
+ mdev->state.role == R_PRIMARY &&
+ (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
(unsigned long long)mdev->ed_uuid);
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
if (inc_local(mdev)) {
int skip_initial_sync =
- mdev->state.conn == Connected &&
+ mdev->state.conn == C_CONNECTED &&
mdev->agreed_pro_version >= 90 &&
- mdev->bc->md.uuid[Current] == UUID_JUST_CREATED &&
- (p_uuid[UUID_FLAGS] & 8);
+ mdev->bc->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
+ (p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
"clear_n_write from receive_uuids");
- _drbd_uuid_set(mdev, Current, p_uuid[Current]);
- _drbd_uuid_set(mdev, Bitmap, 0);
- _drbd_set_state(_NS2(mdev, disk, UpToDate, pdsk, UpToDate),
- ChgStateVerbose, NULL);
+ _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
+ _drbd_uuid_set(mdev, UI_BITMAP, 0);
+ _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
+ CS_VERBOSE, NULL);
drbd_md_sync(mdev);
}
dec_local(mdev);
@@ -2944,8 +2944,8 @@ STATIC int receive_uuids(struct drbd_conf *mdev, struct Drbd_Header *h)
we are primary and are detaching from our disk. We need to see the
new disk state... */
wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
- if (mdev->state.conn >= Connected && mdev->state.disk < Inconsistent)
- drbd_set_ed_uuid(mdev, p_uuid[Current]);
+ if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
+ drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
return TRUE;
}
@@ -2954,18 +2954,18 @@ STATIC int receive_uuids(struct drbd_conf *mdev, struct Drbd_Header *h)
* convert_state:
* Switches the view of the state.
*/
-STATIC union drbd_state_t convert_state(union drbd_state_t ps)
+STATIC union drbd_state convert_state(union drbd_state ps)
{
- union drbd_state_t ms;
+ union drbd_state ms;
static enum drbd_conns c_tab[] = {
- [Connected] = Connected,
+ [C_CONNECTED] = C_CONNECTED,
- [StartingSyncS] = StartingSyncT,
- [StartingSyncT] = StartingSyncS,
- [Disconnecting] = TearDown, /* NetworkFailure, */
- [VerifyS] = VerifyT,
- [conn_mask] = conn_mask,
+ [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
+ [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
+ [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
+ [C_VERIFY_S] = C_VERIFY_T,
+ [C_MASK] = C_MASK,
};
ms.i = ps.i;
@@ -2980,10 +2980,10 @@ STATIC union drbd_state_t convert_state(union drbd_state_t ps)
return ms;
}
-STATIC int receive_req_state(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_Req_State_Packet *p = (struct Drbd_Req_State_Packet *)h;
- union drbd_state_t mask, val;
+ struct p_req_state *p = (struct p_req_state *)h;
+ union drbd_state mask, val;
int rv;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
@@ -2995,14 +2995,14 @@ STATIC int receive_req_state(struct drbd_conf *mdev, struct Drbd_Header *h)
if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
- drbd_send_sr_reply(mdev, SS_ConcurrentStChg);
+ drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
return TRUE;
}
mask = convert_state(mask);
val = convert_state(val);
- rv = drbd_change_state(mdev, ChgStateVerbose, mask, val);
+ rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
drbd_send_sr_reply(mdev, rv);
drbd_md_sync(mdev);
@@ -3010,11 +3010,11 @@ STATIC int receive_req_state(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int receive_state(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_state(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_State_Packet *p = (struct Drbd_State_Packet *)h;
+ struct p_state *p = (struct p_state *)h;
enum drbd_conns nconn, oconn;
- union drbd_state_t ns, peer_state;
+ union drbd_state ns, peer_state;
enum drbd_disk_state real_peer_disk;
int rv;
@@ -3027,8 +3027,8 @@ STATIC int receive_state(struct drbd_conf *mdev, struct Drbd_Header *h)
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
- if (peer_state.disk == Negotiating) {
- real_peer_disk = mdev->p_uuid[UUID_FLAGS] & 4 ? Inconsistent : Consistent;
+ if (peer_state.disk == D_NEGOTIATING) {
+ real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
dev_info(DEV, "real peer disk state = %s\n", disks_to_name(real_peer_disk));
}
@@ -3037,34 +3037,34 @@ STATIC int receive_state(struct drbd_conf *mdev, struct Drbd_Header *h)
oconn = nconn = mdev->state.conn;
spin_unlock_irq(&mdev->req_lock);
- if (nconn == WFReportParams)
- nconn = Connected;
+ if (nconn == C_WF_REPORT_PARAMS)
+ nconn = C_CONNECTED;
- if (mdev->p_uuid && peer_state.disk >= Negotiating &&
- inc_local_if_state(mdev, Negotiating)) {
+ if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
+ inc_local_if_state(mdev, D_NEGOTIATING)) {
int cr; /* consider resync */
- cr = (oconn < Connected);
- cr |= (oconn == Connected &&
- (peer_state.disk == Negotiating ||
- mdev->state.disk == Negotiating));
+ cr = (oconn < C_CONNECTED);
+ cr |= (oconn == C_CONNECTED &&
+ (peer_state.disk == D_NEGOTIATING ||
+ mdev->state.disk == D_NEGOTIATING));
cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); /* peer forced */
- cr |= (oconn == Connected && peer_state.conn > Connected);
+ cr |= (oconn == C_CONNECTED && peer_state.conn > C_CONNECTED);
if (cr)
nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
dec_local(mdev);
- if (nconn == conn_mask) {
- if (mdev->state.disk == Negotiating) {
- drbd_force_state(mdev, NS(disk, Diskless));
- nconn = Connected;
- } else if (peer_state.disk == Negotiating) {
+ if (nconn == C_MASK) {
+ if (mdev->state.disk == D_NEGOTIATING) {
+ drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ nconn = C_CONNECTED;
+ } else if (peer_state.disk == D_NEGOTIATING) {
dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
- peer_state.disk = Diskless;
+ peer_state.disk = D_DISKLESS;
} else {
- D_ASSERT(oconn == WFReportParams);
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ D_ASSERT(oconn == C_WF_REPORT_PARAMS);
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
}
@@ -3079,21 +3079,21 @@ STATIC int receive_state(struct drbd_conf *mdev, struct Drbd_Header *h)
ns.peer = peer_state.role;
ns.pdsk = real_peer_disk;
ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
- if ((nconn == Connected || nconn == WFBitMapS) && ns.disk == Negotiating)
+ if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
ns.disk = mdev->new_state_tmp.disk;
- rv = _drbd_set_state(mdev, ns, ChgStateVerbose | ChgStateHard, NULL);
+ rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
ns = mdev->state;
spin_unlock_irq(&mdev->req_lock);
- if (rv < SS_Success) {
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ if (rv < SS_SUCCESS) {
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return FALSE;
}
- if (oconn > WFReportParams) {
- if (nconn > Connected && peer_state.conn <= Connected &&
- peer_state.disk != Negotiating ) {
+ if (oconn > C_WF_REPORT_PARAMS) {
+ if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
+ peer_state.disk != D_NEGOTIATING ) {
/* we want resync, peer has not yet decided to sync... */
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpTpDate with that */
@@ -3109,15 +3109,15 @@ STATIC int receive_state(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int receive_sync_uuid(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_SyncUUID_Packet *p = (struct Drbd_SyncUUID_Packet *)h;
+ struct p_rs_uuid *p = (struct p_rs_uuid *)h;
wait_event(mdev->misc_wait,
- mdev->state.conn < Connected ||
- mdev->state.conn == WFSyncUUID);
+ mdev->state.conn < C_CONNECTED ||
+ mdev->state.conn == C_WF_SYNC_UUID);
- /* D_ASSERT( mdev->state.conn == WFSyncUUID ); */
+ /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
if (drbd_recv(mdev, h->payload, h->length) != h->length)
@@ -3125,11 +3125,11 @@ STATIC int receive_sync_uuid(struct drbd_conf *mdev, struct Drbd_Header *h)
/* Here the _drbd_uuid_ functions are right, current should
_not_ be rotated into the history */
- if (inc_local_if_state(mdev, Negotiating)) {
- _drbd_uuid_set(mdev, Current, be64_to_cpu(p->uuid));
- _drbd_uuid_set(mdev, Bitmap, 0UL);
+ if (inc_local_if_state(mdev, D_NEGOTIATING)) {
+ _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
+ _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
- drbd_start_resync(mdev, SyncTarget);
+ drbd_start_resync(mdev, C_SYNC_TARGET);
dec_local(mdev);
} else
@@ -3141,7 +3141,7 @@ STATIC int receive_sync_uuid(struct drbd_conf *mdev, struct Drbd_Header *h)
enum receive_bitmap_ret { OK, DONE, FAILED };
static enum receive_bitmap_ret
-receive_bitmap_plain(struct drbd_conf *mdev, struct Drbd_Header *h,
+receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
unsigned long *buffer, struct bm_xfer_ctx *c)
{
unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
@@ -3168,7 +3168,7 @@ receive_bitmap_plain(struct drbd_conf *mdev, struct Drbd_Header *h,
static enum receive_bitmap_ret
recv_bm_rle_bits(struct drbd_conf *mdev,
- struct Drbd_Compressed_Bitmap_Packet *p,
+ struct p_compressed_bm *p,
struct bm_xfer_ctx *c)
{
struct bitstream bs;
@@ -3228,7 +3228,7 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
static enum receive_bitmap_ret
recv_bm_rle_bytes(struct drbd_conf *mdev,
- struct Drbd_Compressed_Bitmap_Packet *p,
+ struct p_compressed_bm *p,
struct bm_xfer_ctx *c)
{
u64 rl;
@@ -3282,7 +3282,7 @@ recv_bm_rle_bytes(struct drbd_conf *mdev,
static enum receive_bitmap_ret
decode_bitmap_c(struct drbd_conf *mdev,
- struct Drbd_Compressed_Bitmap_Packet *p,
+ struct p_compressed_bm *p,
struct bm_xfer_ctx *c)
{
switch (DCBP_get_code(p)) {
@@ -3304,7 +3304,7 @@ decode_bitmap_c(struct drbd_conf *mdev,
void INFO_bm_xfer_stats(struct drbd_conf *mdev,
const char *direction, struct bm_xfer_ctx *c)
{
- unsigned plain_would_take = sizeof(struct Drbd_Header) *
+ unsigned plain_would_take = sizeof(struct p_header) *
((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
+ c->bm_words * sizeof(long);
unsigned total = c->bytes[0] + c->bytes[1];
@@ -3334,7 +3334,7 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
in order to be agnostic to the 32 vs 64 bits issue.
returns 0 on failure, 1 if we suceessfully received it. */
-STATIC int receive_bitmap(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
{
struct bm_xfer_ctx c;
void *buffer;
@@ -3359,12 +3359,12 @@ STATIC int receive_bitmap(struct drbd_conf *mdev, struct Drbd_Header *h)
};
do {
- if (h->command == ReportBitMap) {
+ if (h->command == P_BITMAP) {
ret = receive_bitmap_plain(mdev, h, buffer, &c);
- } else if (h->command == ReportCBitMap) {
+ } else if (h->command == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
- struct Drbd_Compressed_Bitmap_Packet *p;
+ struct p_compressed_bm *p;
if (h->length > BM_PACKET_PAYLOAD_BYTES) {
dev_err(DEV, "ReportCBitmap packet too large\n");
@@ -3385,8 +3385,8 @@ STATIC int receive_bitmap(struct drbd_conf *mdev, struct Drbd_Header *h)
goto out;
}
- c.packets[h->command == ReportBitMap]++;
- c.bytes[h->command == ReportBitMap] += sizeof(struct Drbd_Header) + h->length;
+ c.packets[h->command == P_BITMAP]++;
+ c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
if (ret != OK)
break;
@@ -3399,15 +3399,15 @@ STATIC int receive_bitmap(struct drbd_conf *mdev, struct Drbd_Header *h)
INFO_bm_xfer_stats(mdev, "receive", &c);
- if (mdev->state.conn == WFBitMapT) {
+ if (mdev->state.conn == C_WF_BITMAP_T) {
ok = !drbd_send_bitmap(mdev);
if (!ok)
goto out;
- /* Omit ChgOrdered with this state transition to avoid deadlocks. */
- ok = _drbd_request_state(mdev, NS(conn, WFSyncUUID), ChgStateVerbose);
- D_ASSERT(ok == SS_Success);
- } else if (mdev->state.conn != WFBitMapS) {
- /* admin may have requested Disconnecting,
+ /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
+ ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ D_ASSERT(ok == SS_SUCCESS);
+ } else if (mdev->state.conn != C_WF_BITMAP_S) {
+ /* admin may have requested C_DISCONNECTING,
* other threads may have noticed network errors */
dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
conns_to_name(mdev->state.conn));
@@ -3416,13 +3416,13 @@ STATIC int receive_bitmap(struct drbd_conf *mdev, struct Drbd_Header *h)
ok = TRUE;
out:
drbd_bm_unlock(mdev);
- if (ok && mdev->state.conn == WFBitMapS)
- drbd_start_resync(mdev, SyncSource);
+ if (ok && mdev->state.conn == C_WF_BITMAP_S)
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
free_page((unsigned long) buffer);
return ok;
}
-STATIC int receive_skip(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_skip(struct drbd_conf *mdev, struct p_header *h)
{
/* TODO zero copy sink :) */
static char sink[128];
@@ -3441,9 +3441,9 @@ STATIC int receive_skip(struct drbd_conf *mdev, struct Drbd_Header *h)
return size == 0;
}
-STATIC int receive_UnplugRemote(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
{
- if (mdev->state.disk >= Inconsistent)
+ if (mdev->state.disk >= D_INCONSISTENT)
drbd_kick_lo(mdev);
/* Make sure we've acked all the TCP data associated
@@ -3453,32 +3453,32 @@ STATIC int receive_UnplugRemote(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct Drbd_Header *);
+typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
static drbd_cmd_handler_f drbd_default_handler[] = {
- [Data] = receive_Data,
- [DataReply] = receive_DataReply,
- [RSDataReply] = receive_RSDataReply,
- [Barrier] = receive_Barrier,
- [ReportBitMap] = receive_bitmap,
- [ReportCBitMap] = receive_bitmap,
- [UnplugRemote] = receive_UnplugRemote,
- [DataRequest] = receive_DataRequest,
- [RSDataRequest] = receive_DataRequest,
- [SyncParam] = receive_SyncParam,
- [SyncParam89] = receive_SyncParam,
- [ReportProtocol] = receive_protocol,
- [ReportUUIDs] = receive_uuids,
- [ReportSizes] = receive_sizes,
- [ReportState] = receive_state,
- [StateChgRequest] = receive_req_state,
- [ReportSyncUUID] = receive_sync_uuid,
- [OVRequest] = receive_DataRequest,
- [OVReply] = receive_DataRequest,
- [CsumRSRequest] = receive_DataRequest,
+ [P_DATA] = receive_Data,
+ [P_DATA_REPLY] = receive_DataReply,
+ [P_RS_DATA_REPLY] = receive_RSDataReply,
+ [P_BARRIER] = receive_Barrier,
+ [P_BITMAP] = receive_bitmap,
+ [P_COMPRESSED_BITMAP] = receive_bitmap,
+ [P_UNPLUG_REMOTE] = receive_UnplugRemote,
+ [P_DATA_REQUEST] = receive_DataRequest,
+ [P_RS_DATA_REQUEST] = receive_DataRequest,
+ [P_SYNC_PARAM] = receive_SyncParam,
+ [P_SYNC_PARAM89] = receive_SyncParam,
+ [P_PROTOCOL] = receive_protocol,
+ [P_UUIDS] = receive_uuids,
+ [P_SIZES] = receive_sizes,
+ [P_STATE] = receive_state,
+ [P_STATE_CHG_REQ] = receive_req_state,
+ [P_SYNC_UUID] = receive_sync_uuid,
+ [P_OV_REQUEST] = receive_DataRequest,
+ [P_OV_REPLY] = receive_DataRequest,
+ [P_CSUM_RS_REQUEST] = receive_DataRequest,
/* anything missing from this table is in
* the asender_tbl, see get_asender_cmd */
- [MAX_CMD] = NULL,
+ [P_MAX_CMD] = NULL,
};
static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
@@ -3487,19 +3487,19 @@ static drbd_cmd_handler_f *drbd_opt_cmd_handler;
STATIC void drbdd(struct drbd_conf *mdev)
{
drbd_cmd_handler_f handler;
- struct Drbd_Header *header = &mdev->data.rbuf.head;
+ struct p_header *header = &mdev->data.rbuf.header;
while (get_t_state(&mdev->receiver) == Running) {
drbd_thread_current_set_cpu(mdev);
if (!drbd_recv_header(mdev, header))
break;
- if (header->command < MAX_CMD)
+ if (header->command < P_MAX_CMD)
handler = drbd_cmd_handler[header->command];
- else if (MayIgnore < header->command
- && header->command < MAX_OPT_CMD)
- handler = drbd_opt_cmd_handler[header->command-MayIgnore];
- else if (header->command > MAX_OPT_CMD)
+ else if (P_MAY_IGNORE < header->command
+ && header->command < P_MAX_OPT_CMD)
+ handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
+ else if (header->command > P_MAX_OPT_CMD)
handler = receive_skip;
else
handler = NULL;
@@ -3507,13 +3507,13 @@ STATIC void drbdd(struct drbd_conf *mdev)
if (unlikely(!handler)) {
dev_err(DEV, "unknown packet type %d, l: %d!\n",
header->command, header->length);
- drbd_force_state(mdev, NS(conn, ProtocolError));
+ drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
break;
}
if (unlikely(!handler(mdev, header))) {
dev_err(DEV, "error receiving %s, l: %d!\n",
cmdname(header->command), header->length);
- drbd_force_state(mdev, NS(conn, ProtocolError));
+ drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
break;
}
@@ -3558,14 +3558,14 @@ STATIC void drbd_fail_pending_reads(struct drbd_conf *mdev)
STATIC void drbd_disconnect(struct drbd_conf *mdev)
{
struct drbd_work prev_work_done;
- enum fencing_policy fp;
- union drbd_state_t os, ns;
- int rv = SS_UnknownError;
+ enum drbd_fencing_p fp;
+ union drbd_state os, ns;
+ int rv = SS_UNKNOWN_ERROR;
unsigned int i;
- if (mdev->state.conn == StandAlone)
+ if (mdev->state.conn == C_STANDALONE)
return;
- if (mdev->state.conn >= WFConnection)
+ if (mdev->state.conn >= C_WF_CONNECTION)
dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
conns_to_name(mdev->state.conn));
@@ -3586,10 +3586,10 @@ STATIC void drbd_disconnect(struct drbd_conf *mdev)
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
- * * On SyncTarget we do not have any data structures describing
+ * * On C_SYNC_TARGET we do not have any data structures describing
* the pending RSDataRequest's we have sent.
- * * On SyncSource there is no data structure that tracks
- * the RSDataReply blocks that we sent to the SyncTarget.
+ * * On C_SYNC_SOURCE there is no data structure that tracks
+ * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
* And no, it is not the sum of the reference counts in the
* resync_LRU. The resync_LRU tracks the whole operation including
* the disk-IO, while the rs_pending_cnt only tracks the blocks
@@ -3625,14 +3625,14 @@ STATIC void drbd_disconnect(struct drbd_conf *mdev)
drbd_md_sync(mdev);
- fp = DontCare;
+ fp = FP_DONT_CARE;
if (inc_local(mdev)) {
fp = mdev->bc->dc.fencing;
dec_local(mdev);
}
- if (mdev->state.role == Primary) {
- if (fp >= Resource && mdev->state.pdsk >= DUnknown) {
+ if (mdev->state.role == R_PRIMARY) {
+ if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
drbd_request_state(mdev, NS(pdsk, nps));
}
@@ -3640,15 +3640,15 @@ STATIC void drbd_disconnect(struct drbd_conf *mdev)
spin_lock_irq(&mdev->req_lock);
os = mdev->state;
- if (os.conn >= Unconnected) {
- /* Do not restart in case we are Disconnecting */
+ if (os.conn >= C_UNCONNECTED) {
+ /* Do not restart in case we are C_DISCONNECTING */
ns = os;
- ns.conn = Unconnected;
- rv = _drbd_set_state(mdev, ns, ChgStateVerbose, NULL);
+ ns.conn = C_UNCONNECTED;
+ rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
spin_unlock_irq(&mdev->req_lock);
- if (os.conn == Disconnecting) {
+ if (os.conn == C_DISCONNECTING) {
struct hlist_head *h;
wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
@@ -3681,7 +3681,7 @@ STATIC void drbd_disconnect(struct drbd_conf *mdev)
kfree(mdev->net_conf);
mdev->net_conf = NULL;
- drbd_request_state(mdev, NS(conn, StandAlone));
+ drbd_request_state(mdev, NS(conn, C_STANDALONE));
}
/* they do trigger all the time.
@@ -3715,7 +3715,7 @@ STATIC void drbd_disconnect(struct drbd_conf *mdev)
STATIC int drbd_send_handshake(struct drbd_conf *mdev)
{
/* ASSERT current == mdev->receiver ... */
- struct Drbd_HandShake_Packet *p = &mdev->data.sbuf.HandShake;
+ struct p_handshake *p = &mdev->data.sbuf.handshake;
int ok;
if (mutex_lock_interruptible(&mdev->data.mutex)) {
@@ -3731,8 +3731,8 @@ STATIC int drbd_send_handshake(struct drbd_conf *mdev)
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- ok = _drbd_send_cmd( mdev, mdev->data.socket, HandShake,
- (struct Drbd_Header *)p, sizeof(*p), 0 );
+ ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
+ (struct p_header *)p, sizeof(*p), 0 );
mutex_unlock(&mdev->data.mutex);
return ok;
}
@@ -3747,9 +3747,9 @@ STATIC int drbd_send_handshake(struct drbd_conf *mdev)
int drbd_do_handshake(struct drbd_conf *mdev)
{
/* ASSERT current == mdev->receiver ... */
- struct Drbd_HandShake_Packet *p = &mdev->data.rbuf.HandShake;
- const int expect = sizeof(struct Drbd_HandShake_Packet)
- -sizeof(struct Drbd_Header);
+ struct p_handshake *p = &mdev->data.rbuf.handshake;
+ const int expect = sizeof(struct p_handshake)
+ -sizeof(struct p_header);
int rv;
rv = drbd_send_handshake(mdev);
@@ -3760,7 +3760,7 @@ int drbd_do_handshake(struct drbd_conf *mdev)
if (!rv)
return 0;
- if (p->head.command != HandShake) {
+ if (p->head.command != P_HAND_SHAKE) {
dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
cmdname(p->head.command), p->head.command);
return -1;
@@ -3822,7 +3822,7 @@ int drbd_do_auth(struct drbd_conf *mdev)
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
- struct Drbd_Header p;
+ struct p_header p;
unsigned int key_len = strlen(mdev->net_conf->shared_secret);
unsigned int resp_size;
struct hash_desc desc;
@@ -3841,7 +3841,7 @@ int drbd_do_auth(struct drbd_conf *mdev)
get_random_bytes(my_challenge, CHALLENGE_LEN);
- rv = drbd_send_cmd2(mdev, AuthChallenge, my_challenge, CHALLENGE_LEN);
+ rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
@@ -3849,7 +3849,7 @@ int drbd_do_auth(struct drbd_conf *mdev)
if (!rv)
goto fail;
- if (p.command != AuthChallenge) {
+ if (p.command != P_AUTH_CHALLENGE) {
dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
cmdname(p.command), p.command);
rv = 0;
@@ -3895,7 +3895,7 @@ int drbd_do_auth(struct drbd_conf *mdev)
goto fail;
}
- rv = drbd_send_cmd2(mdev, AuthResponse, response, resp_size);
+ rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
if (!rv)
goto fail;
@@ -3903,7 +3903,7 @@ int drbd_do_auth(struct drbd_conf *mdev)
if (!rv)
goto fail;
- if (p.command != AuthResponse) {
+ if (p.command != P_AUTH_RESPONSE) {
dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
cmdname(p.command), p.command);
rv = 0;
@@ -3955,7 +3955,7 @@ int drbd_do_auth(struct drbd_conf *mdev)
}
#endif
-STATIC int drbdd_init(struct Drbd_thread *thi)
+STATIC int drbdd_init(struct drbd_thread *thi)
{
struct drbd_conf *mdev = thi->mdev;
unsigned int minor = mdev_to_minor(mdev);
@@ -3974,7 +3974,7 @@ STATIC int drbdd_init(struct Drbd_thread *thi)
}
if (h == -1) {
dev_warn(DEV, "Discarding network configuration.\n");
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
}
} while (h == 0);
@@ -3993,13 +3993,13 @@ STATIC int drbdd_init(struct Drbd_thread *thi)
/* ********* acknowledge sender ******** */
-STATIC int got_RqSReply(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_RqS_Reply_Packet *p = (struct Drbd_RqS_Reply_Packet *)h;
+ struct p_req_state_reply *p = (struct p_req_state_reply *)h;
int retcode = be32_to_cpu(p->retcode);
- if (retcode >= SS_Success) {
+ if (retcode >= SS_SUCCESS) {
set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
} else {
set_bit(CL_ST_CHG_FAIL, &mdev->flags);
@@ -4011,13 +4011,13 @@ STATIC int got_RqSReply(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int got_Ping(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_Ping(struct drbd_conf *mdev, struct p_header *h)
{
return drbd_send_ping_ack(mdev);
}
-STATIC int got_PingAck(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
{
/* restore idle timeout */
mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
@@ -4025,9 +4025,9 @@ STATIC int got_PingAck(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int got_IsInSync(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_BlockAck_Packet *p = (struct Drbd_BlockAck_Packet *)h;
+ struct p_block_ack *p = (struct p_block_ack *)h;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
@@ -4044,10 +4044,10 @@ STATIC int got_IsInSync(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int got_BlockAck(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
{
struct drbd_request *req;
- struct Drbd_BlockAck_Packet *p = (struct Drbd_BlockAck_Packet *)h;
+ struct p_block_ack *p = (struct p_block_ack *)h;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
@@ -4067,19 +4067,19 @@ STATIC int got_BlockAck(struct drbd_conf *mdev, struct Drbd_Header *h)
}
switch (be16_to_cpu(h->command)) {
- case RSWriteAck:
+ case P_RS_WRITE_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
_req_mod(req, write_acked_by_peer_and_sis, 0);
break;
- case WriteAck:
+ case P_WRITE_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
_req_mod(req, write_acked_by_peer, 0);
break;
- case RecvAck:
+ case P_RECV_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
_req_mod(req, recv_acked_by_peer, 0);
break;
- case DiscardAck:
+ case P_DISCARD_ACK:
D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
" DRBD is not a random data generator!\n",
@@ -4096,9 +4096,9 @@ STATIC int got_BlockAck(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int got_NegAck(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_BlockAck_Packet *p = (struct Drbd_BlockAck_Packet *)h;
+ struct p_block_ack *p = (struct p_block_ack *)h;
sector_t sector = be64_to_cpu(p->sector);
struct drbd_request *req;
@@ -4130,10 +4130,10 @@ STATIC int got_NegAck(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int got_NegDReply(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
{
struct drbd_request *req;
- struct Drbd_BlockAck_Packet *p = (struct Drbd_BlockAck_Packet *)h;
+ struct p_block_ack *p = (struct p_block_ack *)h;
sector_t sector = be64_to_cpu(p->sector);
spin_lock_irq(&mdev->req_lock);
@@ -4155,11 +4155,11 @@ STATIC int got_NegDReply(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int got_NegRSDReply(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
{
sector_t sector;
int size;
- struct Drbd_BlockAck_Packet *p = (struct Drbd_BlockAck_Packet *)h;
+ struct p_block_ack *p = (struct p_block_ack *)h;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -4169,7 +4169,7 @@ STATIC int got_NegRSDReply(struct drbd_conf *mdev, struct Drbd_Header *h)
dec_rs_pending(mdev);
- if (inc_local_if_state(mdev, Failed)) {
+ if (inc_local_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, sector);
drbd_rs_failed_io(mdev, sector, size);
dec_local(mdev);
@@ -4178,18 +4178,18 @@ STATIC int got_NegRSDReply(struct drbd_conf *mdev, struct Drbd_Header *h)
return TRUE;
}
-STATIC int got_BarrierAck(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_BarrierAck_Packet *p = (struct Drbd_BarrierAck_Packet *)h;
+ struct p_barrier_ack *p = (struct p_barrier_ack *)h;
tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
return TRUE;
}
-STATIC int got_OVResult(struct drbd_conf *mdev, struct Drbd_Header *h)
+STATIC int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
{
- struct Drbd_BlockAck_Packet *p = (struct Drbd_BlockAck_Packet *)h;
+ struct p_block_ack *p = (struct p_block_ack *)h;
struct drbd_work *w;
sector_t sector;
int size;
@@ -4222,7 +4222,7 @@ STATIC int got_OVResult(struct drbd_conf *mdev, struct Drbd_Header *h)
struct asender_cmd {
size_t pkt_size;
- int (*process)(struct drbd_conf *mdev, struct Drbd_Header *h);
+ int (*process)(struct drbd_conf *mdev, struct p_header *h);
};
static struct asender_cmd *get_asender_cmd(int cmd)
@@ -4231,36 +4231,34 @@ static struct asender_cmd *get_asender_cmd(int cmd)
/* anything missing from this table is in
* the drbd_cmd_handler (drbd_default_handler) table,
* see the beginning of drbdd() */
- [Ping] = { sizeof(struct Drbd_Header), got_Ping },
- [PingAck] = { sizeof(struct Drbd_Header), got_PingAck },
- [RecvAck] = { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
- [WriteAck] = { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
- [RSWriteAck] = { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
- [DiscardAck] = { sizeof(struct Drbd_BlockAck_Packet), got_BlockAck },
- [NegAck] = { sizeof(struct Drbd_BlockAck_Packet), got_NegAck },
- [NegDReply] = { sizeof(struct Drbd_BlockAck_Packet), got_NegDReply },
- [NegRSDReply] = { sizeof(struct Drbd_BlockAck_Packet), got_NegRSDReply},
- [OVResult] = { sizeof(struct Drbd_BlockAck_Packet), got_OVResult },
- [BarrierAck] = { sizeof(struct Drbd_BarrierAck_Packet), got_BarrierAck },
- [StateChgReply] = { sizeof(struct Drbd_RqS_Reply_Packet), got_RqSReply },
- [RSIsInSync] = { sizeof(struct Drbd_BlockAck_Packet), got_IsInSync },
- [MAX_CMD] = { 0, NULL },
+ [P_PING] = { sizeof(struct p_header), got_Ping },
+ [P_PING_ACK] = { sizeof(struct p_header), got_PingAck },
+ [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
+ [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
+ [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
+ [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
+ [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
+ [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
};
- if (cmd > MAX_CMD)
+ if (cmd > P_MAX_CMD)
return NULL;
return &asender_tbl[cmd];
}
-STATIC int drbd_asender(struct Drbd_thread *thi)
+STATIC int drbd_asender(struct drbd_thread *thi)
{
struct drbd_conf *mdev = thi->mdev;
- struct Drbd_Header *h = &mdev->meta.rbuf.head;
+ struct p_header *h = &mdev->meta.rbuf.header;
struct asender_cmd *cmd = NULL;
int rv, len;
void *buf = h;
int received = 0;
- int expect = sizeof(struct Drbd_Header);
+ int expect = sizeof(struct p_header);
int empty;
sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
@@ -4360,7 +4358,7 @@ STATIC int drbd_asender(struct Drbd_thread *thi)
goto disconnect;
}
expect = cmd->pkt_size;
- ERR_IF(len != expect-sizeof(struct Drbd_Header)) {
+ ERR_IF(len != expect-sizeof(struct p_header)) {
dump_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
DUMPI(expect);
goto reconnect;
@@ -4374,22 +4372,22 @@ STATIC int drbd_asender(struct Drbd_thread *thi)
buf = h;
received = 0;
- expect = sizeof(struct Drbd_Header);
+ expect = sizeof(struct p_header);
cmd = NULL;
}
}
if (0) {
reconnect:
- drbd_force_state(mdev, NS(conn, NetworkFailure));
+ drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
}
if (0) {
disconnect:
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
}
clear_bit(SIGNAL_ASENDER, &mdev->flags);
- D_ASSERT(mdev->state.conn < Connected);
+ D_ASSERT(mdev->state.conn < C_CONNECTED);
dev_info(DEV, "asender terminated\n");
return 0;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index cace6b7d9d27..cbfcb6b8b4d4 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -93,9 +93,9 @@ STATIC void _print_req_mod(struct drbd_request *req, enum drbd_req_event what)
# ifdef ENABLE_DYNAMIC_TRACE
# define print_rq_state(R, T) \
- MTRACE(TraceTypeRq, TraceLvlMetrics, _print_rq_state(R, T);)
+ MTRACE(TRACE_TYPE_RQ, TRACE_LVL_METRICS, _print_rq_state(R, T);)
# define print_req_mod(T, W) \
- MTRACE(TraceTypeRq, TraceLvlMetrics, _print_req_mod(T, W);)
+ MTRACE(TRACE_TYPE_RQ, TRACE_LVL_METRICS, _print_req_mod(T, W);)
# else
# define print_rq_state(R, T) _print_rq_state(R, T)
# define print_req_mod(T, W) _print_req_mod(T, W)
@@ -166,7 +166,7 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* we would forget to resync the corresponding extent.
*/
if (s & RQ_LOCAL_MASK) {
- if (inc_local_if_state(mdev, Failed)) {
+ if (inc_local_if_state(mdev, D_FAILED)) {
drbd_al_complete_io(mdev, req->sector);
dec_local(mdev);
} else if (__ratelimit(&drbd_ratelimit_state)) {
@@ -207,7 +207,7 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
static void queue_barrier(struct drbd_conf *mdev)
{
- struct drbd_barrier *b;
+ struct drbd_tl_epoch *b;
/* We are within the req_lock. Once we queued the barrier for sending,
* we set the CREATE_BARRIER bit. It is cleared as soon as a new
@@ -217,7 +217,7 @@ static void queue_barrier(struct drbd_conf *mdev)
if (test_bit(CREATE_BARRIER, &mdev->flags))
return;
- b = mdev->newest_barrier;
+ b = mdev->newest_tle;
b->w.cb = w_send_barrier;
/* inc_ap_pending done here, so we won't
* get imbalanced on connection loss.
@@ -233,14 +233,14 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
{
const unsigned long s = req->rq_state;
struct drbd_request *i;
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct hlist_node *n;
struct hlist_head *slot;
/* before we can signal completion to the upper layers,
* we may need to close the current epoch */
- if (mdev->state.conn >= Connected &&
- req->epoch == mdev->newest_barrier->br_number)
+ if (mdev->state.conn >= C_CONNECTED &&
+ req->epoch == mdev->newest_tle->br_number)
queue_barrier(mdev);
/* we need to do the conflict detection stuff,
@@ -269,7 +269,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
*
* currently, there can be only _one_ such ee
* (well, or some more, which would be pending
- * DiscardAck not yet sent by the asender...),
+ * P_DISCARD_ACK not yet sent by the asender...),
* since we block the receiver thread upon the
* first conflict detection, which will wait on
* misc_wait. maybe we want to assert that?
@@ -324,8 +324,8 @@ void _req_may_be_done(struct drbd_request *req, int error)
if (req->master_bio) {
/* this is data_received (remote read)
- * or protocol C WriteAck
- * or protocol B RecvAck
+ * or protocol C P_WRITE_ACK
+ * or protocol B P_RECV_ACK
* or protocol A "handed_over_to_network" (SendAck)
* or canceled or failed,
* or killed from the transfer log due to connection loss.
@@ -367,8 +367,8 @@ void _req_may_be_done(struct drbd_request *req, int error)
if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
/* this is disconnected (local only) operation,
- * or protocol C WriteAck,
- * or protocol A or B BarrierAck,
+ * or protocol C P_WRITE_ACK,
+ * or protocol A or B P_BARRIER_ACK,
* or killed from the transfer log due to connection loss. */
_req_is_done(mdev, req, rw);
}
@@ -405,7 +405,7 @@ STATIC int _req_conflicts(struct drbd_request *req)
const sector_t sector = req->sector;
const int size = req->size;
struct drbd_request *i;
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
struct hlist_node *n;
struct hlist_head *slot;
@@ -624,12 +624,12 @@ void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
* just after it grabs the req_lock */
D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
- req->epoch = mdev->newest_barrier->br_number;
+ req->epoch = mdev->newest_tle->br_number;
list_add_tail(&req->tl_requests,
- &mdev->newest_barrier->requests);
+ &mdev->newest_tle->requests);
/* increment size of current epoch */
- mdev->newest_barrier->n_req++;
+ mdev->newest_tle->n_req++;
/* queue work item to send data */
D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -638,7 +638,7 @@ void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
drbd_queue_work(&mdev->data.work, &req->w);
/* close the epoch, in case it outgrew the limit */
- if (mdev->newest_barrier->n_req >= mdev->net_conf->max_epoch_size)
+ if (mdev->newest_tle->n_req >= mdev->net_conf->max_epoch_size)
queue_barrier(mdev);
break;
@@ -709,7 +709,7 @@ void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
* A barrier request is expected to have forced all prior
* requests onto stable storage, so completion of a barrier
* request could set NET_DONE right here, and not wait for the
- * BarrierAck, but that is an unecessary optimisation. */
+ * P_BARRIER_ACK, but that is an unnecessary optimisation. */
/* this makes it effectively the same as for: */
case recv_acked_by_peer:
@@ -770,13 +770,13 @@ STATIC int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
- if (mdev->state.disk == UpToDate)
+ if (mdev->state.disk == D_UP_TO_DATE)
return 1;
- if (mdev->state.disk >= Outdated)
+ if (mdev->state.disk >= D_OUTDATED)
return 0;
- if (mdev->state.disk < Inconsistent)
+ if (mdev->state.disk < D_INCONSISTENT)
return 0;
- /* state.disk == Inconsistent We will have a look at the BitMap */
+ /* state.disk == D_INCONSISTENT We will have a look at the BitMap */
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
@@ -794,7 +794,7 @@ STATIC int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
const int rw = bio_rw(bio);
const int size = bio->bi_size;
const sector_t sector = bio->bi_sector;
- struct drbd_barrier *b = NULL;
+ struct drbd_tl_epoch *b = NULL;
struct drbd_request *req;
int local, remote;
int err = -EIO;
@@ -834,18 +834,18 @@ STATIC int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
dec_local(mdev);
}
}
- remote = !local && mdev->state.pdsk >= UpToDate;
+ remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
}
/* If we have a disk, but a READA request is mapped to remote,
- * we are Primary, Inconsistent, SyncTarget.
+ * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
* Just fail that READA request right here.
*
* THINK: maybe fail all READA when not local?
* or make this configurable...
* if network is slow, READA won't do any good.
*/
- if (rw == READA && mdev->state.disk >= Inconsistent && !local) {
+ if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
err = -EWOULDBLOCK;
goto fail_and_free_req;
}
@@ -858,9 +858,9 @@ STATIC int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
if (rw == WRITE && local)
drbd_al_begin_io(mdev, sector);
- remote = remote && (mdev->state.pdsk == UpToDate ||
- (mdev->state.pdsk == Inconsistent &&
- mdev->state.conn >= Connected));
+ remote = remote && (mdev->state.pdsk == D_UP_TO_DATE ||
+ (mdev->state.pdsk == D_INCONSISTENT &&
+ mdev->state.conn >= C_CONNECTED));
if (!(local || remote)) {
dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
@@ -868,16 +868,16 @@ STATIC int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
}
/* For WRITE request, we have to make sure that we have an
- * unused_spare_barrier, in case we need to start a new epoch.
+ * unused_spare_tle, in case we need to start a new epoch.
* I try to be smart and avoid to pre-allocate always "just in case",
* but there is a race between testing the bit and pointer outside the
* spinlock, and grabbing the spinlock.
* if we lost that race, we retry. */
if (rw == WRITE && remote &&
- mdev->unused_spare_barrier == NULL &&
+ mdev->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
allocate_barrier:
- b = kmalloc(sizeof(struct drbd_barrier), GFP_NOIO);
+ b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
if (!b) {
dev_err(DEV, "Failed to alloc barrier.\n");
err = -ENOMEM;
@@ -889,9 +889,9 @@ allocate_barrier:
spin_lock_irq(&mdev->req_lock);
if (remote) {
- remote = (mdev->state.pdsk == UpToDate ||
- (mdev->state.pdsk == Inconsistent &&
- mdev->state.conn >= Connected));
+ remote = (mdev->state.pdsk == D_UP_TO_DATE ||
+ (mdev->state.pdsk == D_INCONSISTENT &&
+ mdev->state.conn >= C_CONNECTED));
if (!remote)
dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
if (!(local || remote)) {
@@ -901,12 +901,12 @@ allocate_barrier:
}
}
- if (b && mdev->unused_spare_barrier == NULL) {
- mdev->unused_spare_barrier = b;
+ if (b && mdev->unused_spare_tle == NULL) {
+ mdev->unused_spare_tle = b;
b = NULL;
}
if (rw == WRITE && remote &&
- mdev->unused_spare_barrier == NULL &&
+ mdev->unused_spare_tle == NULL &&
test_bit(CREATE_BARRIER, &mdev->flags)) {
/* someone closed the current epoch
* while we were grabbing the spinlock */
@@ -928,10 +928,10 @@ allocate_barrier:
* barrier packet. To get the write ordering right, we only have to
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
- if (remote && mdev->unused_spare_barrier &&
+ if (remote && mdev->unused_spare_tle &&
test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, mdev->unused_spare_barrier);
- mdev->unused_spare_barrier = NULL;
+ _tl_add_barrier(mdev, mdev->unused_spare_tle);
+ mdev->unused_spare_tle = NULL;
} else {
D_ASSERT(!(remote && rw == WRITE &&
test_bit(CREATE_BARRIER, &mdev->flags)));
@@ -988,7 +988,7 @@ allocate_barrier:
/* NOTE remote first: to get the concurrent write detection right,
* we must register the request before start of local IO. */
if (remote) {
- /* either WRITE and Connected,
+ /* either WRITE and C_CONNECTED,
* or READ, and no local disk,
* or READ, but not in sync.
*/
@@ -1044,11 +1044,11 @@ fail_and_free_req:
static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
{
/* Unconfigured */
- if (mdev->state.conn == Disconnecting &&
- mdev->state.disk == Diskless)
+ if (mdev->state.conn == C_DISCONNECTING &&
+ mdev->state.disk == D_DISKLESS)
return 1;
- if (mdev->state.role != Primary &&
+ if (mdev->state.role != R_PRIMARY &&
(!allow_oos || is_write)) {
if (__ratelimit(&drbd_ratelimit_state)) {
dev_err(DEV, "Process %s[%u] tried to %s; "
@@ -1069,7 +1069,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
* to serialize state changes, this is racy, since we may lose
* the connection *after* we test for the cstate.
*/
- if (mdev->state.disk < UpToDate && mdev->state.pdsk < UpToDate) {
+ if (mdev->state.disk < D_UP_TO_DATE && mdev->state.pdsk < D_UP_TO_DATE) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sorry, I have no access to good data anymore.\n");
return 1;
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 6c7c9635da30..8866ea62f431 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -39,7 +39,7 @@
Try to get the locking right :) */
/*
- * Objects of type struct drbd_request do only exist on a Primary node, and are
+ * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
* associated with IO requests originating from the block layer above us.
*
* There are quite a few things that may happen to a drbd request
@@ -168,7 +168,7 @@ enum drbd_req_state_bits {
__RQ_NET_SENT,
/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
- * basically this means the corresponding BarrierAck was received */
+ * basically this means the corresponding P_BARRIER_ACK was received */
__RQ_NET_DONE,
/* whether or not we know (C) or pretend (B,A) that the write
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 491019c8331d..b230693f35e6 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -26,90 +26,88 @@
#include <linux/drbd.h>
static const char *drbd_conn_s_names[] = {
- [StandAlone] = "StandAlone",
- [Disconnecting] = "Disconnecting",
- [Unconnected] = "Unconnected",
- [Timeout] = "Timeout",
- [BrokenPipe] = "BrokenPipe",
- [NetworkFailure] = "NetworkFailure",
- [ProtocolError] = "ProtocolError",
- [WFConnection] = "WFConnection",
- [WFReportParams] = "WFReportParams",
- [TearDown] = "TearDown",
- [Connected] = "Connected",
- [StartingSyncS] = "StartingSyncS",
- [StartingSyncT] = "StartingSyncT",
- [WFBitMapS] = "WFBitMapS",
- [WFBitMapT] = "WFBitMapT",
- [WFSyncUUID] = "WFSyncUUID",
- [SyncSource] = "SyncSource",
- [SyncTarget] = "SyncTarget",
- [VerifyS] = "VerifyS",
- [VerifyT] = "VerifyT",
- [PausedSyncS] = "PausedSyncS",
- [PausedSyncT] = "PausedSyncT"
+ [C_STANDALONE] = "StandAlone",
+ [C_DISCONNECTING] = "Disconnecting",
+ [C_UNCONNECTED] = "Unconnected",
+ [C_TIMEOUT] = "Timeout",
+ [C_BROKEN_PIPE] = "BrokenPipe",
+ [C_NETWORK_FAILURE] = "NetworkFailure",
+ [C_PROTOCOL_ERROR] = "ProtocolError",
+ [C_WF_CONNECTION] = "WFConnection",
+ [C_WF_REPORT_PARAMS] = "WFReportParams",
+ [C_TEAR_DOWN] = "TearDown",
+ [C_CONNECTED] = "Connected",
+ [C_STARTING_SYNC_S] = "StartingSyncS",
+ [C_STARTING_SYNC_T] = "StartingSyncT",
+ [C_WF_BITMAP_S] = "WFBitMapS",
+ [C_WF_BITMAP_T] = "WFBitMapT",
+ [C_WF_SYNC_UUID] = "WFSyncUUID",
+ [C_SYNC_SOURCE] = "SyncSource",
+ [C_SYNC_TARGET] = "SyncTarget",
+ [C_PAUSED_SYNC_S] = "PausedSyncS",
+ [C_PAUSED_SYNC_T] = "PausedSyncT",
+ [C_VERIFY_S] = "VerifyS",
+ [C_VERIFY_T] = "VerifyT",
};
static const char *drbd_role_s_names[] = {
- [Primary] = "Primary",
- [Secondary] = "Secondary",
- [Unknown] = "Unknown"
+ [R_PRIMARY] = "Primary",
+ [R_SECONDARY] = "Secondary",
+ [R_UNKNOWN] = "Unknown"
};
static const char *drbd_disk_s_names[] = {
- [Diskless] = "Diskless",
- [Attaching] = "Attaching",
- [Failed] = "Failed",
- [Negotiating] = "Negotiating",
- [Inconsistent] = "Inconsistent",
- [Outdated] = "Outdated",
- [DUnknown] = "DUnknown",
- [Consistent] = "Consistent",
- [UpToDate] = "UpToDate",
+ [D_DISKLESS] = "Diskless",
+ [D_ATTACHING] = "Attaching",
+ [D_FAILED] = "Failed",
+ [D_NEGOTIATING] = "Negotiating",
+ [D_INCONSISTENT] = "Inconsistent",
+ [D_OUTDATED] = "Outdated",
+ [D_UNKNOWN] = "DUnknown",
+ [D_CONSISTENT] = "Consistent",
+ [D_UP_TO_DATE] = "UpToDate",
};
static const char *drbd_state_sw_errors[] = {
- [-SS_TwoPrimaries] = "Multiple primaries not allowed by config",
- [-SS_NoUpToDateDisk] =
- "Refusing to be Primary without at least one UpToDate disk",
- [-SS_BothInconsistent] = "Refusing to be inconsistent on both nodes",
- [-SS_SyncingDiskless] = "Refusing to be syncing and diskless",
- [-SS_ConnectedOutdates] = "Refusing to be Outdated while Connected",
- [-SS_PrimaryNOP] = "Refusing to be Primary while peer is not outdated",
- [-SS_ResyncRunning] = "Can not start OV/resync since it is already active",
- [-SS_AlreadyStandAlone] = "Can not disconnect a StandAlone device",
- [-SS_CW_FailedByPeer] = "State changed was refused by peer node",
- [-SS_IsDiskLess] =
- "Device is diskless, the requesed operation requires a disk",
- [-SS_DeviceInUse] = "Device is held open by someone",
- [-SS_NoNetConfig] = "Have no net/connection configuration",
- [-SS_NoVerifyAlg] = "Need a verify algorithm to start online verify",
- [-SS_NeedConnection] = "Need a connection to start verify or resync",
- [-SS_NotSupported] = "Peer does not support protocol",
- [-SS_LowerThanOutdated] = "Disk state is lower than outdated",
- [-SS_InTransientState] = "In transient state, retry after next state change",
- [-SS_ConcurrentStChg] = "Concurrent state changes detected and aborted",
+ [-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
+ [-SS_NO_UP_TO_DATE_DISK] = "Refusing to be Primary without at least one UpToDate disk",
+ [-SS_BOTH_INCONSISTENT] = "Refusing to be inconsistent on both nodes",
+ [-SS_SYNCING_DISKLESS] = "Refusing to be syncing and diskless",
+ [-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected",
+ [-SS_PRIMARY_NOP] = "Refusing to be Primary while peer is not outdated",
+ [-SS_RESYNC_RUNNING] = "Can not start OV/resync since it is already active",
+ [-SS_ALREADY_STANDALONE] = "Can not disconnect a StandAlone device",
+ [-SS_CW_FAILED_BY_PEER] = "State changed was refused by peer node",
+ [-SS_IS_DISKLESS] = "Device is diskless, the requesed operation requires a disk",
+ [-SS_DEVICE_IN_USE] = "Device is held open by someone",
+ [-SS_NO_NET_CONFIG] = "Have no net/connection configuration",
+ [-SS_NO_VERIFY_ALG] = "Need a verify algorithm to start online verify",
+ [-SS_NEED_CONNECTION] = "Need a connection to start verify or resync",
+ [-SS_NOT_SUPPORTED] = "Peer does not support protocol",
+ [-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
+ [-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
+ [-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
};
const char *conns_to_name(enum drbd_conns s)
{
/* enums are unsigned... */
- return s > PausedSyncT ? "TOO_LARGE" : drbd_conn_s_names[s];
+ return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s];
}
const char *roles_to_name(enum drbd_role s)
{
- return s > Secondary ? "TOO_LARGE" : drbd_role_s_names[s];
+ return s > R_SECONDARY ? "TOO_LARGE" : drbd_role_s_names[s];
}
const char *disks_to_name(enum drbd_disk_state s)
{
- return s > UpToDate ? "TOO_LARGE" : drbd_disk_s_names[s];
+ return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s];
}
-const char *set_st_err_name(enum set_st_err err)
+const char *set_st_err_name(enum drbd_state_ret_codes err)
{
- return err <= SS_AfterLastError ? "TOO_SMALL" :
- err > SS_TwoPrimaries ? "TOO_LARGE"
+ return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" :
+ err > SS_TWO_PRIMARIES ? "TOO_LARGE"
: drbd_state_sw_errors[-err];
}
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 685dc71b8a8b..a39ba573743e 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -99,7 +99,7 @@ void drbd_md_io_complete(struct bio *bio, int error)
void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
{
unsigned long flags = 0;
- struct Tl_epoch_entry *e = NULL;
+ struct drbd_epoch_entry *e = NULL;
struct drbd_conf *mdev;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -129,7 +129,7 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
drbd_queue_work(&mdev->data.work, &e->w);
dec_local(mdev);
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "Moved EE (READ) to worker sec=%llus size=%u ee=%p\n",
(unsigned long long)e->sector, e->size, e);
);
@@ -141,7 +141,7 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
{
unsigned long flags = 0;
- struct Tl_epoch_entry *e = NULL;
+ struct drbd_epoch_entry *e = NULL;
struct drbd_conf *mdev;
sector_t e_sector;
int do_wake;
@@ -191,7 +191,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
list_del(&e->w.list); /* has been on active_ee or sync_ee */
list_add_tail(&e->w.list, &mdev->done_ee);
- MTRACE(TraceTypeEE, TraceLvlAll,
+ MTRACE(TRACE_TYPE_EE, TRACE_LVL_ALL,
dev_info(DEV, "Moved EE (WRITE) to done_ee sec=%llus size=%u ee=%p\n",
(unsigned long long)e->sector, e->size, e);
);
@@ -223,7 +223,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
}
-/* read, readA or write requests on Primary comming from drbd_make_request
+/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
*/
void drbd_endio_pri(struct bio *bio, int error)
{
@@ -260,7 +260,7 @@ int w_io_error(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
int ok;
/* NOTE: mdev->bc can be NULL by the time we get here! */
- /* D_ASSERT(mdev->bc->dc.on_io_error != PassOn); */
+ /* D_ASSERT(mdev->bc->dc.on_io_error != EP_PASS_ON); */
/* the only way this callback is scheduled is from _req_may_be_done,
* when it is done and had a local write error, see comments there */
@@ -277,14 +277,14 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
struct drbd_request *req = (struct drbd_request *)w;
/* We should not detach for read io-error,
- * but try to WRITE the DataReply to the failed location,
+ * but try to WRITE the P_DATA_REPLY to the failed location,
* to give the disk the chance to relocate that block */
drbd_io_error(mdev, FALSE); /* tries to schedule a detach and notifies peer */
spin_lock_irq(&mdev->req_lock);
if (cancel ||
- mdev->state.conn < Connected ||
- mdev->state.pdsk <= Inconsistent) {
+ mdev->state.conn < C_CONNECTED ||
+ mdev->state.pdsk <= D_INCONSISTENT) {
_req_mod(req, send_canceled, 0);
spin_unlock_irq(&mdev->req_lock);
dev_alert(DEV, "WE ARE LOST. Local IO failure, no peer.\n");
@@ -324,7 +324,7 @@ STATIC void drbd_csum(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bi
STATIC int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
int digest_size;
void *digest;
int ok;
@@ -348,7 +348,7 @@ STATIC int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel
e->size,
digest,
digest_size,
- CsumRSRequest);
+ P_CSUM_RS_REQUEST);
kfree(digest);
} else {
dev_err(DEV, "kmalloc() of digest failed.\n");
@@ -370,7 +370,7 @@ STATIC int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel
STATIC int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
- struct Tl_epoch_entry *e;
+ struct drbd_epoch_entry *e;
if (!inc_local(mdev))
return 0;
@@ -408,7 +408,7 @@ void resync_timer_fn(unsigned long data)
if (likely(!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))) {
queue = 1;
- if (mdev->state.conn == VerifyS)
+ if (mdev->state.conn == C_VERIFY_S)
mdev->resync_work.cb = w_make_ov_request;
else
mdev->resync_work.cb = w_make_resync_request;
@@ -437,18 +437,18 @@ int w_make_resync_request(struct drbd_conf *mdev,
if (unlikely(cancel))
return 1;
- if (unlikely(mdev->state.conn < Connected)) {
+ if (unlikely(mdev->state.conn < C_CONNECTED)) {
dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
return 0;
}
- if (mdev->state.conn != SyncTarget)
+ if (mdev->state.conn != C_SYNC_TARGET)
dev_err(DEV, "%s in w_make_resync_request\n",
conns_to_name(mdev->state.conn));
if (!inc_local(mdev)) {
/* Since we only need to access mdev->rsync a
- inc_local_if_state(mdev,Failed) would be sufficient, but
+ inc_local_if_state(mdev,D_FAILED) would be sufficient, but
to continue resync with a broken disk makes no sense at
all */
dev_err(DEV, "Disk broke down during resync!\n");
@@ -548,7 +548,7 @@ next_sector:
}
} else {
inc_rs_pending(mdev);
- if (!drbd_send_drequest(mdev, RSDataRequest,
+ if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
sector, size, ID_SYNCER)) {
dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(mdev);
@@ -560,7 +560,7 @@ next_sector:
if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
/* last syncer _request_ was sent,
- * but the RSDataReply not yet received. sync will end (and
+ * but the P_RS_DATA_REPLY not yet received. sync will end (and
* next sync group will resume), as soon as we receive the last
* resync data block, and the last bit is cleared.
* until then resync "work" is "inactive" ...
@@ -585,7 +585,7 @@ int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (unlikely(cancel))
return 1;
- if (unlikely(mdev->state.conn < Connected)) {
+ if (unlikely(mdev->state.conn < C_CONNECTED)) {
dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
return 0;
}
@@ -650,7 +650,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
{
unsigned long db, dt, dbdt;
unsigned long n_oos;
- union drbd_state_t os, ns;
+ union drbd_state os, ns;
struct drbd_work *w;
char *khelper_cmd = NULL;
@@ -659,7 +659,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
* resync LRU would be wrong. */
if (drbd_rs_del_all(mdev)) {
/* In case this is not possible now, most probabely because
- * there are RSDataReply Packets lingering on the worker's
+ * there are P_RS_DATA_REPLY Packets lingering on the worker's
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
@@ -690,20 +690,20 @@ int drbd_resync_finished(struct drbd_conf *mdev)
/* This protects us against multiple calls (that can happen in the presence
of application IO), and against connectivity loss just before we arrive here. */
- if (os.conn <= Connected)
+ if (os.conn <= C_CONNECTED)
goto out_unlock;
ns = os;
- ns.conn = Connected;
+ ns.conn = C_CONNECTED;
dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
- (os.conn == VerifyS || os.conn == VerifyT) ?
+ (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ?
"Online verify " : "Resync",
dt + mdev->rs_paused, mdev->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(mdev);
- if (os.conn == VerifyS || os.conn == VerifyT) {
+ if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
if (n_oos) {
dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
n_oos, Bit2KB(1));
@@ -712,7 +712,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
} else {
D_ASSERT((n_oos - mdev->rs_failed) == 0);
- if (os.conn == SyncTarget || os.conn == PausedSyncT)
+ if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
if (mdev->csums_tfm && mdev->rs_total) {
@@ -733,24 +733,24 @@ int drbd_resync_finished(struct drbd_conf *mdev)
if (mdev->rs_failed) {
dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
- if (os.conn == SyncTarget || os.conn == PausedSyncT) {
- ns.disk = Inconsistent;
- ns.pdsk = UpToDate;
+ if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
+ ns.disk = D_INCONSISTENT;
+ ns.pdsk = D_UP_TO_DATE;
} else {
- ns.disk = UpToDate;
- ns.pdsk = Inconsistent;
+ ns.disk = D_UP_TO_DATE;
+ ns.pdsk = D_INCONSISTENT;
}
} else {
- ns.disk = UpToDate;
- ns.pdsk = UpToDate;
+ ns.disk = D_UP_TO_DATE;
+ ns.pdsk = D_UP_TO_DATE;
- if (os.conn == SyncTarget || os.conn == PausedSyncT) {
+ if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
if (mdev->p_uuid) {
int i;
- for (i = Bitmap ; i <= History_end ; i++)
+ for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
- drbd_uuid_set(mdev, Bitmap, mdev->bc->md.uuid[Current]);
- _drbd_uuid_set(mdev, Current, mdev->p_uuid[Current]);
+ drbd_uuid_set(mdev, UI_BITMAP, mdev->bc->md.uuid[UI_CURRENT]);
+ _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
} else {
dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
}
@@ -762,12 +762,12 @@ int drbd_resync_finished(struct drbd_conf *mdev)
/* Now the two UUID sets are equal, update what we
* know of the peer. */
int i;
- for (i = Current ; i <= History_end ; i++)
+ for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
mdev->p_uuid[i] = mdev->bc->md.uuid[i];
}
}
- _drbd_set_state(mdev, ns, ChgStateVerbose, NULL);
+ _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
spin_unlock_irq(&mdev->req_lock);
dec_local(mdev);
@@ -790,11 +790,11 @@ out:
}
/**
- * w_e_end_data_req: Send the answer (DataReply) in response to a DataRequest.
+ * w_e_end_data_req: Send the answer (P_DATA_REPLY) in response to a P_DATA_REQUEST.
*/
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
int ok;
if (unlikely(cancel)) {
@@ -804,13 +804,13 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
if (likely(drbd_bio_uptodate(e->private_bio))) {
- ok = drbd_send_block(mdev, DataReply, e);
+ ok = drbd_send_block(mdev, P_DATA_REPLY, e);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
(unsigned long long)e->sector);
- ok = drbd_send_ack(mdev, NegDReply, e);
+ ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
drbd_io_error(mdev, FALSE);
}
@@ -832,11 +832,11 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
/**
- * w_e_end_rsdata_req: Send the answer (RSDataReply) to a RSDataRequest.
+ * w_e_end_rsdata_req: Send the answer (P_RS_DATA_REPLY) to a P_RS_DATA_REQUEST.
*/
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
int ok;
if (unlikely(cancel)) {
@@ -845,15 +845,15 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1;
}
- if (inc_local_if_state(mdev, Failed)) {
+ if (inc_local_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, e->sector);
dec_local(mdev);
}
if (likely(drbd_bio_uptodate(e->private_bio))) {
- if (likely(mdev->state.pdsk >= Inconsistent)) {
+ if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
- ok = drbd_send_block(mdev, RSDataReply, e);
+ ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Not sending RSDataReply, "
@@ -865,7 +865,7 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
(unsigned long long)e->sector);
- ok = drbd_send_ack(mdev, NegRSDReply, e);
+ ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
drbd_io_error(mdev, FALSE);
@@ -891,7 +891,7 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
struct digest_info *di;
int digest_size;
void *digest = NULL;
@@ -925,14 +925,14 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (eq) {
drbd_set_in_sync(mdev, e->sector, e->size);
mdev->rs_same_csum++;
- ok = drbd_send_ack(mdev, RSIsInSync, e);
+ ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
} else {
inc_rs_pending(mdev);
e->block_id = ID_SYNCER;
- ok = drbd_send_block(mdev, RSDataReply, e);
+ ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
}
} else {
- ok = drbd_send_ack(mdev, NegRSDReply, e);
+ ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
drbd_io_error(mdev, FALSE);
@@ -958,7 +958,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
int digest_size;
void *digest;
int ok = 1;
@@ -974,7 +974,7 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (digest) {
drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
- digest, digest_size, OVReply);
+ digest, digest_size, P_OV_REPLY);
if (ok)
inc_rs_pending(mdev);
kfree(digest);
@@ -1004,7 +1004,7 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct Tl_epoch_entry *e = (struct Tl_epoch_entry *)w;
+ struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
struct digest_info *di;
int digest_size;
void *digest;
@@ -1033,7 +1033,7 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
kfree(digest);
}
} else {
- ok = drbd_send_ack(mdev, NegRSDReply, e);
+ ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
drbd_io_error(mdev, FALSE);
@@ -1048,7 +1048,7 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
else
ov_oos_print(mdev);
- ok = drbd_send_ack_ex(mdev, OVResult, e->sector, e->size,
+ ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size,
eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
spin_lock_irq(&mdev->req_lock);
@@ -1072,8 +1072,8 @@ int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_barrier *b = (struct drbd_barrier *)w;
- struct Drbd_Barrier_Packet *p = &mdev->data.sbuf.Barrier;
+ struct drbd_tl_epoch *b = (struct drbd_tl_epoch *)w;
+ struct p_barrier *p = &mdev->data.sbuf.barrier;
int ok = 1;
/* really avoid racing with tl_clear. w.cb may have been referenced
@@ -1082,7 +1082,7 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* barrier packet here, and otherwise do nothing with the object.
* but compare with the head of w_clear_epoch */
spin_lock_irq(&mdev->req_lock);
- if (w->cb != w_send_barrier || mdev->state.conn < Connected)
+ if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
cancel = 1;
spin_unlock_irq(&mdev->req_lock);
if (cancel)
@@ -1094,8 +1094,8 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
/* inc_ap_pending was done where this was queued.
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in w_clear_epoch. */
- ok = _drbd_send_cmd(mdev, mdev->data.socket, Barrier,
- (struct Drbd_Header *)p, sizeof(*p), 0);
+ ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
+ (struct p_header *)p, sizeof(*p), 0);
drbd_put_data_sock(mdev);
return ok;
@@ -1105,7 +1105,7 @@ int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
if (cancel)
return 1;
- return drbd_send_short_cmd(mdev, UnplugRemote);
+ return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}
/**
@@ -1140,14 +1140,14 @@ int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1;
}
- ok = drbd_send_drequest(mdev, DataRequest, req->sector, req->size,
+ ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
(unsigned long)req);
if (!ok) {
- /* ?? we set Timeout or BrokenPipe in drbd_send();
+ /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
* so this is probably redundant */
- if (mdev->state.conn >= Connected)
- drbd_force_state(mdev, NS(conn, NetworkFailure));
+ if (mdev->state.conn >= C_CONNECTED)
+ drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
}
req_mod(req, ok ? handed_over_to_network : send_failed, 0);
@@ -1163,8 +1163,8 @@ STATIC int _drbd_may_sync_now(struct drbd_conf *mdev)
return 1;
odev = minor_to_mdev(odev->sync_conf.after);
ERR_IF(!odev) return 1;
- if ((odev->state.conn >= SyncSource &&
- odev->state.conn <= PausedSyncT) ||
+ if ((odev->state.conn >= C_SYNC_SOURCE &&
+ odev->state.conn <= C_PAUSED_SYNC_T) ||
odev->state.aftr_isp || odev->state.peer_isp ||
odev->state.user_isp)
return 0;
@@ -1186,11 +1186,11 @@ STATIC int _drbd_pause_after(struct drbd_conf *mdev)
odev = minor_to_mdev(i);
if (!odev)
continue;
- if (odev->state.conn == StandAlone && odev->state.disk == Diskless)
+ if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev))
- rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), ChgStateHard, NULL)
- != SS_NothingToDo);
+ rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
+ != SS_NOTHING_TO_DO);
}
return rv;
@@ -1211,13 +1211,13 @@ STATIC int _drbd_resume_next(struct drbd_conf *mdev)
odev = minor_to_mdev(i);
if (!odev)
continue;
- if (odev->state.conn == StandAlone && odev->state.disk == Diskless)
+ if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {
if (_drbd_may_sync_now(odev))
rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
- ChgStateHard, NULL)
- != SS_NothingToDo) ;
+ CS_HARD, NULL)
+ != SS_NOTHING_TO_DO) ;
}
}
return rv;
@@ -1254,7 +1254,7 @@ void drbd_alter_sa(struct drbd_conf *mdev, int na)
/**
* drbd_start_resync:
- * @side: Either SyncSource or SyncTarget
+ * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
* Start the resync process. Called from process context only,
* either admin command or drbd_receiver.
* Note, this function might bring you directly into one of the
@@ -1262,50 +1262,50 @@ void drbd_alter_sa(struct drbd_conf *mdev, int na)
*/
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
{
- union drbd_state_t ns;
+ union drbd_state ns;
int r;
- MTRACE(TraceTypeResync, TraceLvlSummary,
+ MTRACE(TRACE_TYPE_RESYNC, TRACE_LVL_SUMMARY,
dev_info(DEV, "Resync starting: side=%s\n",
- side == SyncTarget ? "SyncTarget" : "SyncSource");
- );
+ side == C_SYNC_TARGET ? "SyncTarget" : "SyncSource");
+ );
drbd_bm_recount_bits(mdev);
/* In case a previous resync run was aborted by an IO error... */
drbd_rs_cancel_all(mdev);
- if (side == SyncTarget) {
- /* Since application IO was locked out during WFBitMapT and
- WFSyncUUID we are still unmodified. Before going to SyncTarget
+ if (side == C_SYNC_TARGET) {
+ /* Since application IO was locked out during C_WF_BITMAP_T and
+ C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
we check that we might make the data inconsistent. */
r = drbd_khelper(mdev, "before-resync-target");
r = (r >> 8) & 0xff;
if (r > 0) {
dev_info(DEV, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
- drbd_force_state(mdev, NS(conn, Disconnecting));
+ drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
return;
}
}
drbd_state_lock(mdev);
- if (!inc_local_if_state(mdev, Negotiating)) {
+ if (!inc_local_if_state(mdev, D_NEGOTIATING)) {
drbd_state_unlock(mdev);
return;
}
- if (side == SyncTarget) {
+ if (side == C_SYNC_TARGET) {
mdev->bm_resync_fo = 0;
- } else /* side == SyncSource */ {
+ } else /* side == C_SYNC_SOURCE */ {
u64 uuid;
get_random_bytes(&uuid, sizeof(u64));
- drbd_uuid_set(mdev, Bitmap, uuid);
+ drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_send_sync_uuid(mdev, uuid);
- D_ASSERT(mdev->state.disk == UpToDate);
+ D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
}
write_lock_irq(&global_state_lock);
@@ -1315,18 +1315,18 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
ns.conn = side;
- if (side == SyncTarget)
- ns.disk = Inconsistent;
- else /* side == SyncSource */
- ns.pdsk = Inconsistent;
+ if (side == C_SYNC_TARGET)
+ ns.disk = D_INCONSISTENT;
+ else /* side == C_SYNC_SOURCE */
+ ns.pdsk = D_INCONSISTENT;
- r = __drbd_set_state(mdev, ns, ChgStateVerbose, NULL);
+ r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
ns = mdev->state;
- if (ns.conn < Connected)
- r = SS_UnknownError;
+ if (ns.conn < C_CONNECTED)
+ r = SS_UNKNOWN_ERROR;
- if (r == SS_Success) {
+ if (r == SS_SUCCESS) {
mdev->rs_total =
mdev->rs_mark_left = drbd_bm_total_weight(mdev);
mdev->rs_failed = 0;
@@ -1340,7 +1340,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_state_unlock(mdev);
dec_local(mdev);
- if (r == SS_Success) {
+ if (r == SS_SUCCESS) {
dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
conns_to_name(ns.conn),
(unsigned long) mdev->rs_total << (BM_BLOCK_SIZE_B-10),
@@ -1351,7 +1351,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
return;
}
- if (ns.conn == SyncTarget) {
+ if (ns.conn == C_SYNC_TARGET) {
D_ASSERT(!test_bit(STOP_SYNC_TIMER, &mdev->flags));
mod_timer(&mdev->resync_timer, jiffies);
}
@@ -1360,7 +1360,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
}
}
-int drbd_worker(struct Drbd_thread *thi)
+int drbd_worker(struct drbd_thread *thi)
{
struct drbd_conf *mdev = thi->mdev;
struct drbd_work *w = NULL;
@@ -1421,11 +1421,11 @@ int drbd_worker(struct Drbd_thread *thi)
list_del_init(&w->list);
spin_unlock_irq(&mdev->data.work.q_lock);
- if (!w->cb(mdev, w, mdev->state.conn < Connected)) {
+ if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
/* dev_warn(DEV, "worker: a callback failed! \n"); */
- if (mdev->state.conn >= Connected)
+ if (mdev->state.conn >= C_CONNECTED)
drbd_force_state(mdev,
- NS(conn, NetworkFailure));
+ NS(conn, C_NETWORK_FAILURE));
}
}
@@ -1452,7 +1452,7 @@ int drbd_worker(struct Drbd_thread *thi)
*/
spin_unlock_irq(&mdev->data.work.q_lock);
- D_ASSERT(mdev->state.disk == Diskless && mdev->state.conn == StandAlone);
+ D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
/* _drbd_set_state only uses stop_nowait.
* wait here for the Exiting receiver. */
drbd_thread_stop(&mdev->receiver);