Skip to content

Commit

Permalink
Merge branch 'dmraid-fix-6.9' into md-6.9
Browse files Browse the repository at this point in the history
This is the second half of fixes for dmraid. The first half is available
at [1].

This set contains fixes:
 - reshape can start unexpectedly and cause data corruption, patches 1,5,6;
 - deadlocks when reshape runs concurrently with IO, patch 8;
 - a lockdep warning, patch 9;

For all the dmraid-related tests in the lvm2 suite, there are no new
regressions compared against 6.6 kernels (which is a good baseline from
before the recent regressions).

[1] https://lore.kernel.org/all/CAPhsuW7u1UKHCDOBDhD7DzOVtkGemDz_QnJ4DUq_kSN-Q3G66Q@mail.gmail.com/

* dmraid-fix-6.9:
  dm-raid: fix lockdep waring in "pers->hot_add_disk"
  dm-raid456, md/raid456: fix a deadlock for dm-raid456 while io concurrent with reshape
  dm-raid: add a new helper prepare_suspend() in md_personality
  md/dm-raid: don't call md_reap_sync_thread() directly
  dm-raid: really frozen sync_thread during suspend
  md: add a new helper reshape_interrupted()
  md: export helper md_is_rdwr()
  md: export helpers to stop sync_thread
  md: don't clear MD_RECOVERY_FROZEN for new dm-raid until resume
  • Loading branch information
liu-song-6 committed Mar 5, 2024
2 parents 3445139 + 95009ae commit 3a889fd
Show file tree
Hide file tree
Showing 4 changed files with 196 additions and 40 deletions.
93 changes: 72 additions & 21 deletions drivers/md/dm-raid.c
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,7 @@ struct raid_dev {
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
#define RT_FLAG_RS_GROW 8
#define RT_FLAG_RS_FROZEN 9

/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
Expand Down Expand Up @@ -3240,11 +3241,12 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rs->md.ro = 1;
rs->md.in_sync = 1;

/* Keep array frozen until resume. */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);

/* Has to be held on running the array */
mddev_suspend_and_lock_nointr(&rs->md);

/* Keep array frozen until resume. */
md_frozen_sync_thread(&rs->md);

r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
if (r) {
Expand Down Expand Up @@ -3339,7 +3341,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;

md_handle_request(mddev, bio);
if (unlikely(!md_handle_request(mddev, bio)))
return DM_MAPIO_REQUEUE;

return DM_MAPIO_SUBMITTED;
}
Expand Down Expand Up @@ -3718,21 +3721,33 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
int ret = 0;

if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;

if (!strcasecmp(argv[0], "frozen"))
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) ||
test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags))
return -EBUSY;

if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
if (!strcasecmp(argv[0], "frozen")) {
ret = mddev_lock(mddev);
if (ret)
return ret;

md_frozen_sync_thread(mddev);
mddev_unlock(mddev);
} else if (!strcasecmp(argv[0], "idle")) {
ret = mddev_lock(mddev);
if (ret)
return ret;

md_idle_sync_thread(mddev);
mddev_unlock(mddev);
}

clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
return -EBUSY;
else if (!strcasecmp(argv[0], "resync"))
; /* MD_RECOVERY_NEEDED set below */
Expand Down Expand Up @@ -3791,15 +3806,46 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
}

/*
 * Device-mapper presuspend hook: runs before I/O is flushed and before
 * raid_postsuspend(). Marks the raid_set frozen so raid_message() rejects
 * "frozen"/"idle" sync_thread changes (returns -EBUSY) for the rest of the
 * suspend sequence, and warns the personality about an interrupted reshape.
 */
static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/*
	 * From now on, disallow raid_message() to change sync_thread until
	 * resume, raid_postsuspend() is too late.
	 */
	set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);

	/* Nothing more to do unless a reshape was (or will soon be) interrupted. */
	if (!reshape_interrupted(mddev))
		return;

	/*
	 * For raid456, if reshape is interrupted, IO across reshape position
	 * will never make progress, while caller will wait for IO to be done.
	 * Inform raid456 to handle those IO to prevent deadlock.
	 */
	if (mddev->pers && mddev->pers->prepare_suspend)
		mddev->pers->prepare_suspend(mddev);
}

/*
 * Device-mapper presuspend_undo hook: the suspend was aborted, so undo
 * raid_presuspend() and let raid_message() manage the sync_thread again.
 */
static void raid_presuspend_undo(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	/* Re-enable the sync_thread messages rejected while the flag was set. */
	clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
}

static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;

if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
/* Writes have to be stopped before suspending to avoid deadlocks. */
if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
md_stop_writes(&rs->md);

/*
* sync_thread must be stopped during suspend, and writes have
* to be stopped before suspending to avoid deadlocks.
*/
md_stop_writes(&rs->md);
mddev_suspend(&rs->md, false);
}
}
Expand Down Expand Up @@ -4012,8 +4058,6 @@ static int raid_preresume(struct dm_target *ti)
}

/* Check for any resize/reshape on @rs and adjust/initiate */
/* Be prepared for mddev_resume() in raid_resume() */
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
Expand Down Expand Up @@ -4047,18 +4091,23 @@ static void raid_resume(struct dm_target *ti)
* Take this opportunity to check whether any failed
* devices are reachable again.
*/
mddev_lock_nointr(mddev);
attempt_restore_of_faulty_devices(rs);
mddev_unlock(mddev);
}

if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
/* Only reduce raid set size before running a disk removing reshape. */
if (mddev->delta_disks < 0)
rs_set_capacity(rs);

WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery));
WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
mddev_lock_nointr(mddev);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev->ro = 0;
mddev->in_sync = 0;
md_unfrozen_sync_thread(mddev);
mddev_unlock_and_resume(mddev);
}
}
Expand All @@ -4074,6 +4123,8 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
.presuspend = raid_presuspend,
.presuspend_undo = raid_presuspend_undo,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
Expand Down
73 changes: 57 additions & 16 deletions drivers/md/md.c
Original file line number Diff line number Diff line change
Expand Up @@ -99,18 +99,6 @@ static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);

enum md_ro_state {
MD_RDWR,
MD_RDONLY,
MD_AUTO_READ,
MD_MAX_STATE
};

static bool md_is_rdwr(struct mddev *mddev)
{
return (mddev->ro == MD_RDWR);
}

/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
Expand Down Expand Up @@ -378,15 +366,15 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
bool md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
if (is_suspended(mddev, bio)) {
DEFINE_WAIT(__wait);
/* Bail out if REQ_NOWAIT is set for the bio */
if (bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
return;
return true;
}
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
Expand All @@ -402,10 +390,13 @@ void md_handle_request(struct mddev *mddev, struct bio *bio)

if (!mddev->pers->make_request(mddev, bio)) {
percpu_ref_put(&mddev->active_io);
if (!mddev->gendisk && mddev->pers->prepare_suspend)
return false;
goto check_suspended;
}

percpu_ref_put(&mddev->active_io);
return true;
}
EXPORT_SYMBOL(md_handle_request);

Expand Down Expand Up @@ -4942,6 +4933,35 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
mddev_lock_nointr(mddev);
}

/*
 * Transition the array to the "idle" sync action: unfreeze recovery and
 * stop the running sync_thread (check_seq=true: wait for the current
 * sync sequence to be reaped). Caller must hold mddev->reconfig_mutex;
 * stop_sync_thread() is entered with it held (locked=true).
 */
void md_idle_sync_thread(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);

	/* Allow a new sync action to be scheduled once the thread is gone. */
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	stop_sync_thread(mddev, true, true);
}
EXPORT_SYMBOL_GPL(md_idle_sync_thread);

/*
 * Freeze sync_thread activity: set MD_RECOVERY_FROZEN so no new sync
 * action can start, then stop the running sync_thread (check_seq=false:
 * don't wait on the sync sequence). Caller must hold
 * mddev->reconfig_mutex; pair with md_unfrozen_sync_thread().
 */
void md_frozen_sync_thread(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);

	/* Set FROZEN before stopping so the thread cannot be restarted. */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	stop_sync_thread(mddev, true, false);
}
EXPORT_SYMBOL_GPL(md_frozen_sync_thread);

/*
 * Undo md_frozen_sync_thread(): clear MD_RECOVERY_FROZEN, flag that
 * recovery is needed, kick the md daemon thread to (re)start a
 * sync_thread, and notify sysfs watchers of the sync_action change.
 * Caller must hold mddev->reconfig_mutex.
 */
void md_unfrozen_sync_thread(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);

	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	/* Wake up userspace pollers of md/sync_action. */
	sysfs_notify_dirent_safe(mddev->sysfs_action);
}
EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);

static void idle_sync_thread(struct mddev *mddev)
{
mutex_lock(&mddev->sync_mutex);
Expand Down Expand Up @@ -6062,7 +6082,10 @@ int md_run(struct mddev *mddev)
pr_warn("True protection against single-disk failure might be compromised.\n");
}

mddev->recovery = 0;
/* dm-raid expect sync_thread to be frozen until resume */
if (mddev->gendisk)
mddev->recovery = 0;

/* may be over-ridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;

Expand Down Expand Up @@ -6344,7 +6367,6 @@ static void md_clean(struct mddev *mddev)

static void __md_stop_writes(struct mddev *mddev)
{
stop_sync_thread(mddev, true, false);
del_timer_sync(&mddev->safemode_timer);

if (mddev->pers && mddev->pers->quiesce) {
Expand All @@ -6369,6 +6391,8 @@ static void __md_stop_writes(struct mddev *mddev)
/*
 * Stop all write-out activity on the array: under reconfig_mutex, freeze
 * recovery and stop the sync_thread, then run __md_stop_writes() to quiesce
 * the remaining write paths.
 */
void md_stop_writes(struct mddev *mddev)
{
	mddev_lock_nointr(mddev);
	/* Freeze first so a new sync_thread cannot start while we stop writes. */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	stop_sync_thread(mddev, true, false);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
Expand Down Expand Up @@ -8712,6 +8736,23 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
}
EXPORT_SYMBOL_GPL(md_account_bio);

/*
 * Release a cloned bio whose bi_private is a struct md_io_clone
 * (the counterpart of md_account_bio()): propagate the clone's error
 * status to the original bio (first error wins), finish I/O accounting
 * if it was started, free the clone, and drop the active_io reference
 * taken for it. Note: unlike a normal endio path, this does NOT complete
 * the original bio.
 */
void md_free_cloned_bio(struct bio *bio)
{
	struct md_io_clone *md_io_clone = bio->bi_private;
	struct bio *orig_bio = md_io_clone->orig_bio;
	struct mddev *mddev = md_io_clone->mddev;

	/* Only record the first failure; don't overwrite an existing error. */
	if (bio->bi_status && !orig_bio->bi_status)
		orig_bio->bi_status = bio->bi_status;

	if (md_io_clone->start_time)
		bio_end_io_acct(orig_bio, md_io_clone->start_time);

	bio_put(bio);
	percpu_ref_put(&mddev->active_io);
}
EXPORT_SYMBOL_GPL(md_free_cloned_bio);

/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
Expand Down
38 changes: 37 additions & 1 deletion drivers/md/md.h
Original file line number Diff line number Diff line change
Expand Up @@ -569,6 +569,37 @@ enum recovery_flags {
MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};

/* Array read/write state kept in mddev->ro. */
enum md_ro_state {
	MD_RDWR,	/* array is read-write */
	MD_RDONLY,	/* array is read-only */
	MD_AUTO_READ,	/* read-only, auto-switches to read-write on first write */
	MD_MAX_STATE
};

/* True when the array is fully read-write (not RDONLY/AUTO_READ). */
static inline bool md_is_rdwr(struct mddev *mddev)
{
	return (mddev->ro == MD_RDWR);
}

/*
 * Return true if a reshape was started and is now interrupted, or is
 * about to be interrupted (MD_RECOVERY_WAIT/INTR/FROZEN set while the
 * sync_thread is still running). Returns false if no reshape was ever
 * started (reshape_position == MaxSector).
 */
static inline bool reshape_interrupted(struct mddev *mddev)
{
	/* reshape never started */
	if (mddev->reshape_position == MaxSector)
		return false;

	/* reshape was started but no sync_thread is running: interrupted */
	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return true;

	/* running reshape will be interrupted soon. */
	if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return true;

	return false;
}

static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
Expand Down Expand Up @@ -628,6 +659,7 @@ struct md_personality
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
void (*update_reshape_pos) (struct mddev *mddev);
void (*prepare_suspend) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
Expand Down Expand Up @@ -761,6 +793,7 @@ extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);
void md_free_cloned_bio(struct bio *bio);

extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
Expand Down Expand Up @@ -789,9 +822,12 @@ extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
extern void md_idle_sync_thread(struct mddev *mddev);
extern void md_frozen_sync_thread(struct mddev *mddev);
extern void md_unfrozen_sync_thread(struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
Expand Down
Loading

0 comments on commit 3a889fd

Please sign in to comment.