Commit fcff06c

Merge branch 'for-next' of git://neil.brown.name/md
Pull md updates from NeilBrown.

* 'for-next' of git://neil.brown.name/md:
  DM RAID: Add support for MD RAID10
  md/RAID1: Add missing case for attempting to repair known bad blocks.
  md/raid5: For odirect-write performance, do not set STRIPE_PREREAD_ACTIVE.
  md/raid1: don't abort a resync on the first badblock.
  md: remove duplicated test on ->openers when calling do_md_stop()
  raid5: Add R5_ReadNoMerge flag which prevent bio from merging at block layer
  md/raid1: prevent merging too large request
  md/raid1: read balance chooses idlest disk for SSD
  md/raid1: make sequential read detection per disk based
  MD RAID10: Export md_raid10_congested
  MD: Move macros from raid1*.h to raid1*.c
  MD RAID1: rename mirror_info structure
  MD RAID10: rename mirror_info structure
  MD RAID10: Fix compiler warning.
  raid5: add a per-stripe lock
  raid5: remove unnecessary bitmap write optimization
  raid5: lockless access raid5 overrided bi_phys_segments
  raid5: reduce chance release_stripe() taking device_lock
2 parents: 068535f + 63f33b8

File tree: 9 files changed, +426 −219 lines


Documentation/device-mapper/dm-raid.txt

Lines changed: 26 additions & 0 deletions
@@ -27,6 +27,10 @@ The target is named "raid" and it accepts the following parameters:
 		- rotating parity N (right-to-left) with data restart
   raid6_nc	RAID6 N continue
 		- rotating parity N (right-to-left) with data continuation
+  raid10	Various RAID10 inspired algorithms chosen by additional params
+		- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
+		- RAID1E: Integrated Adjacent Stripe Mirroring
+		- and other similar RAID10 variants
 
   Reference: Chapter 4 of
   http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf
@@ -59,6 +63,28 @@ The target is named "raid" and it accepts the following parameters:
 	logical size of the array.  The bitmap records the device
 	synchronisation state for each region.
 
+ [raid10_copies <# copies>]
+ [raid10_format near]
+	These two options are used to alter the default layout of
+	a RAID10 configuration.  The number of copies can be
+	specified, but the default is 2.  There are other variations
+	to how the copies are laid down - the default and only current
+	option is "near".  Near copies are what most people think of
+	with respect to mirroring.  If these options are left
+	unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
+	are given, then the layouts for 2, 3 and 4 devices are:
+		2 drives    3 drives      4 drives
+		--------    ----------    --------------
+		A1  A1      A1  A1  A2    A1  A1  A2  A2
+		A2  A2      A2  A3  A3    A3  A3  A4  A4
+		A3  A3      A4  A4  A5    A5  A5  A6  A6
+		A4  A4      A5  A6  A6    A7  A7  A8  A8
+		..  ..      ..  ..  ..    ..  ..  ..  ..
+	The 2-device layout is equivalent to 2-way RAID1.  The 4-device
+	layout is what a traditional RAID10 would look like.  The
+	3-device layout is what might be called a 'RAID1E - Integrated
+	Adjacent Stripe Mirroring'.
+
 <#raid_devs>: The number of devices composing the array.
 	Each device consists of two entries.  The first is the device
 	containing the metadata (if any); the second is the one containing the
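
For orientation, a hypothetical example table in the style this file uses elsewhere: a 4-device RAID10 with the default two near copies and a 1 MiB (2048-sector) chunk. The start/length and device numbers are made up for illustration; since "near" is the only current format, 'raid10_format near' could be appended as well (raising the parameter count to 5).

0 3906250000 raid \
        raid10 3 2048 raid10_copies 2 \
        4 - 8:16 - 8:32 - 8:48 - 8:64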

drivers/md/dm-raid.c

Lines changed: 90 additions & 5 deletions
@@ -11,6 +11,7 @@
 #include "md.h"
 #include "raid1.h"
 #include "raid5.h"
+#include "raid10.h"
 #include "bitmap.h"
 
 #include <linux/device-mapper.h>
@@ -52,7 +53,10 @@ struct raid_dev {
 #define DMPF_MAX_RECOVERY_RATE 0x20
 #define DMPF_MAX_WRITE_BEHIND  0x40
 #define DMPF_STRIPE_CACHE      0x80
-#define DMPF_REGION_SIZE       0X100
+#define DMPF_REGION_SIZE       0x100
+#define DMPF_RAID10_COPIES     0x200
+#define DMPF_RAID10_FORMAT     0x400
+
 struct raid_set {
 	struct dm_target *ti;
@@ -76,6 +80,7 @@ static struct raid_type {
 	const unsigned algorithm;	/* RAID algorithm. */
 } raid_types[] = {
 	{"raid1",    "RAID1 (mirroring)",               0, 2, 1,  0 /* NONE */},
+	{"raid10",   "RAID10 (striped mirrors)",        0, 2, 10, UINT_MAX /* Varies */},
 	{"raid4",    "RAID4 (dedicated parity disk)",   1, 2, 5,  ALGORITHM_PARITY_0},
 	{"raid5_la", "RAID5 (left asymmetric)",         1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
 	{"raid5_ra", "RAID5 (right asymmetric)",        1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
@@ -86,6 +91,17 @@ static struct raid_type {
 	{"raid6_nc", "RAID6 (N continue)",              2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
 };
 
+static unsigned raid10_md_layout_to_copies(int layout)
+{
+	return layout & 0xFF;
+}
+
+static int raid10_format_to_md_layout(char *format, unsigned copies)
+{
+	/* 1 "far" copy, and 'copies' "near" copies */
+	return (1 << 8) | (copies & 0xFF);
+}
+
 static struct raid_type *get_raid_type(char *name)
 {
 	int i;
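
Taken together, the two helpers above imply that the md RAID10 layout word packs the near-copy count into bits 0-7 and the far-copy count into bits 8-15, which is also why parse_raid_params() below caps raid10_copies at 0xFF. A standalone C sketch (not kernel code) of that round trip:

#include <assert.h>

/* Mirrors raid10_format_to_md_layout(): 1 "far" copy in bits 8-15,
 * 'copies' "near" copies in bits 0-7. */
static int format_to_layout(unsigned copies)
{
        return (1 << 8) | (copies & 0xFF);
}

/* Mirrors raid10_md_layout_to_copies(): the near-copy count is the
 * low byte of the layout word. */
static unsigned layout_to_copies(int layout)
{
        return layout & 0xFF;
}

int main(void)
{
        int layout = format_to_layout(2);       /* the "near, 2 copies" default */

        assert(layout == 0x102);                /* far=1 in the high byte, near=2 in the low */
        assert(layout_to_copies(layout) == 2);
        return 0;
}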
@@ -339,10 +355,16 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
  *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
  *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
  *    [region_size <sectors>]		Defines granularity of bitmap
+ *
+ * RAID10-only options:
+ *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
+ *    [raid10_format <near>]		Layout algorithm.  (Default: near)
  */
 static int parse_raid_params(struct raid_set *rs, char **argv,
 			     unsigned num_raid_params)
 {
+	char *raid10_format = "near";
+	unsigned raid10_copies = 2;
 	unsigned i, rebuild_cnt = 0;
 	unsigned long value, region_size = 0;
 	sector_t sectors_per_dev = rs->ti->len;
@@ -416,11 +438,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 		}
 
 		key = argv[i++];
+
+		/* Parameters that take a string value are checked here. */
+		if (!strcasecmp(key, "raid10_format")) {
+			if (rs->raid_type->level != 10) {
+				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
+				return -EINVAL;
+			}
+			if (strcmp("near", argv[i])) {
+				rs->ti->error = "Invalid 'raid10_format' value given";
+				return -EINVAL;
+			}
+			raid10_format = argv[i];
+			rs->print_flags |= DMPF_RAID10_FORMAT;
+			continue;
+		}
+
 		if (strict_strtoul(argv[i], 10, &value) < 0) {
 			rs->ti->error = "Bad numerical argument given in raid params";
 			return -EINVAL;
 		}
 
+		/* Parameters that take a numeric value are checked here */
 		if (!strcasecmp(key, "rebuild")) {
 			rebuild_cnt++;
@@ -439,6 +478,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				return -EINVAL;
 			}
 			break;
+		case 10:
 		default:
 			DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name);
 			rs->ti->error = "Rebuild not supported for this RAID type";
@@ -495,7 +535,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 			 */
 			value /= 2;
 
-			if (rs->raid_type->level < 5) {
+			if ((rs->raid_type->level != 5) &&
+			    (rs->raid_type->level != 6)) {
 				rs->ti->error = "Inappropriate argument: stripe_cache";
 				return -EINVAL;
 			}
@@ -520,6 +561,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 		} else if (!strcasecmp(key, "region_size")) {
 			rs->print_flags |= DMPF_REGION_SIZE;
 			region_size = value;
+		} else if (!strcasecmp(key, "raid10_copies") &&
+			   (rs->raid_type->level == 10)) {
+			if ((value < 2) || (value > 0xFF)) {
+				rs->ti->error = "Bad value for 'raid10_copies'";
+				return -EINVAL;
+			}
+			rs->print_flags |= DMPF_RAID10_COPIES;
+			raid10_copies = value;
 		} else {
 			DMERR("Unable to parse RAID parameter: %s", key);
 			rs->ti->error = "Unable to parse RAID parameters";
@@ -538,8 +587,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	if (dm_set_target_max_io_len(rs->ti, max_io_len))
 		return -EINVAL;
 
-	if ((rs->raid_type->level > 1) &&
-	    sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) {
+	if (rs->raid_type->level == 10) {
+		if (raid10_copies > rs->md.raid_disks) {
+			rs->ti->error = "Not enough devices to satisfy specification";
+			return -EINVAL;
+		}
+
+		/* (Len * #mirrors) / #devices */
+		sectors_per_dev = rs->ti->len * raid10_copies;
+		sector_div(sectors_per_dev, rs->md.raid_disks);
+
+		rs->md.layout = raid10_format_to_md_layout(raid10_format,
+							   raid10_copies);
+		rs->md.new_layout = rs->md.layout;
+	} else if ((rs->raid_type->level > 1) &&
+		   sector_div(sectors_per_dev,
+			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
 		rs->ti->error = "Target length not divisible by number of data devices";
 		return -EINVAL;
 	}
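
The "(Len * #mirrors) / #devices" comment is easiest to see with concrete numbers. A standalone sketch, using plain 64-bit division where the kernel uses sector_div() (which divides a sector_t in place and returns the remainder); the sizes below are made up:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ti_len = 409600;       /* hypothetical target length, in sectors */
        unsigned raid10_copies = 2;     /* the default */
        unsigned raid_disks = 4;

        /* (Len * #mirrors) / #devices, as in the diff above */
        uint64_t sectors_per_dev = ti_len * raid10_copies / raid_disks;

        /* 409600 * 2 / 4 = 204800: every sector exists twice, spread
         * evenly over four members, so each member holds half the
         * target's worth of data. */
        printf("sectors_per_dev = %" PRIu64 "\n", sectors_per_dev);
        return 0;
}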
@@ -566,6 +629,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
 	if (rs->raid_type->level == 1)
 		return md_raid1_congested(&rs->md, bits);
 
+	if (rs->raid_type->level == 10)
+		return md_raid10_congested(&rs->md, bits);
+
 	return md_raid5_congested(&rs->md, bits);
 }
@@ -884,6 +950,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	case 6:
 		redundancy = rs->raid_type->parity_devs;
 		break;
+	case 10:
+		redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
+		break;
 	default:
 		ti->error = "Unknown RAID type";
 		return -EINVAL;
@@ -1049,12 +1118,19 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto bad;
 	}
 
+	if (ti->len != rs->md.array_sectors) {
+		ti->error = "Array size does not match requested target length";
+		ret = -EINVAL;
+		goto size_mismatch;
+	}
 	rs->callbacks.congested_fn = raid_is_congested;
 	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
 	mddev_suspend(&rs->md);
 	return 0;
 
+size_mismatch:
+	md_stop(&rs->md);
 bad:
 	context_free(rs);
10601136

@@ -1203,6 +1279,13 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 			DMEMIT(" region_size %lu",
 			       rs->md.bitmap_info.chunksize >> 9);
 
+		if (rs->print_flags & DMPF_RAID10_COPIES)
+			DMEMIT(" raid10_copies %u",
+			       raid10_md_layout_to_copies(rs->md.layout));
+
+		if (rs->print_flags & DMPF_RAID10_FORMAT)
+			DMEMIT(" raid10_format near");
+
 		DMEMIT(" %d", rs->md.raid_disks);
 		for (i = 0; i < rs->md.raid_disks; i++) {
 			if (rs->dev[i].meta_dev)
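
With both print flags set, the table line that raid_status() emits for a raid10 set would look roughly like the following (chunk size first, then the optional parameters, then the device count and metadata/data pairs; the exact ordering and the device numbers here are illustrative, not taken from the diff):

raid10 5 2048 raid10_copies 2 raid10_format near 4 - 8:16 - 8:32 - 8:48 - 8:64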
@@ -1277,7 +1360,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
@@ -1304,6 +1387,8 @@ module_init(dm_raid_init);
 module_exit(dm_raid_exit);
 
 MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_ALIAS("dm-raid1");
+MODULE_ALIAS("dm-raid10");
 MODULE_ALIAS("dm-raid4");
 MODULE_ALIAS("dm-raid5");
 MODULE_ALIAS("dm-raid6");

drivers/md/md.c

Lines changed: 2 additions & 6 deletions
@@ -3942,17 +3942,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 		break;
 	case clear:
 		/* stopping an active array */
-		if (atomic_read(&mddev->openers) > 0)
-			return -EBUSY;
 		err = do_md_stop(mddev, 0, NULL);
 		break;
 	case inactive:
 		/* stopping an active array */
-		if (mddev->pers) {
-			if (atomic_read(&mddev->openers) > 0)
-				return -EBUSY;
+		if (mddev->pers)
 			err = do_md_stop(mddev, 2, NULL);
-		} else
+		else
 			err = 0; /* already inactive */
 		break;
 	case suspended:
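
Per the shortlog entry "md: remove duplicated test on ->openers when calling do_md_stop()", these call sites can drop their checks because do_md_stop() is assumed to perform the same busy test itself. A standalone C sketch of that shape (simplified; not the real kernel function, which also takes md's open_mutex and a block_device argument):

#include <stdio.h>

struct mddev_sketch {
        int openers;    /* how many holders still have the array open */
        int pers;       /* non-zero while a personality is active */
};

/* The callee now owns the "still in use?" test, so call sites like
 * 'clear' and 'inactive' above no longer need their own copies. */
static int do_md_stop_sketch(struct mddev_sketch *mddev, int mode)
{
        if (mddev->openers > 0)
                return -16;     /* -EBUSY */
        /* ... actually stop the array (elided) ... */
        return 0;
}

int main(void)
{
        struct mddev_sketch md = { .openers = 1, .pers = 1 };

        printf("%d\n", do_md_stop_sketch(&md, 2));      /* -16: still open */
        md.openers = 0;
        printf("%d\n", do_md_stop_sketch(&md, 2));      /* 0: stops cleanly */
        return 0;
}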
