Support for loopback bricks
Register the directory that will host the loopback bricks using:

```
glustercli device add <peerid> <path> --provisioner loop
```

Example:

```
glustercli device add 70d79c3f-e7af-43f6-8b65-05dff2423da1 \
	   /exports --provisioner loop
```

Now create the volume using:

```
glustercli volume create gv1 --size 1G \
	   --provisioner loop \
	   --replica 3
```
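
The e2e tests in this commit drive the same flow through the Go REST client. Below is a minimal sketch of that usage, assuming an already-configured restclient.Client; the DeviceAdd/VolumeCreate calls and the ProvisionerType constants are taken from the diffs in this commit, while the Size and ReplicaCount field names and the bytes unit for Size are assumptions.

```
// Hedged sketch, not part of this commit.
package main

import (
	"github.com/gluster/glusterd2/pkg/api"
	"github.com/gluster/glusterd2/pkg/restclient"
)

func provisionLoopVolume(client *restclient.Client, peerID string) error {
	// Equivalent of: glustercli device add <peerid> /exports --provisioner loop
	if _, err := client.DeviceAdd(peerID, "/exports", api.ProvisionerTypeLoop); err != nil {
		return err
	}

	// Equivalent of: glustercli volume create gv1 --size 1G --provisioner loop --replica 3
	req := api.VolCreateReq{
		Name:            "gv1",
		Size:            1 << 30, // 1 GiB, assumed to be expressed in bytes
		ReplicaCount:    3,       // assumed field name backing the --replica flag
		ProvisionerType: api.ProvisionerTypeLoop,
	}
	_, err := client.VolumeCreate(req)
	return err
}
```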

Fixes: #1418
Signed-off-by: Aravinda VK <avishwan@redhat.com>
aravindavk committed Feb 14, 2019
1 parent 830c8c9 commit bebbc65
Showing 29 changed files with 1,003 additions and 92 deletions.
523 changes: 523 additions & 0 deletions e2e/smartvol_ops_loop_test.go

Large diffs are not rendered by default.

7 changes: 4 additions & 3 deletions e2e/smartvol_ops_test.go
@@ -651,6 +651,7 @@ func TestSmartVolume(t *testing.T) {
var err error

r := require.New(t)
loopDevicesCleanup(t)

tc, err := setupCluster(t, "./config/1.toml", "./config/2.toml", "./config/3.toml")
r.Nil(err)
@@ -668,19 +669,19 @@ func TestSmartVolume(t *testing.T) {
r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev2.img", "2", "250M"))
r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev3.img", "3", "250M"))

_, err = client.DeviceAdd(tc.gds[0].PeerID(), "/dev/gluster_loop1")
_, err = client.DeviceAdd(tc.gds[0].PeerID(), "/dev/gluster_loop1", api.ProvisionerTypeLvm)
r.Nil(err)
dev, err := client.DeviceList(tc.gds[0].PeerID(), "/dev/gluster_loop1")
r.Nil(err)
r.Equal(dev[0].Device, "/dev/gluster_loop1")

_, err = client.DeviceAdd(tc.gds[1].PeerID(), "/dev/gluster_loop2")
_, err = client.DeviceAdd(tc.gds[1].PeerID(), "/dev/gluster_loop2", api.ProvisionerTypeLvm)
r.Nil(err)
dev, err = client.DeviceList(tc.gds[1].PeerID(), "/dev/gluster_loop2")
r.Nil(err)
r.Equal(dev[0].Device, "/dev/gluster_loop2")

_, err = client.DeviceAdd(tc.gds[2].PeerID(), "/dev/gluster_loop3")
_, err = client.DeviceAdd(tc.gds[2].PeerID(), "/dev/gluster_loop3", api.ProvisionerTypeLvm)
r.Nil(err)
dev, err = client.DeviceList(tc.gds[2].PeerID(), "/dev/gluster_loop3")
r.Nil(err)
4 changes: 2 additions & 2 deletions e2e/snapshot_ops_test.go
@@ -392,11 +392,11 @@ func testSnapshotOnSmartVol(t *testing.T, tc *testCluster) {
r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev2.img", "6", "500M"))

// brickCount+1
_, err := client.DeviceAdd(tc.gds[0].PeerID(), "/dev/gluster_loop5")
_, err := client.DeviceAdd(tc.gds[0].PeerID(), "/dev/gluster_loop5", api.ProvisionerTypeLvm)
r.Nil(err)

// brickCount+2
_, err = client.DeviceAdd(tc.gds[1].PeerID(), "/dev/gluster_loop6")
_, err = client.DeviceAdd(tc.gds[1].PeerID(), "/dev/gluster_loop6", api.ProvisionerTypeLvm)
r.Nil(err)

smartvolname := formatVolName(t.Name())
5 changes: 4 additions & 1 deletion glustercli/cmd/device.go
@@ -17,7 +17,10 @@ const (
helpDeviceInfoCmd = "Get device info"
)

var flagDeviceAddProvisioner string

func init() {
deviceAddCmd.Flags().StringVar(&flagDeviceAddProvisioner, "provisioner", "lvm", "Provisioner Type(lvm, loop)")
deviceCmd.AddCommand(deviceAddCmd)
deviceCmd.AddCommand(deviceInfoCmd)
}
@@ -111,7 +114,7 @@ var deviceAddCmd = &cobra.Command{
peerid := args[0]
devname := args[1]

_, err := client.DeviceAdd(peerid, devname)
_, err := client.DeviceAdd(peerid, devname, flagDeviceAddProvisioner)

if err != nil {
if GlobalFlag.Verbose {
3 changes: 3 additions & 0 deletions glustercli/cmd/volume-create.go
@@ -41,6 +41,7 @@ var (
flagCreateSubvolZoneOverlap bool
flagAverageFileSize string
flagCreateMaxBrickSize string
flagProvisionerType string

volumeCreateCmd = &cobra.Command{
Use: "create <volname> [<brick> [<brick>]...|--size <size>]",
@@ -87,6 +88,7 @@ func init() {
volumeCreateCmd.Flags().BoolVar(&flagCreateSubvolZoneOverlap, "subvols-zones-overlap", false, "Brick belonging to other Sub volume can be created in the same zone")
volumeCreateCmd.Flags().StringVar(&flagAverageFileSize, "average-file-size", "1M", "Average size of the files")
volumeCreateCmd.Flags().StringVar(&flagCreateMaxBrickSize, "max-brick-size", "", "Max brick size for auto distribute count")
volumeCreateCmd.Flags().StringVar(&flagProvisionerType, "provisioner", "lvm", "Brick Provisioner Type(lvm, loop)")

volumeCmd.AddCommand(volumeCreateCmd)
}
@@ -144,6 +146,7 @@ func smartVolumeCreate(cmd *cobra.Command, args []string) {
ExcludeZones: flagCreateExcludeZones,
SubvolZonesOverlap: flagCreateSubvolZoneOverlap,
Force: flagCreateForce,
ProvisionerType: flagProvisionerType,
}

vol, err := client.VolumeCreate(req)
1 change: 1 addition & 0 deletions glusterd2/brick/types.go
@@ -38,6 +38,7 @@ type DeviceInfo struct {
LvName string
VgName string
RootDevice string
TotalSize uint64
}

// Brickinfo is the static information about the brick
32 changes: 21 additions & 11 deletions glusterd2/bricksplanner/planner.go
@@ -141,17 +141,25 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
}
eachBrickTpSize := uint64(float64(eachBrickSize) * req.SnapshotReserveFactor)

mntopts := "rw,inode64,noatime,nouuid,discard"
if req.ProvisionerType == api.ProvisionerTypeLoop {
mntopts += ",loop"
}

tpsize := lvmutils.NormalizeSize(eachBrickTpSize)
tpmsize := lvmutils.GetPoolMetadataSize(eachBrickTpSize)
bricks = append(bricks, api.BrickReq{
Type: brickType,
Path: fmt.Sprintf("%s/%s/subvol%d/brick%d/brick", bricksMountRoot, req.Name, i+1, j+1),
BrickDirSuffix: "/brick",
TpName: fmt.Sprintf("tp_%s_s%d_b%d", req.Name, i+1, j+1),
LvName: fmt.Sprintf("brick_%s_s%d_b%d", req.Name, i+1, j+1),
Size: lvmutils.NormalizeSize(eachBrickSize),
TpSize: lvmutils.NormalizeSize(eachBrickTpSize),
TpMetadataSize: lvmutils.GetPoolMetadataSize(eachBrickTpSize),
TpSize: tpsize,
TpMetadataSize: tpmsize,
TotalSize: tpsize + tpmsize,
FsType: "xfs",
MntOpts: "rw,inode64,noatime,nouuid",
MntOpts: mntopts,
})
}

@@ -198,19 +206,20 @@ func PlanBricks(req *api.VolCreateReq) error {
// with device with expected space available.
numBricksAllocated := 0
for bidx, b := range sv.Bricks {
totalsize := b.TpSize + b.TpMetadataSize

for _, vg := range availableVgs {
_, zoneUsed := zones[vg.Zone]
if vg.AvailableSize >= totalsize && !zoneUsed && !vg.Used {
if vg.AvailableSize >= b.TotalSize && !zoneUsed && !vg.Used {
subvols[idx].Bricks[bidx].PeerID = vg.PeerID
subvols[idx].Bricks[bidx].VgName = vg.Name
subvols[idx].Bricks[bidx].RootDevice = vg.Device
subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName
if req.ProvisionerType == api.ProvisionerTypeLoop {
subvols[idx].Bricks[bidx].DevicePath = vg.Device + "/" + b.TpName + "/" + b.LvName + ".img"
}

zones[vg.Zone] = struct{}{}
numBricksAllocated++
vg.AvailableSize -= totalsize
vg.AvailableSize -= b.TotalSize
vg.Used = true
break
}
@@ -227,19 +236,20 @@
// but enough space is available in the devices
for bidx := numBricksAllocated; bidx < len(sv.Bricks); bidx++ {
b := sv.Bricks[bidx]
totalsize := b.TpSize + b.TpMetadataSize

for _, vg := range availableVgs {
_, zoneUsed := zones[vg.Zone]
if vg.AvailableSize >= totalsize && !zoneUsed {
if vg.AvailableSize >= b.TotalSize && !zoneUsed {
subvols[idx].Bricks[bidx].PeerID = vg.PeerID
subvols[idx].Bricks[bidx].VgName = vg.Name
subvols[idx].Bricks[bidx].RootDevice = vg.Device
subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName
if req.ProvisionerType == api.ProvisionerTypeLoop {
subvols[idx].Bricks[bidx].DevicePath = vg.Device + "/" + b.TpName + "/" + b.LvName + ".img"
}

zones[vg.Zone] = struct{}{}
numBricksAllocated++
vg.AvailableSize -= totalsize
vg.AvailableSize -= b.TotalSize
vg.Used = true
break
}
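
For reference, the DevicePath branches above reduce to the following hypothetical helper (not part of this commit): LVM bricks resolve to a logical volume under the VG, while loop bricks resolve to an image file under the registered hosting directory.

```
// Hypothetical helper mirroring the DevicePath selection in getBricksLayout
// and PlanBricks above.
func brickDevicePath(loopProvisioner bool, deviceOrVg, tpName, lvName string) string {
	if loopProvisioner {
		// Loop brick: an image file under the registered hosting directory,
		// e.g. /exports/tp_gv1_s1_b1/brick_gv1_s1_b1.img
		return deviceOrVg + "/" + tpName + "/" + lvName + ".img"
	}
	// LVM brick: a logical volume under the VG, e.g. /dev/<vg>/<lv>
	return "/dev/" + deviceOrVg + "/" + lvName
}
```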
14 changes: 13 additions & 1 deletion glusterd2/bricksplanner/utils.go
@@ -92,6 +92,11 @@ func GetAvailableVgs(req *api.VolCreateReq) ([]Vg, error) {
continue
}

// If Provisioner type does not match the requested provisioner type
if d.ProvisionerType != req.ProvisionerType {
continue
}

vgs = append(vgs, Vg{
Device: d.Device,
Name: d.VgName(),
@@ -117,8 +122,11 @@ func GetNewBrick(availableVgs []Vg, brickInfo brick.Brickstatus, vol *volume.Vol
brickSize := brickInfo.Size.Capacity
lvName := fmt.Sprintf("brick_%s_s%d_b%d", vol.Name, subVolIndex, brickIndex)
brickTpSize := uint64(float64(brickSize) * vol.SnapshotReserveFactor)
brickTpSize = lvmutils.NormalizeSize(brickTpSize)
tpmsize := lvmutils.GetPoolMetadataSize(brickTpSize)
for _, vg := range availableVgs {
if vg.AvailableSize >= brickTpSize {

newBrick = api.BrickReq{
Type: "brick",
Path: brickInfo.Info.Path,
@@ -127,13 +135,17 @@ func GetNewBrick(availableVgs []Vg, brickInfo brick.Brickstatus, vol *volume.Vol
LvName: lvName,
Size: brickSize,
TpSize: brickTpSize,
TpMetadataSize: lvmutils.GetPoolMetadataSize(brickTpSize),
TpMetadataSize: tpmsize,
FsType: "xfs",
MntOpts: "rw,inode64,noatime,nouuid",
PeerID: vg.PeerID,
VgName: vg.Name,
DevicePath: "/dev/" + vg.Name + "/" + lvName,
RootDevice: vg.Device,
TotalSize: brickTpSize + tpmsize,
}
if vol.ProvisionerType == api.ProvisionerTypeLoop {
newBrick.DevicePath = vg.Device + "/" + newBrick.TpName + "/" + newBrick.LvName + ".img"
}
vg.Used = true
break
1 change: 1 addition & 0 deletions glusterd2/commands/snapshot/snapshot-clone.go
@@ -229,6 +229,7 @@ func createCloneVolinfo(c transaction.TxnCtx) error {
newVol.ID = uuid.NewRandom()
newVol.Name = clonename
newVol.VolfileID = clonename
newVol.ProvisionerType = volinfo.ProvisionerType

if err = createSnapSubvols(newVol, volinfo, nodeData); err != nil {
log.WithError(err).WithFields(log.Fields{
5 changes: 5 additions & 0 deletions glusterd2/commands/snapshot/snapshot-create.go
@@ -757,6 +757,11 @@ func snapshotCreateHandler(w http.ResponseWriter, r *http.Request) {
return
}

if vol.ProvisionerType != api.ProvisionerTypeLvm && vol.ProvisionerType != "" {
restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, gderrors.ErrSnapNotSupported)
return
}

txn.Nodes = vol.Nodes()
txn.Steps = []*transaction.Step{
{
3 changes: 2 additions & 1 deletion glusterd2/commands/snapshot/snapshot-restore.go
@@ -216,6 +216,7 @@ func createRestoreVolinfo(snapinfo *snapshot.Snapinfo, vol *volume.Volinfo) volu
newVol.Transport = snapVol.Transport
newVol.Type = snapVol.Type
newVol.VolfileID = newVol.Name
newVol.ProvisionerType = snapVol.ProvisionerType
for idx, subvol := range snapVol.Subvols {
subvolType := volume.SubvolTypeToString(subvol.Type)
name := fmt.Sprintf("%s-%s-%d", vol.Name, strings.ToLower(subvolType), idx)
@@ -294,7 +295,7 @@ func cleanParentBricks(c transaction.TxnCtx) error {
return err
}

return volume.CleanBricks(&volinfo)
return volume.CleanBricksLvm(&volinfo)
}

func registerSnapRestoreStepFuncs() {
10 changes: 8 additions & 2 deletions glusterd2/commands/volumes/brick-replace-txn.go
@@ -8,12 +8,18 @@ import (
)

func prepareBricks(c transaction.TxnCtx) error {
var volInfo volume.Volinfo
if err := c.Get("volinfo", &volInfo); err != nil {
return err
}
var req api.BrickReq
if err := c.Get("newBrick", &req); err != nil {
return err
}
err := PrepareBrick(req, c)
return err
if volInfo.ProvisionerType == api.ProvisionerTypeLoop {
return PrepareBrickLoop(req, c)
}
return PrepareBrickLvm(req, c)
}

func replaceVolinfo(c transaction.TxnCtx) error {
13 changes: 7 additions & 6 deletions glusterd2/commands/volumes/brick-replace.go
@@ -104,12 +104,13 @@ LOOP:

subvolumes := make([]api.SubvolReq, 0)
volreq := api.VolCreateReq{
Subvols: subvolumes,
Size: vol.Capacity,
LimitPeers: req.LimitPeers,
LimitZones: req.LimitZones,
ExcludePeers: req.ExcludePeers,
ExcludeZones: req.ExcludeZones,
Subvols: subvolumes,
Size: vol.Capacity,
LimitPeers: req.LimitPeers,
LimitZones: req.LimitZones,
ExcludePeers: req.ExcludePeers,
ExcludeZones: req.ExcludeZones,
ProvisionerType: vol.ProvisionerType,
}
availableVgs, err := bricksplanner.GetAvailableVgs(&volreq)
if err != nil {
2 changes: 1 addition & 1 deletion glusterd2/commands/volumes/volume-create-txn.go
@@ -126,7 +126,6 @@ func populateSubvols(volinfo *volume.Volinfo, req *api.VolCreateReq) error {
}

func newVolinfo(req *api.VolCreateReq) (*volume.Volinfo, error) {

volinfo := &volume.Volinfo{
ID: uuid.NewRandom(),
Name: req.Name,
@@ -136,6 +135,7 @@ func newVolinfo(req *api.VolCreateReq) (*volume.Volinfo, error) {
DistCount: len(req.Subvols),
SnapList: []string{},
SnapshotReserveFactor: req.SnapshotReserveFactor,
ProvisionerType: req.ProvisionerType,
Auth: volume.VolAuth{
Username: uuid.NewRandom().String(),
Password: uuid.NewRandom().String(),
4 changes: 4 additions & 0 deletions glusterd2/commands/volumes/volume-create.go
@@ -151,6 +151,10 @@ func CreateVolume(ctx context.Context, req api.VolCreateReq) (status int, err er
return http.StatusBadRequest, gderrors.ErrReservedGroupProfile
}

if req.ProvisionerType == "" {
req.ProvisionerType = api.ProvisionerTypeLvm
}

if req.Size > 0 {
applyDefaults(&req)
