Set OSD pool size when creating `ceph` and `cephfs` storage pools #14044
base: main
Changes from all commits: 99f1e58, 159de70, 86b599c, 28eeec4, 2288205, d805371, ae68415, ce95425, ec0d03b, 50dc7ea, 6210423
@@ -51,17 +51,33 @@ runs:
}
trap cleanup ERR HUP INT TERM

ephemeral_disk="${{ steps.free_ephemeral_disk.outputs.ephemeral_disk }}"
sudo parted "${ephemeral_disk}" --script mklabel gpt
sudo parted "${ephemeral_disk}" --script mkpart primary 0% 33%
sudo parted "${ephemeral_disk}" --script mkpart primary 33% 66%
sudo parted "${ephemeral_disk}" --script mkpart primary 66% 100%

disk1="$(losetup -f)"
sudo losetup "${disk1}" "${ephemeral_disk}1"
disk2="$(losetup -f)"
sudo losetup "${disk2}" "${ephemeral_disk}2"
disk3="$(losetup -f)"
sudo losetup "${disk3}" "${ephemeral_disk}3"
Review comment: I take it that MicroCeph still cannot take partitions directly, right? How about adding a link to canonical/microceph#251 in a comment?
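If that suggestion is taken up, the link could travel as a comment next to the loop-device workaround; a minimal sketch of what that might look like (the comment wording is an assumption, not part of this PR):

```sh
# MicroCeph does not currently accept raw partitions directly, so each
# partition is exposed through a loop device instead.
# See https://github.com/canonical/microceph/issues/251
disk1="$(losetup -f)"
sudo losetup "${disk1}" "${ephemeral_disk}1"
```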

sudo snap install microceph --channel "${{ inputs.microceph-channel }}"
sudo microceph cluster bootstrap
sudo microceph.ceph config set global osd_pool_default_size 1
sudo microceph.ceph config set global mon_allow_pool_size_one true
Review comment: How about having the 2
sudo microceph.ceph config set global mon_allow_pool_delete true
sudo microceph.ceph config set global osd_memory_target 939524096
sudo microceph.ceph osd crush rule rm replicated_rule
sudo microceph.ceph osd crush rule create-replicated replicated default osd
for flag in nosnaptrim nobackfill norebalance norecover noscrub nodeep-scrub; do
  sudo microceph.ceph osd set $flag
done
sudo microceph disk add --wipe "${{ steps.free_ephemeral_disk.outputs.ephemeral_disk }}"
sudo microceph disk add --wipe "${disk1}"
sudo microceph disk add --wipe "${disk2}"
sudo microceph disk add --wipe "${disk3}"
sudo rm -rf /etc/ceph
sudo ln -s /var/snap/microceph/current/conf/ /etc/ceph
sudo microceph enable rgw
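As a rough sanity check of the resulting single-replica setup (not part of the PR; standard Ceph commands via the MicroCeph wrapper):

```sh
# Confirm the three loop-device-backed OSDs joined the cluster.
sudo microceph.ceph osd tree

# Confirm the default replication size that new pools will inherit (expected: 1 here).
sudo microceph.ceph config get mon osd_pool_default_size

# Once pools exist, their individual replication size is visible here.
sudo microceph.ceph osd pool ls detail
```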
@@ -2517,6 +2517,10 @@ Adds support for using a bridge network with a specified VLAN ID as an OVN uplink
Adds `logical_cpus` field to `GET /1.0/cluster/members/{name}/state` which
contains the total number of logical CPUs available when LXD started.

<<<<<<< HEAD
Review comment: Merge/rebase leftover.

## `vm_limits_cpu_pin_strategy`

Adds a new {config:option}`instance-resource-limits:limits.cpu.pin_strategy` configuration option for virtual machines. This option controls the CPU pinning strategy. When set to `none`, CPU auto pinning is disabled. When set to `auto`, CPU auto pinning is enabled.
## `storage_ceph_osd_pool_size`

This introduces the configuration keys {config:option}`storage-ceph-pool-conf:ceph.osd.pool_size` and {config:option}`storage-cephfs-pool-conf:cephfs.osd_pool_size`, used when adding or updating a `ceph` or `cephfs` storage pool to instruct LXD to set the replication size for the underlying OSD pools.
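For illustration, the new keys would be used roughly like this when creating pools (the pool names and the CephFS `source` value below are made up for the example):

```sh
# RBD-backed pool whose underlying OSD pool is created with replication size 1.
lxc storage create my-ceph ceph ceph.osd.pool_size=1

# CephFS-backed pool with the equivalent setting.
lxc storage create my-cephfs cephfs source=my-filesystem cephfs.osd_pool_size=1
```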
@@ -4894,6 +4894,13 @@ Specify either a cron expression (`<minute> <hour> <dom> <month> <dow>`), a comm

```

```{config:option} ceph.osd.pool_size storage-ceph-pool-conf
:defaultdesc: "`3`"
:shortdesc: "Number of RADOS object replicas. Set to 1 for no replication."
:type: "string"

```

```{config:option} ceph.rbd.clone_copy storage-ceph-pool-conf
:defaultdesc: "`true`"
:shortdesc: "Whether to use RBD lightweight clones"
@@ -5074,6 +5081,14 @@ This option specifies the number of OSD pool placement groups (`pg_num`) to use
when creating a missing OSD pool.
```

```{config:option} cephfs.osd_pool_size storage-cephfs-pool-conf
:defaultdesc: "`3`"
:shortdesc: "Number of RADOS object replicas. Set to 1 for no replication."
:type: "string"
This option specifies the number of OSD pool replicas to use
when creating an OSD pool.
Review comment: Why have a long description for
```

```{config:option} cephfs.path storage-cephfs-pool-conf
:defaultdesc: "`/`"
:shortdesc: "The base path for the CephFS mount"
@@ -5,7 +5,9 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"os/exec"
	"strconv"
	"strings"

	"github.com/canonical/lxd/lxd/migration"
@@ -112,6 +114,29 @@ func (d *ceph) FillConfig() error {
		d.config["ceph.osd.pg_num"] = "32"
	}

	if d.config["ceph.osd.pool_size"] == "" {
		size, err := shared.TryRunCommand("ceph",
			"--name", "client.%s"+d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"config",
			"get",
			"mon",
			"osd_pool_default_size",
			"--format",
			"json")
		if err != nil {
			return err
		}

		var sizeInt int
		err = json.Unmarshal([]byte(size), &sizeInt)
		if err != nil {
			return err
		}

		d.config["ceph.osd.pool_size"] = strconv.Itoa(sizeInt)
	}

	return nil
}

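For reference, the monitor query issued above looks roughly like this from the command line; `--format json` makes the value a bare JSON number, which is why it unmarshals into an `int` (user and cluster names are example values):

```sh
ceph --name client.admin --cluster ceph config get mon osd_pool_default_size --format json
# Example output:
# 3
```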
@@ -158,7 +183,7 @@ func (d *ceph) Create() error {
	if !poolExists {
		// Create new osd pool.
		_, err := shared.TryRunCommand("ceph",
			"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
			"--name", "client.%s"+d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"osd",
			"pool",

@@ -171,6 +196,20 @@ func (d *ceph) Create() error {

		revert.Add(func() { _ = d.osdDeletePool() })

		_, err = shared.TryRunCommand("ceph",
			"--name", "client.%s"+d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"osd",
			"pool",
			"set",
			d.config["ceph.osd.pool_name"],
			"size",
			d.config["ceph.osd.pool_size"],
			"--yes-i-really-mean-it")
		if err != nil {
			return err
		}

		// Initialize the pool. This is not necessary but allows the pool to be monitored.
		_, err = shared.TryRunCommand("rbd",
			"--id", d.config["ceph.user.name"],

@@ -217,7 +256,7 @@ func (d *ceph) Create() error {

		// Use existing OSD pool.
		msg, err := shared.RunCommand("ceph",
			"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
			"--name", "client.%s"+d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"osd",
			"pool",
@@ -303,6 +342,13 @@ func (d *ceph) Validate(config map[string]string) error {
		// defaultdesc: `32`
		// shortdesc: Number of placement groups for the OSD storage pool
		"ceph.osd.pg_num": validate.IsAny,
		// lxdmeta:generate(entities=storage-ceph; group=pool-conf; key=ceph.osd.pool_size)
		//
		// ---
		// type: string
		// defaultdesc: `3`
		// shortdesc: Number of RADOS object replicas. Set to 1 for no replication.
		"ceph.osd.pool_size": validate.Optional(validate.IsInRange(1, math.MaxInt64)),
Review comment: While technically correct, maybe we could put an upper bound that's a little less... gigantic :) Hardcoding

Review comment: I just noticed that
		// lxdmeta:generate(entities=storage-ceph; group=pool-conf; key=ceph.osd.pool_name)
		//
		// ---
@@ -358,6 +404,23 @@ func (d *ceph) Validate(config map[string]string) error {

// Update applies any driver changes required from a configuration change.
func (d *ceph) Update(changedConfig map[string]string) error {
	newSize := changedConfig["ceph.osd.pool_size"]
	if newSize != d.config["ceph.osd.pool_size"] && newSize != "" {
		_, err := shared.TryRunCommand("ceph",
			"--name", "client.%s"+d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"osd",
			"pool",
			"set",
			d.config["ceph.osd.pool_name"],
			"size",
			newSize,
			"--yes-i-really-mean-it")
		if err != nil {
			return err
		}
	}

	return nil
}

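On a live pool, this Update path would be exercised by changing the key after creation, along these lines (the pool name is made up; by default the OSD pool name matches the LXD pool name):

```sh
# LXD then runs "ceph osd pool set <pool> size <n> --yes-i-really-mean-it" against the cluster.
lxc storage set my-ceph ceph.osd.pool_size 3

# The change can be confirmed from the Ceph side.
ceph osd pool get my-ceph size
```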
@@ -388,7 +451,7 @@ func (d *ceph) GetResources() (*api.ResourcesStoragePool, error) {

	err := shared.RunCommandWithFds(context.TODO(), nil, &stdout,
		"ceph",
		"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
		"--name", "client.%s"+d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"df",
		"-f", "json")
Review comment: I like it and didn't know it could take %.
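That remark appears to be about the switch from `fmt.Sprintf` to plain string concatenation: concatenating leaves the literal `%s` in the `--name` argument. A quick shell illustration of the difference (the `admin` user name is just an example):

```sh
# "client.%s" + "admin" in Go keeps the %s verbatim:
echo "client.%s""admin"        # prints: client.%sadmin

# fmt.Sprintf("client.%s", "admin") substitutes it:
printf 'client.%s\n' "admin"   # prints: client.admin
```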