Skip to content
This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

Commit

Permalink
Pull more operating system specific data from yaml files.
Browse files Browse the repository at this point in the history
This patch includes program paths.
  • Loading branch information
purpleidea committed May 7, 2014
1 parent 32fdb61 commit 9722c05
Show file tree
Hide file tree
Showing 8 changed files with 95 additions and 38 deletions.
13 changes: 11 additions & 2 deletions lib/facter/gluster_ports.rb
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,15 @@

require 'facter'

# Resolve the path to the gluster binary. The preferred source is the
# external fact written out by params.pp; fall back to `which`, and
# finally to the stock distro location if neither yields a path.
gluster = Facter.value('gluster_program_gluster').to_s.chomp
gluster = `which gluster`.chomp if gluster.empty?
gluster = '/usr/sbin/gluster' if gluster.empty?

# find the module_vardir
dir = Facter.value('puppet_vardirtmp') # nil if missing
if dir.nil? # let puppet decide if present!
Expand All @@ -43,11 +52,11 @@

# we need the script installed first to be able to generate the port facts...
if not(xmlfile.nil?) and File.exist?(xmlfile)
volumes = `/usr/sbin/gluster volume list`
volumes = `#{gluster} volume list`
if $?.exitstatus == 0
volumes.split.each do |x|
# values come out as comma separated strings for direct usage
cmd = '/usr/sbin/gluster volume status --xml | '+xmlfile+" ports --volume '"+x+"' --host '"+host+"'"
cmd = gluster+' volume status --xml | '+xmlfile+" ports --volume '"+x+"' --host '"+host+"'"
result = `#{cmd}`
if $?.exitstatus == 0
found[x] = result
Expand Down
9 changes: 8 additions & 1 deletion lib/facter/gluster_version.rb
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,14 @@

require 'facter'

gluster = '/usr/sbin/gluster'
# Resolve the path to the gluster binary. The preferred source is the
# external fact written out by params.pp; fall back to `which`, and
# finally to the stock distro location if neither yields a path.
gluster = Facter.value('gluster_program_gluster').to_s.chomp
gluster = `which gluster`.chomp if gluster.empty?
gluster = '/usr/sbin/gluster' if gluster.empty?

# create the fact if the gluster executable exists
if File.exist?(gluster)
Expand Down
30 changes: 15 additions & 15 deletions manifests/brick.pp
Original file line number Diff line number Diff line change
Expand Up @@ -149,8 +149,8 @@
# get the raw /dev/vdx device, and append the partition number
$dev0 = "`/bin/readlink -e ${dev}`" # resolve to /dev/<device>

$part_mklabel = "/sbin/parted -s -m -a optimal ${dev0} mklabel ${valid_labeltype}"
$part_mkpart = "/sbin/parted -s -m -a optimal ${dev0} mkpart primary 0% 100%"
$part_mklabel = "${::gluster::params::program_parted} -s -m -a optimal ${dev0} mklabel ${valid_labeltype}"
$part_mkpart = "${::gluster::params::program_parted} -s -m -a optimal ${dev0} mkpart primary 0% 100%"

#
$dev1 = $partition ? {
Expand Down Expand Up @@ -180,15 +180,15 @@
$lvm_dataalignment = inline_template('<%= @raid_su.to_i*@raid_sw.to_i %>')

$lvm_pvcreate = "${raid_su}${raid_sw}" ? { # both empty ?
'' => "/sbin/pvcreate ${dev1}",
default => "/sbin/pvcreate --dataalignment ${lvm_dataalignment}K ${dev1}",
'' => "${::gluster::params::program_pvcreate} ${dev1}",
default => "${::gluster::params::program_pvcreate} --dataalignment ${lvm_dataalignment}K ${dev1}",
}

$lvm_vgcreate = "/sbin/vgcreate ${lvm_vgname} ${dev1}"
$lvm_vgcreate = "${::gluster::params::program_vgcreate} ${lvm_vgname} ${dev1}"

# match --virtualsize with 100% of available vg by default
$lvm_thinp_virtsize = "${lvm_virtsize}" ? { # --virtualsize
'' => "`/sbin/vgs -o size --units b --noheadings ${lvm_vgname}`",
'' => "`${::gluster::params::program_vgs} -o size --units b --noheadings ${lvm_vgname}`",
default => "${lvm_virtsize}",
}

Expand All @@ -208,7 +208,7 @@
# MIRROR: http://man7.org/linux/man-pages/man7/lvmthin.7.html
# TODO: is this the optimal setup for thin-p ?
$lvm_thinp_lvcreate_cmdlist = [
'/sbin/lvcreate',
"${::gluster::params::program_lvcreate}",
"--thinpool ${lvm_vgname}/${lvm_tpname}", # thinp
'--extents 100%FREE', # let lvm figure out the --size
"--virtualsize ${lvm_thinp_virtsize}",
Expand All @@ -221,7 +221,7 @@
# creates dev /dev/vgname/lvname
$lvm_lvcreate = $lvm_thinp ? {
true => "${lvm_thinp_lvcreate}",
default => "/sbin/lvcreate --extents 100%PVS -n ${lvm_lvname} ${lvm_vgname}",
default => "${::gluster::params::program_lvcreate} --extents 100%PVS -n ${lvm_lvname} ${lvm_vgname}",
}
}

Expand Down Expand Up @@ -262,7 +262,7 @@
include gluster::brick::xfs
$exec_requires = [Package["${::gluster::params::package_xfsprogs}"]]

$xfs_arg00 = "/sbin/mkfs.${valid_fstype}"
$xfs_arg00 = "${::gluster::params::program_mkfs_xfs}"

$xfs_arg01 = '-q' # shh!

Expand Down Expand Up @@ -308,7 +308,7 @@
$xfs_cmd = join(delete($xfs_cmdlist, ''), ' ')

# TODO: xfs_admin doesn't have a --quiet flag. silence it...
$xfs_admin = "/usr/sbin/xfs_admin -U '${valid_fsuuid}' ${dev2}"
$xfs_admin = "${::gluster::params::program_xfs_admin} -U '${valid_fsuuid}' ${dev2}"

# mkfs w/ uuid command
$mkfs_exec = "${xfs_cmd} && ${xfs_admin}"
Expand Down Expand Up @@ -347,7 +347,7 @@
$exec_requires = [Package["${::gluster::params::package_e2fsprogs}"]]

# mkfs w/ uuid command
$mkfs_exec = "/sbin/mkfs.${valid_fstype} -U '${valid_fsuuid}' ${dev2}"
$mkfs_exec = "${::gluster::params::program_mkfs_ext4} -U '${valid_fsuuid}' ${dev2}"

# mount options
$options_list = [] # TODO
Expand Down Expand Up @@ -404,7 +404,7 @@
exec { "${lvm_pvcreate}":
logoutput => on_failure,
unless => [ # if one element is true, this *doesn't* run
"/sbin/pvdisplay ${dev1}",
"${::gluster::params::program_pvdisplay} ${dev1}",
'/bin/false', # TODO: add more criteria
],
require => $exec_requires,
Expand All @@ -417,7 +417,7 @@
exec { "${lvm_vgcreate}":
logoutput => on_failure,
unless => [ # if one element is true, this *doesn't* run
"/sbin/vgdisplay ${lvm_vgname}",
"${::gluster::params::program_vgdisplay} ${lvm_vgname}",
'/bin/false', # TODO: add more criteria
],
require => $exec_requires,
Expand All @@ -430,8 +430,8 @@
exec { "${lvm_lvcreate}":
logoutput => on_failure,
unless => [ # if one element is true, this *doesn't* run
#"/sbin/lvdisplay ${lvm_lvname}", # nope!
"/sbin/lvs --separator ':' | /usr/bin/tr -d ' ' | /bin/awk -F ':' '{print \$1}' | /bin/grep -q '${lvm_lvname}'",
#"${::gluster::params::program_lvdisplay} ${lvm_lvname}", # nope!
"${::gluster::params::program_lvs} --separator ':' | /usr/bin/tr -d ' ' | /bin/awk -F ':' '{print \$1}' | /bin/grep -q '${lvm_lvname}'",
'/bin/false', # TODO: add more criteria
],
require => $exec_requires,
Expand Down
8 changes: 4 additions & 4 deletions manifests/mount/base.pp
Original file line number Diff line number Diff line change
Expand Up @@ -64,20 +64,20 @@
#

# modprobe fuse if it's missing
exec { '/sbin/modprobe fuse':
exec { "${::gluster::params::program_modprobe} fuse":
logoutput => on_failure,
onlyif => '/usr/bin/test -z "`/bin/dmesg | /bin/grep -i fuse`"',
alias => 'gluster-fuse',
}
#exec { '/sbin/modprobe fuse':
#exec { "${::gluster::params::program_modprobe} fuse":
# logoutput => on_failure,
# unless => "/sbin/lsmod | /bin/grep -q '^fuse'",
# unless => "${::gluster::params::program_lsmod} | /bin/grep -q '^fuse'",
# alias => 'gluster-modprobe-fuse',
#}

# TODO: will this autoload the fuse module?
#file { '/etc/modprobe.d/fuse.conf':
# content => "fuse\n", # TODO: "install fuse /sbin/modprobe --ignore-install fuse ; /bin/true\n" ?
# content => "fuse\n", # TODO: "install fuse ${::gluster::params::program_modprobe} --ignore-install fuse ; /bin/true\n" ?
# owner => root,
# group => root,
# mode => 644, # u=rw,go=r
Expand Down
40 changes: 40 additions & 0 deletions manifests/params.pp
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

class gluster::params(
# packages...
$package_glusterfs = 'glusterfs',
$package_glusterfs_fuse = 'glusterfs-fuse',
$package_glusterfs_server = 'glusterfs-server',
Expand All @@ -28,11 +29,50 @@
$package_python_lxml = 'python-lxml',
$package_fping = 'fping',

# programs...
$program_gluster = '/usr/sbin/gluster',

$program_modprobe = '/sbin/modprobe',
$program_lsmod = '/sbin/lsmod',

$program_parted = '/sbin/parted',
$program_pvcreate = '/sbin/pvcreate',
$program_vgcreate = '/sbin/vgcreate',
$program_lvcreate = '/sbin/lvcreate',
$program_vgs = '/sbin/vgs',
$program_lvs = '/sbin/lvs',
$program_pvdisplay = '/sbin/pvdisplay',
$program_vgdisplay = '/sbin/vgdisplay',
#$program_lvdisplay = '/sbin/lvdisplay',
	$program_xfs_admin = '/usr/sbin/xfs_admin',
$program_mkfs_xfs = '/sbin/mkfs.xfs',
$program_mkfs_ext4 = '/sbin/mkfs.ext4',

$program_fping = '/usr/sbin/fping',

# misc...
$misc_gluster_reload = '/sbin/service glusterd reload',

# comment...
$comment = ''
) {
if "${comment}" == '' {
warning('Unable to load yaml data/ directory!')
}

include puppet::facter
$factbase = "${::puppet::facter::base}"
$hash = {
'gluster_program_gluster' => $program_gluster,
}
# create a custom external fact!
# Export the resolved program path as a custom external fact so that the
# facter scripts (lib/facter/gluster_*.rb) can find the gluster binary.
# NOTE(review): the original was missing the comma after the `content`
# attribute, which is a Puppet parse error — fixed here.
file { "${factbase}gluster_program.yaml":
	ensure  => present,
	content => inline_template('<%= @hash.to_yaml %>'),
	owner   => root,
	group   => root,
	mode    => 644,	# u=rw,go=r
}
}

# vim: ts=8
24 changes: 12 additions & 12 deletions manifests/volume.pp
Original file line number Diff line number Diff line change
Expand Up @@ -199,8 +199,8 @@
# get the list of bricks fqdn's that don't have our fqdn
$others = inline_template("<%= @valid_bricks.find_all{|x| x.split(':')[0] != '${fqdn}' }.collect {|y| y.split(':')[0] }.join(' ') %>")

$fping = sprintf("/usr/sbin/fping -q %s", $others)
$status = sprintf("/usr/sbin/gluster peer status --xml | ${vardir}/xml.py connected %s", $others)
$fping = sprintf("${::gluster::params::program_fping} -q %s", $others)
$status = sprintf("${::gluster::params::program_gluster} peer status --xml | ${vardir}/xml.py connected %s", $others)

$onlyif = $ping ? {
false => "${status}",
Expand Down Expand Up @@ -230,10 +230,10 @@

# work around stuck connection state (4) of: 'Accepted peer request'...
exec { "gluster-volume-stuck-${name}":
command => '/sbin/service glusterd reload',
command => "${::gluster::params::misc_gluster_reload}",
logoutput => on_failure,
unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -", # reconnect if it doesn't exist
onlyif => sprintf("/usr/sbin/gluster peer status --xml | ${vardir}/xml.py stuck %s", $others),
unless => "${::gluster::params::program_gluster} volume list | /bin/grep -qxF '${name}' -", # reconnect if it doesn't exist
onlyif => sprintf("${::gluster::params::program_gluster} peer status --xml | ${vardir}/xml.py stuck %s", $others),
notify => $again ? {
false => undef,
default => Common::Again::Delta['gluster-exec-again'],
Expand All @@ -253,7 +253,7 @@
	# FIXME: it would be great to have an --allow-root-storage type option
# instead, so that we don't inadvertently force some other bad thing...
file { "${vardir}/volume/create-${name}.sh":
content => inline_template("#!/bin/bash\n/bin/sleep 5s && /usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec} force > >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stdout') 2> >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stderr' >&2) || (${rmdir_volume_dirs} && /bin/false)\nexit \$?\n"),
content => inline_template("#!/bin/bash\n/bin/sleep 5s && ${::gluster::params::program_gluster} volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec} force > >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stdout') 2> >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stderr' >&2) || (${rmdir_volume_dirs} && /bin/false)\nexit \$?\n"),
owner => root,
group => root,
mode => 755,
Expand Down Expand Up @@ -289,7 +289,7 @@
exec { "gluster-volume-create-${name}":
command => "${vardir}/volume/create-${name}.sh",
logoutput => on_failure,
unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -", # add volume if it doesn't exist
unless => "${::gluster::params::program_gluster} volume list | /bin/grep -qxF '${name}' -", # add volume if it doesn't exist
onlyif => $onlyif,
#before => TODO?,
require => $require,
Expand All @@ -299,10 +299,10 @@

if $start == true {
# try to start volume if stopped
exec { "/usr/sbin/gluster volume start ${name}":
exec { "${::gluster::params::program_gluster} volume start ${name}":
logoutput => on_failure,
onlyif => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -",
unless => "/usr/sbin/gluster volume status ${name}", # returns false if stopped
onlyif => "${::gluster::params::program_gluster} volume list | /bin/grep -qxF '${name}' -",
unless => "${::gluster::params::program_gluster} volume status ${name}", # returns false if stopped
notify => $shorewall ? {
false => undef,
default => $again ? {
Expand All @@ -323,9 +323,9 @@
# make its data inaccessible. Do you want to continue? (y/n)
# TODO: http://community.gluster.org/q/how-can-i-make-automatic-scripts/
# TODO: gluster --mode=script volume stop ...
exec { "/usr/bin/yes | /usr/sbin/gluster volume stop ${name}":
exec { "/usr/bin/yes | ${::gluster::params::program_gluster} volume stop ${name}":
logoutput => on_failure,
onlyif => "/usr/sbin/gluster volume status ${name}", # returns true if started
onlyif => "${::gluster::params::program_gluster} volume status ${name}", # returns true if started
require => $settled ? { # require if type exists
false => undef,
default => Exec["gluster-volume-create-${name}"],
Expand Down
7 changes: 4 additions & 3 deletions manifests/volume/property.pp
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
) {
include gluster::xml
include gluster::vardir
include gluster::params
include gluster::volume::property::data

#$vardir = $::gluster::vardir::module_vardir # with trailing slash
Expand Down Expand Up @@ -167,9 +168,9 @@
# set a volume property only if value doesn't match what is available
# FIXME: check that the value we're setting isn't the default
# FIXME: you can check defaults with... gluster volume set help | ...
exec { "/usr/sbin/gluster volume set ${volume} ${key} ${safe_value}":
unless => "/usr/bin/test \"`/usr/sbin/gluster volume --xml info ${volume} | ${vardir}/xml.py property --key '${key}'`\" = '${safe_value}'",
onlyif => "/usr/sbin/gluster volume list | /bin/grep -qxF '${volume}' -",
exec { "${::gluster::params::program_gluster} volume set ${volume} ${key} ${safe_value}":
unless => "/usr/bin/test \"`${::gluster::params::program_gluster} volume --xml info ${volume} | ${vardir}/xml.py property --key '${key}'`\" = '${safe_value}'",
onlyif => "${::gluster::params::program_gluster} volume list | /bin/grep -qxF '${volume}' -",
logoutput => on_failure,
require => [
Gluster::Volume[$volume],
Expand Down
2 changes: 1 addition & 1 deletion vagrant/gluster/puppet/modules/puppet

0 comments on commit 9722c05

Please sign in to comment.