diff --git a/dashboard/src/components/gdeploy/Gdeploy-Wizard-Bricks.js b/dashboard/src/components/gdeploy/Gdeploy-Wizard-Bricks.js
index 5577045c..c13a895b 100644
--- a/dashboard/src/components/gdeploy/Gdeploy-Wizard-Bricks.js
+++ b/dashboard/src/components/gdeploy/Gdeploy-Wizard-Bricks.js
@@ -52,7 +52,7 @@ class WizardBricksStep extends Component {
Mount Point |
Thinp |
RAID |
- Strip Size |
+ Stripe Size |
Disk Count |
{bricksRow}
@@ -116,8 +116,8 @@ const BrickRow = ({brick, index, changeCallBack, deleteCallBack}) => {
changeCallBack(index, "stripSize", e.target.value)}
+ value={brick.stripeSize}
+ onChange={(e) => changeCallBack(index, "stripeSize", e.target.value)}
/>
|
diff --git a/dashboard/src/helpers/GdeployUtil.js b/dashboard/src/helpers/GdeployUtil.js
index f11578df..49cf1142 100644
--- a/dashboard/src/helpers/GdeployUtil.js
+++ b/dashboard/src/helpers/GdeployUtil.js
@@ -29,18 +29,18 @@ var GdeployUtil = {
bricks: [
{ name: "engine", device: "vdb",
brick_dir: "/gluster_bricks/engine", size: "150",
- thinp: true, raidType: "RAID6", stripSize: "256",
+ thinp: true, raidType: "raid6", stripeSize: "256",
diskCount: "12"
},
{ name: "data", device: "vdb",
brick_dir: "/gluster_bricks/data", size: "500",
- thinp: true, raidType: "RAID6",
- stripSize: "256", diskCount: "12"
+ thinp: true, raidType: "raid6",
+ stripeSize: "256", diskCount: "12"
},
{ name: "vmstore", device: "vdc",
brick_dir: "/gluster_bricks/vmstore", size: "500",
- thinp: true, raidType: "RAID6",
- stripSize: "256", diskCount: "12"
+ thinp: true, raidType: "raid6",
+ stripeSize: "256", diskCount: "12"
},
]
}
@@ -65,20 +65,26 @@ var GdeployUtil = {
return this.writeConfigFile(filePath, configString)
},
createYumConfig(subscription) {
- //Required only if we have to install some packages
- if (subscription.rpms != null && subscription.rpms.length > 0) {
- return {
+ //Required only if we have to install some packages.
+ if (subscription.rpms.length > 0) {
+ const yumConfig = {
action: 'install',
packages: subscription.rpms,
update: subscription.yumUpdate ? 'yes' : 'no',
gpgcheck: subscription.gpgCheck ? 'yes' : 'no'
}
                //Required only if we have to add yum repos. If we have a CDN
                //username then it is treated as a CDN repo and we should not add repos here.
+ if (subscription.repos.length > 0 && subscription.username.trim().length === 0) {
+ yumConfig.repos = subscription.repos
+ }
+ return yumConfig
}
return null
},
createRedhatSubscription(subscription) {
//RedHat Subscription can be done only if cdn username is specified
- if (subscription.username != null && subscription.username.length > 0) {
+ if (subscription.username.length > 0) {
return {
action: 'register',
username: subscription.username,
@@ -92,6 +98,15 @@ var GdeployUtil = {
createBrickConfig(glusterModel) {
const brickConfig = { pvConfig: {}, vgConfig: {}, lvConfig: [], thinPoolConfig: {} }
glusterModel.bricks.forEach(function(brick, index) {
            //If any brick specifies a RAID param, add it. Although the RAID params are defined per brick,
            //Gdeploy takes only one global RAID param for all the devices
+ if(!brickConfig.hasOwnProperty("raidParam") && brick.hasOwnProperty('raidType') && brick.raidType.length){
+ brickConfig.raidParam = {
+ disktype: brick.raidType,
+ diskcount: brick.diskCount,
+ stripesize: brick.stripeSize
+ }
+ }
//If there is no PV added for the given device, add it now.
if (!brickConfig.pvConfig.hasOwnProperty(brick.device)) {
brickConfig.pvConfig[brick.device] = { action: 'create', devices: brick.device }
@@ -115,7 +130,6 @@ var GdeployUtil = {
thinpool.vgname = VG_NAME + brick.device
thinpool.lvtype = 'thinpool'
thinpool.poolmetadatasize = '10MB'
- thinpool.chunksize = '1024k'
thinpool.size = parseInt(brick.size)
brickConfig.thinPoolConfig[brick.device] = thinpool
}
@@ -138,6 +152,18 @@ var GdeployUtil = {
//Replace the host sections in template with real hosts
if (section === 'hosts') {
gdeployConfig['hosts'] = hosts
+ if (brickConfig.hasOwnProperty("raidParam")) {
+ //Not truly ini format. But we need RAID params in the following format.
+ // [disktype]
+ // raid6
+ // [diskcount]
+ // 4
+ // [stripesize]
+ // 256
+ gdeployConfig.disktype = [brickConfig.raidParam.disktype]
+ gdeployConfig.diskcount = [brickConfig.raidParam.diskcount]
+ gdeployConfig.stripesize = [brickConfig.raidParam.stripesize]
+ }
if (redhatSubscription != null) {
gdeployConfig['RH-subscription'] = redhatSubscription
}
@@ -215,7 +241,6 @@ var GdeployUtil = {
} else {
for (var key in config[section]) {
if (config[section].hasOwnProperty(key)) {
- Number.isInteger
let value = config[section][key]
if (typeof value === 'string' || value instanceof String || Number.isInteger(value)) {
configString = this.appendLine(configString, key + "=" + value)
diff --git a/dashboard/static/gdeploy-templates/ovirt-gluster-hc.conf b/dashboard/static/gdeploy-templates/ovirt-gluster-hc.conf
index 9a5063e9..1fccaec7 100644
--- a/dashboard/static/gdeploy-templates/ovirt-gluster-hc.conf
+++ b/dashboard/static/gdeploy-templates/ovirt-gluster-hc.conf
@@ -5,23 +5,26 @@ host1
host2
host3
-[disktype]
-raid6
+## RAID Parameters will be created by gdeploy plugin
+#[disktype]
+#raid6
-[diskcount]
-4
+#[diskcount]
+#4
-[stripesize]
-256
+#[stripesize]
+#256
[yum1]
action=install
# Setup ntp on the servers before any other operations are done
# Disable the existing public servers
-#[shell1]
-#action=execute
-#command="sed -i 's/^\(server .*iburst\)/#\1/' /etc/ntp.conf"
+[shell1]
+action=execute
# We need to wrap the string in double quotes ("") to avoid truncation in the ini module.
# More info at https://github.com/npm/ini/issues/44
command="sed -i 's/^\(server .*iburst\)/#\1/' /etc/ntp.conf"
# Add custom server
[update-file1]
@@ -37,7 +40,7 @@ service=ntpd
action=restart
service=ntpd
-[shell1]
+[shell2]
action=execute
command=vdsm-tool configure --force
|