diff --git a/ibmcloud_scale_templates/sub_modules/instance_template/README.md b/ibmcloud_scale_templates/sub_modules/instance_template/README.md index 035e7805..3f082a1b 100644 --- a/ibmcloud_scale_templates/sub_modules/instance_template/README.md +++ b/ibmcloud_scale_templates/sub_modules/instance_template/README.md @@ -87,7 +87,7 @@ Below steps will provision IBM Cloud resources (compute and storage instances in | [bastion_ssh_private_key](#input_bastion_ssh_private_key) | Bastion SSH private key path, which will be used to login to bastion host. | `string` | | [bastion_user](#input_bastion_user) | Provide the username for Bastion login. | `string` | | [client_cluster_key_pair](#input_client_cluster_key_pair) | The key pair to use to launch the client cluster host. | `list(string)` | -| [client_vsi_osimage_name](#input_client_vsi_osimage_name) | Image name to use for provisioning the client cluster instances. | `string` | +| [client_vsi_osimage_name](#input_client_vsi_osimage_name) | Name of the image that you would like to use to create the client cluster nodes for the IBM Storage Scale cluster. The solution supports only stock images that use RHEL8.8 version. | `string` | | [client_vsi_profile](#input_client_vsi_profile) | Client nodes vis profile | `string` | | [compute_cluster_filesystem_mountpoint](#input_compute_cluster_filesystem_mountpoint) | Compute cluster (accessingCluster) Filesystem mount point. | `string` | | [compute_cluster_key_pair](#input_compute_cluster_key_pair) | The key pair to use to launch the compute cluster host. | `list(string)` | @@ -97,7 +97,7 @@ Below steps will provision IBM Cloud resources (compute and storage instances in | [create_scale_cluster](#input_create_scale_cluster) | Flag to represent whether to create scale cluster or not. | `bool` | | [create_separate_namespaces](#input_create_separate_namespaces) | Flag to select if separate namespace needs to be created for compute instances. 
| `bool` | | [deploy_controller_sec_group_id](#input_deploy_controller_sec_group_id) | Deployment controller security group id. Default: null | `string` | -| [filesets](#input_filesets) | Mount point(s) and size(s) in GB of file share(s) that can be used to customize shared file storage layout. Provide the details for up to 5 shares. |
list(object({
mount_path = string,
size = number
}))
| +| [filesets](#input_filesets) | Mount point(s) and size(s) in GB of file share(s) that can be used to customize shared file storage layout. Provide the details for up to 5 file shares. |
list(object({
mount_path = string,
size = number
}))
| | [filesystem_block_size](#input_filesystem_block_size) | Filesystem block size. | `string` | | [gklm_instance_dns_domain](#input_gklm_instance_dns_domain) | IBM Cloud DNS domain name to be used for GKLM instances. | `string` | | [gklm_instance_dns_service_id](#input_gklm_instance_dns_service_id) | IBM Cloud GKLM Instance DNS service resource id. | `string` | @@ -132,10 +132,10 @@ Below steps will provision IBM Cloud resources (compute and storage instances in | [storage_vsi_osimage_id](#input_storage_vsi_osimage_id) | Image id to use for provisioning the storage cluster instances. | `string` | | [storage_vsi_osimage_name](#input_storage_vsi_osimage_name) | Image name to use for provisioning the storage cluster instances. | `string` | | [storage_vsi_profile](#input_storage_vsi_profile) | Profile to be used for storage cluster virtual server instance. | `string` | -| [total_client_cluster_instances](#input_total_client_cluster_instances) | Client cluster node counts | `number` | +| [total_client_cluster_instances](#input_total_client_cluster_instances) | Total number of client cluster instances that you need to provision. A minimum of 2 nodes and a maximum of 64 nodes are supported. | `number` | | [total_compute_cluster_instances](#input_total_compute_cluster_instances) | Number of instances to be launched for compute cluster. | `number` | | [total_gklm_instances](#input_total_gklm_instances) | Number of instances to be launched for GKLM. | `number` | -| [total_protocol_cluster_instances](#input_total_protocol_cluster_instances) | protocol nodes | `number` | +| [total_protocol_cluster_instances](#input_total_protocol_cluster_instances) | Total number of protocol nodes that you need to provision. A minimum of 2 nodes and a maximum of 16 nodes are supported. | `number` | | [total_storage_cluster_instances](#input_total_storage_cluster_instances) | Number of instances to be launched for storage cluster. 
| `number` | | [using_jumphost_connection](#input_using_jumphost_connection) | If true, will skip the jump/bastion host configuration. | `bool` | | [using_packer_image](#input_using_packer_image) | If true, gpfs rpm copy step will be skipped during the configuration. | `bool` | diff --git a/ibmcloud_scale_templates/sub_modules/instance_template/variables.tf b/ibmcloud_scale_templates/sub_modules/instance_template/variables.tf index 20e76bb8..98aefdec 100644 --- a/ibmcloud_scale_templates/sub_modules/instance_template/variables.tf +++ b/ibmcloud_scale_templates/sub_modules/instance_template/variables.tf @@ -405,7 +405,7 @@ variable "protocol_vsi_profile" { variable "total_protocol_cluster_instances" { type = number default = 2 - description = "protocol nodes" + description = "Total number of protocol nodes that you need to provision. A minimum of 2 nodes and a maximum of 16 nodes are supported." } variable "filesets" { @@ -414,7 +414,7 @@ variable "filesets" { size = number })) default = [{ mount_path = "/mnt/binaries", size = 0 }, { mount_path = "/mnt/data", size = 0 }] - description = "Mount point(s) and size(s) in GB of file share(s) that can be used to customize shared file storage layout. Provide the details for up to 5 shares." + description = "Mount point(s) and size(s) in GB of file share(s) that can be used to customize shared file storage layout. Provide the details for up to 5 file shares." } # Client Cluster Variables @@ -422,13 +422,13 @@ variable "filesets" { variable "total_client_cluster_instances" { type = number default = 2 - description = "Client cluster node counts" + description = "Total number of client cluster instances that you need to provision. A minimum of 2 nodes and a maximum of 64 nodes are supported." } variable "client_vsi_osimage_name" { type = string default = "ibm-redhat-8-8-minimal-amd64-2" - description = "Image name to use for provisioning the client cluster instances." 
+ description = "Name of the image that you would like to use to create the client cluster nodes for the IBM Storage Scale cluster. The solution supports only stock images that use RHEL8.8 version." } variable "client_vsi_profile" { diff --git a/resources/ibmcloud/compute/bare_metal_server_multiple_vol/bare_metal_server_multiple_vol.tf b/resources/ibmcloud/compute/bare_metal_server_multiple_vol/bare_metal_server_multiple_vol.tf index 4820d4b8..5fdfcefb 100644 --- a/resources/ibmcloud/compute/bare_metal_server_multiple_vol/bare_metal_server_multiple_vol.tf +++ b/resources/ibmcloud/compute/bare_metal_server_multiple_vol/bare_metal_server_multiple_vol.tf @@ -57,7 +57,7 @@ echo "StrictHostKeyChecking no" >> ~/.ssh/config echo "DOMAIN=\"${var.dns_domain}\"" >> "/etc/sysconfig/network-scripts/ifcfg-eth0" echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-eth0" sed -i -e "s#QUEUE_COUNT=3#QUEUE_COUNT=\`ethtool -l \$iface | echo \$(awk '\$1 ~ /Combined:/ {print \$2;exit}')\`#g" /var/lib/cloud/scripts/per-boot/iface-config -ethtool -L ens1 combined 16 +ethtool -L eth0 combined 16 chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser systemctl restart NetworkManager systemctl stop firewalld