diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000..59900ffb
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,3 @@
+ko_fi: sleepingkyoto
+custom:
+  - "https://monappy.jp/u/lae"
diff --git a/README.md b/README.md
index 62d43386..afba8c58 100644
--- a/README.md
+++ b/README.md
@@ -435,6 +435,22 @@ pve_datacenter_cfg:
   keyboard: en-us
 ```
 
+You can also configure [HA manager groups][ha-group]:
+```
+pve_cluster_ha_groups: [] # List of HA groups to create in PVE.
+```
+
+This example creates a group "lab_node01" for resources assigned to the
+lab-node01 host:
+```
+pve_cluster_ha_groups:
+  - name: lab_node01
+    comment: "My HA group"
+    nodes: "lab-node01"
+    nofailback: 0
+    restricted: 0
+```
+
 All configuration options supported in the datacenter.cfg file are documented
 in the [Proxmox manual datacenter.cfg section][datacenter-cfg].
 
@@ -564,7 +580,8 @@ successfully used this role to deploy PVE Ceph, it is not fully tested in CI
 deploy a test environment with your configuration first prior to prod, and
 report any issues if you run into any.
 
-This role can configure the Ceph storage system on your Proxmox hosts.
+This role can configure the Ceph storage system on your Proxmox hosts. The
+following definitions show some of the configurations that are possible.
 
 ```
 pve_ceph_enabled: true
@@ -590,13 +607,16 @@ pve_ceph_pools:
     rule: ssd
     application: rbd
     storage: true
+# This Ceph pool uses custom size/replication values
   - name: hdd
     pgs: 32
     rule: hdd
     application: rbd
     storage: true
-# A CephFS filesystem not defined as a Proxmox storage
+    size: 2
+    min_size: 1
 pve_ceph_fs:
+# A CephFS filesystem not defined as a Proxmox storage
   - name: backup
     pgs: 64
     rule: hdd
@@ -628,3 +648,5 @@ Michael Holasek ([@mholasek](https://github.com/mholasek))
 [acl-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_group.py
 [storage-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_storage.py
 [datacenter-cfg]: https://pve.proxmox.com/wiki/Manual:_datacenter.cfg
+[ceph_volume]: https://github.com/ceph/ceph-ansible/blob/master/library/ceph_volume.py
+[ha-group]: https://pve.proxmox.com/wiki/High_Availability#ha_manager_groups
diff --git a/Vagrantfile b/Vagrantfile
index 8857eff8..b3a5d02e 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -4,6 +4,7 @@ Vagrant.configure("2") do |config|
   config.vm.provider :libvirt do |libvirt|
     libvirt.memory = 2048
     libvirt.cpus = 2
+    libvirt.storage :file, :size => '2G'
   end
 
   N = 3
@@ -12,6 +13,11 @@ Vagrant.configure("2") do |config|
     machine.vm.hostname = "pve-#{machine_id}"
 
     if machine_id == N
+      machine.vm.provision :ansible do |ansible|
+        ansible.limit = "all,localhost"
+        ansible.playbook = "tests/vagrant/package_role.yml"
+        ansible.verbose = true
+      end
       machine.vm.provision :ansible do |ansible|
         ansible.limit = "all"
         ansible.playbook = "tests/vagrant/provision.yml"
diff --git a/defaults/main.yml b/defaults/main.yml
index 71f45723..eabff5f4 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -32,6 +32,7 @@ pve_cluster_clustername: "{{ pve_group }}"
 # pve_cluster_addr0: "{{ ansible_default_ipv4.address }}"
 # pve_cluster_addr1: "{{ ansible_eth1.ipv4.address }}
 pve_datacenter_cfg: {}
+pve_cluster_ha_groups: []
 pve_ssl_letsencrypt: false
 pve_groups: []
 pve_users: []
diff --git a/files/00_remove_checked_command_buster.patch b/files/00_remove_checked_command_buster.patch
new file mode 100644
index 00000000..bb6abac9
--- /dev/null
+++ b/files/00_remove_checked_command_buster.patch
@@ -0,0 +1,41 @@
+diff -ur /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js +--- /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js 2019-12-17 11:02:52.000000000 +0000 ++++ /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js 2020-01-18 16:29:37.362953621 +0000 +@@ -6016,7 +6016,7 @@ + var update_btn = new Ext.Button({ + text: gettext('Refresh'), + handler: function() { +- Proxmox.Utils.checked_command(function() { apt_command('update'); }); ++ apt_command('update'); + } + }); + +diff -ur /usr/share/pve-manager/js/pvemanagerlib.js /usr/share/pve-manager/js/pvemanagerlib.js +--- /usr/share/pve-manager/js/pvemanagerlib.js 2019-12-16 14:12:19.000000000 +0000 ++++ /usr/share/pve-manager/js/pvemanagerlib.js 2020-01-18 16:26:07.187536513 +0000 +@@ -19024,7 +19024,7 @@ + var version_btn = new Ext.Button({ + text: gettext('Package versions'), + handler: function(){ +- Proxmox.Utils.checked_command(function() { me.showVersions(); }); ++ me.showVersions(); + } + }); + +@@ -19288,7 +19288,7 @@ + { + text: gettext('System Report'), + handler: function() { +- Proxmox.Utils.checked_command(function (){ me.showReport(); }); ++ me.showReport(); + } + } + ], +@@ -40472,7 +40472,6 @@ + handler: function(data) { + me.login = null; + me.updateLoginData(data); +- Proxmox.Utils.checked_command(function() {}); // display subscription status + } + }); + } diff --git a/files/00_remove_checked_command.patch b/files/00_remove_checked_command_stretch.patch similarity index 100% rename from files/00_remove_checked_command.patch rename to files/00_remove_checked_command_stretch.patch diff --git a/library/ceph_volume.py b/library/ceph_volume.py new file mode 100755 index 00000000..c9aa50ba --- /dev/null +++ b/library/ceph_volume.py @@ -0,0 +1,682 @@ +#!/usr/bin/python +import datetime +import copy +import json +import os + +ANSIBLE_METADATA = { + 'metadata_version': '1.0', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume + +short_description: Create ceph OSDs with ceph-volume + +description: + - Using the ceph-volume utility available in Ceph this module + can be used to create ceph OSDs that are backed by logical volumes. + - Only available in ceph versions luminous or greater. + +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + objectstore: + description: + - The objectstore of the OSD, either filestore or bluestore + - Required if action is 'create' + required: false + choices: ['bluestore', 'filestore'] + default: bluestore + action: + description: + - The action to take. Creating OSDs and zapping or querying devices. + required: true + choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list', 'inventory'] + default: create + data: + description: + - The logical volume name or device to use for the OSD data. + required: true + data_vg: + description: + - If data is a lv, this must be the name of the volume group it belongs to. + required: false + osd_fsid: + description: + - The OSD FSID + required: false + journal: + description: + - The logical volume name or partition to use as a filestore journal. + - Only applicable if objectstore is 'filestore'. + required: false + journal_vg: + description: + - If journal is a lv, this must be the name of the volume group it belongs to. + - Only applicable if objectstore is 'filestore'. + required: false + db: + description: + - A partition or logical volume name to use for block.db. 
+ - Only applicable if objectstore is 'bluestore'. + required: false + db_vg: + description: + - If db is a lv, this must be the name of the volume group it belongs to. # noqa E501 + - Only applicable if objectstore is 'bluestore'. + required: false + wal: + description: + - A partition or logical volume name to use for block.wal. + - Only applicable if objectstore is 'bluestore'. + required: false + wal_vg: + description: + - If wal is a lv, this must be the name of the volume group it belongs to. # noqa E501 + - Only applicable if objectstore is 'bluestore'. + required: false + crush_device_class: + description: + - Will set the crush device class for the OSD. + required: false + dmcrypt: + description: + - If set to True the OSD will be encrypted with dmcrypt. + required: false + batch_devices: + description: + - A list of devices to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + required: false + osds_per_device: + description: + - The number of OSDs to create per device. + - Only applicable if action is 'batch'. + required: false + default: 1 + journal_size: + description: + - The size in MB of filestore journals. + - Only applicable if action is 'batch'. + required: false + default: 5120 + block_db_size: + description: + - The size in bytes of bluestore block db lvs. + - The default of -1 means to create them as big as possible. + - Only applicable if action is 'batch'. + required: false + default: -1 + report: + description: + - If provided the --report flag will be passed to 'ceph-volume lvm batch'. + - No OSDs will be created. + - Results will be returned in json format. + - Only applicable if action is 'batch'. + required: false + containerized: + description: + - Wether or not this is a containerized cluster. The value is + assigned or not depending on how the playbook runs. + required: false + default: None + list: + description: + - List potential Ceph LVM metadata on a device + required: false + inventory: + description: + - List storage device inventory. 
+ required: false + +author: + - Andrew Schoen (@andrewschoen) + - Sebastien Han +''' + +EXAMPLES = ''' +- name: set up a filestore osd with an lv data and a journal partition + ceph_volume: + objectstore: filestore + data: data-lv + data_vg: data-vg + journal: /dev/sdc1 + action: create + +- name: set up a bluestore osd with a raw device for data + ceph_volume: + objectstore: bluestore + data: /dev/sdc + action: create + + +- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa e501 + ceph_volume: + objectstore: bluestore + data: data-lv + data_vg: data-vg + db: /dev/sdc1 + wal: /dev/sdc2 + action: create +''' + + +from ansible.module_utils.basic import AnsibleModule # noqa 4502 + + +def fatal(message, module): + ''' + Report a fatal error and exit + ''' + + if module: + module.fail_json(msg=message, changed=False, rc=1) + else: + raise(Exception(message)) + + +def container_exec(binary, container_image): + ''' + Build the docker CLI to run a command inside a container + ''' + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, 'run', + '--rm', '--privileged', '--net=host', '--ipc=host', + '--ulimit', 'nofile=1024:4096', + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/var/run/udev/:/var/run/udev/:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, container_image] + return command_exec + + +def build_ceph_volume_cmd(action, container_image, cluster=None): + ''' + Build the ceph-volume command + ''' + + if container_image: + binary = 'ceph-volume' + cmd = container_exec( + binary, container_image) + else: + binary = ['ceph-volume'] + cmd = binary + + if cluster: + cmd.extend(['--cluster', cluster]) + + cmd.extend(action) + + return cmd + + +def exec_command(module, cmd): + ''' + Execute command + ''' + + rc, out, err = module.run_command(cmd) + return rc, cmd, out, err + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def get_data(data, data_vg): + if data_vg: + data = '{0}/{1}'.format(data_vg, data) + return data + + +def get_journal(journal, journal_vg): + if journal_vg: + journal = '{0}/{1}'.format(journal_vg, journal) + return journal + + +def get_db(db, db_vg): + if db_vg: + db = '{0}/{1}'.format(db_vg, db) + return db + + +def get_wal(wal, wal_vg): + if wal_vg: + wal = '{0}/{1}'.format(wal_vg, wal) + return wal + + +def batch(module, container_image): + ''' + Batch prepare OSD devices + ''' + + # get module variables + cluster = module.params['cluster'] + objectstore = module.params['objectstore'] + batch_devices = module.params.get('batch_devices', None) + crush_device_class = module.params.get('crush_device_class', None) + journal_size = module.params.get('journal_size', None) + block_db_size = module.params.get('block_db_size', None) + block_db_devices = module.params.get('block_db_devices', None) + wal_devices = module.params.get('wal_devices', None) + dmcrypt = module.params.get('dmcrypt', None) + osds_per_device = module.params.get('osds_per_device', 1) + + if not osds_per_device: + fatal('osds_per_device must be provided if action is "batch"', module) + + if osds_per_device < 1: + fatal('osds_per_device must be greater than 0 if action is 
"batch"', module) # noqa E501 + + if not batch_devices: + fatal('batch_devices must be provided if action is "batch"', module) + + # Build the CLI + action = ['lvm', 'batch'] + cmd = build_ceph_volume_cmd(action, container_image, cluster) + cmd.extend(['--%s' % objectstore]) + cmd.append('--yes') + + if container_image: + cmd.append('--prepare') + + if crush_device_class: + cmd.extend(['--crush-device-class', crush_device_class]) + + if dmcrypt: + cmd.append('--dmcrypt') + + if osds_per_device > 1: + cmd.extend(['--osds-per-device', str(osds_per_device)]) + + if objectstore == 'filestore': + cmd.extend(['--journal-size', journal_size]) + + if objectstore == 'bluestore' and block_db_size != '-1': + cmd.extend(['--block-db-size', block_db_size]) + + cmd.extend(batch_devices) + + if block_db_devices: + cmd.extend(['--db-devices', ' '.join(block_db_devices)]) + + if wal_devices: + cmd.extend(['--wal-devices', ' '.join(wal_devices)]) + + return cmd + + +def ceph_volume_cmd(subcommand, container_image, cluster=None): + ''' + Build ceph-volume initial command + ''' + + if container_image: + binary = 'ceph-volume' + cmd = container_exec( + binary, container_image) + else: + binary = ['ceph-volume'] + cmd = binary + + if cluster: + cmd.extend(['--cluster', cluster]) + + cmd.append('lvm') + cmd.append(subcommand) + + return cmd + + +def prepare_or_create_osd(module, action, container_image): + ''' + Prepare or create OSD devices + ''' + + # get module variables + cluster = module.params['cluster'] + objectstore = module.params['objectstore'] + data = module.params['data'] + data_vg = module.params.get('data_vg', None) + data = get_data(data, data_vg) + journal = module.params.get('journal', None) + journal_vg = module.params.get('journal_vg', None) + db = module.params.get('db', None) + db_vg = module.params.get('db_vg', None) + wal = module.params.get('wal', None) + wal_vg = module.params.get('wal_vg', None) + crush_device_class = module.params.get('crush_device_class', None) + dmcrypt = module.params.get('dmcrypt', None) + + # Build the CLI + action = ['lvm', action] + cmd = build_ceph_volume_cmd(action, container_image, cluster) + cmd.extend(['--%s' % objectstore]) + cmd.append('--data') + cmd.append(data) + + if journal: + journal = get_journal(journal, journal_vg) + cmd.extend(['--journal', journal]) + + if db: + db = get_db(db, db_vg) + cmd.extend(['--block.db', db]) + + if wal: + wal = get_wal(wal, wal_vg) + cmd.extend(['--block.wal', wal]) + + if crush_device_class: + cmd.extend(['--crush-device-class', crush_device_class]) + + if dmcrypt: + cmd.append('--dmcrypt') + + return cmd + + +def list_osd(module, container_image): + ''' + List will detect wether or not a device has Ceph LVM Metadata + ''' + + # get module variables + cluster = module.params['cluster'] + data = module.params.get('data', None) + data_vg = module.params.get('data_vg', None) + data = get_data(data, data_vg) + + # Build the CLI + action = ['lvm', 'list'] + cmd = build_ceph_volume_cmd(action, container_image, cluster) + if data: + cmd.append(data) + cmd.append('--format=json') + + return cmd + +def list_storage_inventory(module, container_image): + ''' + List storage inventory. 
+ ''' + + action = ['inventory'] + cmd = build_ceph_volume_cmd(action, container_image) + cmd.append('--format=json') + + return cmd + +def activate_osd(): + ''' + Activate all the OSDs on a machine + ''' + + # build the CLI + action = ['lvm', 'activate'] + container_image = None + cmd = build_ceph_volume_cmd(action, container_image) + cmd.append('--all') + + return cmd + + +def zap_devices(module, container_image): + ''' + Will run 'ceph-volume lvm zap' on all devices, lvs and partitions + used to create the OSD. The --destroy flag is always passed so that + if an OSD was originally created with a raw device or partition for + 'data' then any lvs that were created by ceph-volume are removed. + ''' + + # get module variables + data = module.params.get('data', None) + data_vg = module.params.get('data_vg', None) + journal = module.params.get('journal', None) + journal_vg = module.params.get('journal_vg', None) + db = module.params.get('db', None) + db_vg = module.params.get('db_vg', None) + wal = module.params.get('wal', None) + wal_vg = module.params.get('wal_vg', None) + osd_fsid = module.params.get('osd_fsid', None) + + # build the CLI + action = ['lvm', 'zap'] + cmd = build_ceph_volume_cmd(action, container_image) + cmd.append('--destroy') + + if osd_fsid: + cmd.extend(['--osd-fsid', osd_fsid]) + + if data: + data = get_data(data, data_vg) + cmd.append(data) + + if journal: + journal = get_journal(journal, journal_vg) + cmd.extend([journal]) + + if db: + db = get_db(db, db_vg) + cmd.extend([db]) + + if wal: + wal = get_wal(wal, wal_vg) + cmd.extend([wal]) + + return cmd + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + objectstore=dict(type='str', required=False, choices=[ + 'bluestore', 'filestore'], default='bluestore'), + action=dict(type='str', required=False, choices=[ + 'create', 'zap', 'batch', 'prepare', 'activate', 'list', + 'inventory'], default='create'), # noqa 4502 + data=dict(type='str', required=False), + data_vg=dict(type='str', required=False), + journal=dict(type='str', required=False), + journal_vg=dict(type='str', required=False), + db=dict(type='str', required=False), + db_vg=dict(type='str', required=False), + wal=dict(type='str', required=False), + wal_vg=dict(type='str', required=False), + crush_device_class=dict(type='str', required=False), + dmcrypt=dict(type='bool', required=False, default=False), + batch_devices=dict(type='list', required=False, default=[]), + osds_per_device=dict(type='int', required=False, default=1), + journal_size=dict(type='str', required=False, default='5120'), + block_db_size=dict(type='str', required=False, default='-1'), + block_db_devices=dict(type='list', required=False, default=[]), + wal_devices=dict(type='list', required=False, default=[]), + report=dict(type='bool', required=False, default=False), + containerized=dict(type='str', required=False, default=False), + osd_fsid=dict(type='str', required=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + result = dict( + changed=False, + stdout='', + stderr='', + rc='', + start='', + end='', + delta='', + ) + + if module.check_mode: + return result + + # start execution + startd = datetime.datetime.now() + + # get the desired action + action = module.params['action'] + + # will return either the image name or None + container_image = is_containerized() + + # Assume the task's status will be 'changed' + changed = True + + if action == 'create' or action == 'prepare': + # First 
test if the device has Ceph LVM Metadata + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + # list_osd returns a dict, if the dict is empty this means + # we can not check the return code since it's not consistent + # with the plain output + # see: http://tracker.ceph.com/issues/36329 + # FIXME: it's probably less confusing to check for rc + + # convert out to json, ansible returns a string... + try: + out_dict = json.loads(out) + except ValueError: + fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa E501 + + if out_dict: + data = module.params['data'] + result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501 + data) + result['rc'] = 0 + module.exit_json(**result) + + # Prepare or create the OSD + rc, cmd, out, err = exec_command( + module, prepare_or_create_osd(module, action, container_image)) + + elif action == 'activate': + if container_image: + fatal( + "This is not how container's activation happens, nothing to activate", module) # noqa E501 + + # Activate the OSD + rc, cmd, out, err = exec_command( + module, activate_osd()) + + elif action == 'zap': + # Zap the OSD + rc, cmd, out, err = exec_command( + module, zap_devices(module, container_image)) + + elif action == 'list': + # List Ceph LVM Metadata on a device + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + elif action == 'inventory': + # List storage device inventory. + rc, cmd, out, err = exec_command( + module, list_storage_inventory(module, container_image)) + + elif action == 'batch': + # Batch prepare AND activate OSDs + report = module.params.get('report', None) + + # Add --report flag for the idempotency test + report_flags = [ + '--report', + '--format=json', + ] + + cmd = batch(module, container_image) + batch_report_cmd = copy.copy(cmd) + batch_report_cmd.extend(report_flags) + + # Run batch --report to see what's going to happen + # Do not run the batch command if there is nothing to do + rc, cmd, out, err = exec_command( + module, batch_report_cmd) + try: + report_result = json.loads(out) + except ValueError: + strategy_change = "strategy changed" in out + if strategy_change: + out = json.dumps( + {"changed": False, "stdout": out.rstrip("\r\n")}) + rc = 0 + changed = False + else: + out = out.rstrip("\r\n") + result = dict( + cmd=cmd, + stdout=out.rstrip('\r\n'), + stderr=err.rstrip('\r\n'), + rc=rc, + changed=changed, + ) + if strategy_change: + module.exit_json(**result) + module.fail_json(msg='non-zero return code', **result) + + if not report: + # if not asking for a report, let's just run the batch command + changed = report_result['changed'] + if changed: + # Batch prepare the OSD + rc, cmd, out, err = exec_command( + module, batch(module, container_image)) + else: + cmd = batch_report_cmd + + else: + module.fail_json( + msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch" or "inventory".', changed=False, rc=1) # noqa E501 + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip('\r\n'), + stderr=err.rstrip('\r\n'), + changed=changed, + ) + + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/library/proxmox_user.py b/library/proxmox_user.py index 
f213426e..36d6908d 100755
--- a/library/proxmox_user.py
+++ b/library/proxmox_user.py
@@ -53,10 +53,6 @@
         required: false
         description:
             - Optionally sets the user's last name in PVE.
-    firstname:
-        required: false
-        description:
-            - Optionally sets the user's first name in PVE.
     password:
         required: false
         description:
@@ -243,10 +239,10 @@ def main():
             groups=dict(default=None, type='list'),
             comment=dict(default=None, type='str'),
             email=dict(default=None, type='str'),
-            expire=dict(default=0, type='int'),
             firstname=dict(default=None, type='str'),
             lastname=dict(default=None, type='str'),
-            password=dict(default=None, type='str', no_log=True)
+            password=dict(default=None, type='str', no_log=True),
+            expire=dict(default=0, type='int')
         ),
         supports_check_mode=True
     )
diff --git a/tasks/ceph.yml b/tasks/ceph.yml
index 10805484..e1166417 100644
--- a/tasks/ceph.yml
+++ b/tasks/ceph.yml
@@ -46,13 +46,47 @@
     creates: '/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/'
   when: inventory_hostname != groups[pve_ceph_mon_group][0]
 
-- name: Create Ceph OSDs
-  command: >-
-    pveceph osd create {{ item.device }}
-    {% if "block.db" in item %}--journal_dev {{ item["block.db"] }}{% endif %}
-  args:
-    creates: '{{ item.device }}1'
-  with_items: '{{ pve_ceph_osds }}'
+- block:
+  - name: Get existing ceph volumes
+    ceph_volume:
+      action: list
+      data: "{{ item.device }}"
+    register: _ceph_volume_data
+    loop: '{{ pve_ceph_osds }}'
+    tags: ceph_volume
+
+  - name: Initialize osd variables
+    set_fact:
+      _existing_ceph_volumes_tmp: []
+      _existing_ceph_volumes: []
+    tags: ceph_volume
+
+  - name: Determine ceph volumes Step1
+    set_fact:
+      _existing_ceph_volumes_tmp: "{{ _existing_ceph_volumes_tmp + item.stdout | from_json | json_query('*[].devices[]') }}"
+    with_items: "{{ _ceph_volume_data.results }}"
+    tags: ceph_volume
+
+  - name: Determine ceph volumes Step2
+    set_fact:
+      _existing_ceph_volumes: "{{ _existing_ceph_volumes + [{'device': item}] }}"
+    with_items: "{{ _existing_ceph_volumes_tmp }}"
+    tags: ceph_volume
+
+  - name: Change osd list (remove existing osds from the list)
+    set_fact:
+      pve_ceph_osds_diff: "{{ pve_ceph_osds | difference(_existing_ceph_volumes) }}"
+    tags: ceph_volume
+
+  - name: Create Ceph OSDs
+    command: >-
+      pveceph osd create {{ item.device }}
+      {% if "block.db" in item %}--journal_dev {{ item["block.db"] }}{% endif %}
+    args:
+      creates: '{{ item.device }}1'
+    with_items: '{{ pve_ceph_osds_diff }}'
+
+  tags: create_osd
 
 - block:
   - name: List Ceph CRUSH rules
@@ -87,6 +121,12 @@
       {% if 'pgs' in item %}
       --pg_num {{ item.pgs }}
       {% endif %}
+      {% if 'size' in item %}
+      --size {{ item.size }}
+      {% endif %}
+      {% if 'min_size' in item %}
+      --min_size {{ item.min_size }}
+      {% endif %}
   when: item.name not in _ceph_pools.stdout_lines
   with_items: '{{ pve_ceph_pools }}'
 
diff --git a/tasks/main.yml b/tasks/main.yml
index a418dc5f..c6556f3e 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -146,7 +146,7 @@
 
 - name: Remove subscription check wrapper function in web UI
   patch:
-    src: "00_remove_checked_command.patch"
+    src: "00_remove_checked_command_{{ ansible_distribution_release }}.patch"
     basedir: /
     strip: 1
     backup: yes
@@ -175,6 +175,7 @@
 - name: Configure Proxmox groups
   proxmox_group:
     name: "{{ item.name }}"
+    state: "{{ item.state | default('present') }}"
     comment: "{{ item.comment | default(omit) }}"
   with_items: "{{ pve_groups }}"
   when: "not pve_cluster_enabled or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0])"
@@ -182,10 +183,15 @@
 - name: Configure Proxmox user accounts
   proxmox_user:
     name: "{{ item.name }}"
+    state: "{{ item.state | default('present') }}"
+    enable: "{{ item.enable | default(omit) }}"
+    groups: "{{ item.groups | default([]) }}"
+    comment: "{{ item.comment | default(omit) }}"
     email: "{{ item.email | default(omit) }}"
     firstname: "{{ item.firstname | default(omit) }}"
     lastname: "{{ item.lastname | default(omit) }}"
-    groups: "{{ item.groups | default([]) }}"
+    password: "{{ item.password | default(omit) }}"
+    expire: "{{ item.expire | default(omit) }}"
   with_items: "{{ pve_users }}"
   when: "not pve_cluster_enabled | bool or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0])"
 
@@ -193,6 +199,7 @@
   proxmox_acl:
     path: "{{ item.path }}"
    roles: "{{ item.roles }}"
+    state: "{{ item.state | default('present') }}"
     groups: "{{ item.groups | default([]) }}"
     users: "{{ item.users | default([]) }}"
   with_items: "{{ pve_acls }}"
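With the `tasks/main.yml` changes above, `state`, `enable`, `comment`, `password` and `expire` are now passed through from each `pve_users` entry to the `proxmox_user` module. A rough sketch of an inventory entry exercising those fields (the account names and values below are illustrative, not part of this changeset):

```
pve_users:
  - name: ops_monitor@pve          # illustrative account name
    enable: 1
    expire: 0                      # 0 = no expiration date
    password: "ChangeMe123"        # illustrative; see proxmox_user.py for how passwords are applied
    comment: Monitoring service account
    groups:
      - Admins
  - name: departed_user@pve
    state: absent                  # removes the account if it exists
```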
"{{ item.name }}" + state: "{{ item.state | default('present') }}" + enable: "{{ item.enable | default(omit) }}" + groups: "{{ item.groups | default([]) }}" + comment: "{{ item.comment | default(omit) }}" email: "{{ item.email | default(omit) }}" firstname: "{{ item.firstname | default(omit) }}" lastname: "{{ item.lastname | default(omit) }}" - groups: "{{ item.groups | default([]) }}" + password: "{{ item.password | default(omit) }}" + expire: "{{ item.expire | default(omit) }}" with_items: "{{ pve_users }}" when: "not pve_cluster_enabled | bool or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0])" @@ -193,6 +199,7 @@ proxmox_acl: path: "{{ item.path }}" roles: "{{ item.roles }}" + state: "{{ item.state | default('present') }}" groups: "{{ item.groups | default([]) }}" users: "{{ item.users | default([]) }}" with_items: "{{ pve_acls }}" diff --git a/tasks/pve_cluster_config.yml b/tasks/pve_cluster_config.yml index fb85a15e..48fa86c7 100644 --- a/tasks/pve_cluster_config.yml +++ b/tasks/pve_cluster_config.yml @@ -67,3 +67,37 @@ when: - "_pve_active_cluster is not defined" - "inventory_hostname != groups[pve_group][0]" + +- name: Check for PVE cluster HA groups + proxmox_query: + query: "/cluster/ha/groups" + register: _ha_group_list + when: "inventory_hostname == groups[pve_group][0]" + +- name: Create PVE cluster HA groups + command: >- + ha-manager groupadd {{ item.name }} + -comment "{{ item.comment | default('') }}" + -nodes "{{ item.nodes }}" + {% if 'nofailback' in item %} + -nofailback {{ item.nofailback }} + {% endif %} + {% if 'restricted' in item %} + -restricted {{ item.restricted }} + {% endif %} + when: + - "inventory_hostname == groups[pve_group][0]" + - item.name not in _ha_group_list.response | json_query("[*].group") + with_items: "{{ pve_cluster_ha_groups }}" + +- name: Update PVE cluster HA groups + command: >- + ha-manager groupset {{ item.0.name }} -{{ item.1 }} "{{ item.0[item.1] }}" + when: + - "inventory_hostname == groups[pve_group][0]" + - item.0.name in _ha_group_list.response | json_query("[*].group") + - item.1 in item.0 + - item.0[item.1] != _ha_group_list.response + | json_query("[?group=='" + item.0.name + "']." 
+      | json_query("[?group=='" + item.0.name + "']." + item.1) | first
+  loop: "{{ pve_cluster_ha_groups
+        | product(['comment', 'nodes', 'nofailback', 'restricted']) | list }}"
diff --git a/tests/group_vars/all b/tests/group_vars/all
index dd6568d4..6625e209 100644
--- a/tests/group_vars/all
+++ b/tests/group_vars/all
@@ -15,6 +15,11 @@ pve_ssl_certificate: "{{ lookup('file', ssl_host_cert_path) }}"
 pve_cluster_enabled: yes
 pve_datacenter_cfg:
   console: xtermjs
+pve_cluster_ha_groups:
+  - name: proxmox_5_01
+    comment: "Resources on proxmox-5-01"
+    nodes: proxmox-5-01
+    restricted: 1
 pve_groups:
   - name: Admins
     comment: Administrators of this PVE cluster
diff --git a/tests/test.yml b/tests/test.yml
index 6d533e5e..7fe70544 100644
--- a/tests/test.yml
+++ b/tests/test.yml
@@ -80,6 +80,24 @@
       assert:
         that: "'console: xtermjs' in datacenter_cfg.content | b64decode"
 
+    - name: Query PVE HA groups
+      command: "pvesh get /cluster/ha/groups --output=json"
+      register: _ha_group_list
+      run_once: true
+
+    - name: Check PVE HA group configuration
+      assert:
+        that:
+          - item.name == ha_group.group
+          - item.comment == ha_group.comment
+          - item.nodes == ha_group.nodes
+          - item.restricted == ha_group.restricted
+          - "'nofailback' not in ha_group"
+      vars:
+        ha_group: '{{ _ha_group_list.stdout | from_json
+                   | json_query("[?group==''" + item.name + "'']") | first }}'
+      with_items: "{{ pve_cluster_ha_groups }}"
+
     - block:
       - name: pvedaemon service status
         shell: "journalctl --no-pager -xu pvedaemon.service"
diff --git a/tests/vagrant/group_vars/all b/tests/vagrant/group_vars/all
index c450e067..75d92777 100644
--- a/tests/vagrant/group_vars/all
+++ b/tests/vagrant/group_vars/all
@@ -18,10 +18,36 @@ pve_groups:
 pve_users:
   - name: root@pam
     email: postmaster@pve.example
+  - name: admin@pve
+    password: "ProxmoxVE6"
+    comment: Hello World
+    groups:
+      - Admins
 pve_acls:
   - path: /
     roles: [ "Administrator" ]
     groups: [ "Admins" ]
+pve_ceph_enabled: true
+pve_ceph_crush_rules:
+  - name: hdd
+pve_ceph_mds_group: all
+pve_ceph_pools:
+  - name: vm-storage
+    pgs: 128
+    application: rbd
+    storage: true
+pve_storages:
+  - name: vm-storage
+    type: rbd
+    content:
+      - images
+      - rootdir
+    pool: vm-storage
+    username: admin
+    monhost:
+      - "{{ ansible_fqdn }}:6789"
+pve_ceph_osds:
+  - device: "/dev/vdb"
 ntp_manage_config: true
 ntp_servers:
   - clock.sjc.he.net
diff --git a/tests/vagrant/package_role.yml b/tests/vagrant/package_role.yml
new file mode 100644
index 00000000..3e2baee2
--- /dev/null
+++ b/tests/vagrant/package_role.yml
@@ -0,0 +1,16 @@
+---
+- hosts: localhost
+  connection: local
+  vars:
+    role_name: lae.proxmox
+  tasks:
+    - block:
+      - shell: pwd
+      - name: Package up current working role
+        shell: "cd $(git rev-parse --show-toplevel); git ls-files -z | xargs -0 tar -czvf $OLDPWD/{{ role_name }}.tar.gz"
+      - name: Install packaged role
+        shell: "ansible-galaxy install {{ role_name }}.tar.gz,devel-$(git rev-parse HEAD),{{ role_name }} --force"
+      - name: Remove packaged role artifact
+        file:
+          dest: "{{ role_name }}.tar.gz"
+          state: absent
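The HA group tasks and tests above manage exactly four group properties: `comment`, `nodes`, `nofailback` and `restricted`. As a sketch of a group that prefers one node but can fail over to another (the host names and priorities here are invented; `node[:priority]` is the standard `ha-manager` nodes syntax):

```
pve_cluster_ha_groups:
  - name: prefer_node01
    comment: "Run on node01, fail over to node02 if needed"
    nodes: "lab-node01:2,lab-node02:1"
    nofailback: 0
    restricted: 1
```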
diff --git a/tests/vagrant/provision.yml b/tests/vagrant/provision.yml
index 0c3d559b..0c01c63e 100644
--- a/tests/vagrant/provision.yml
+++ b/tests/vagrant/provision.yml
@@ -1,20 +1,3 @@
-- hosts: all
-  vars:
-    role_name: lae.proxmox
-  tasks:
-    - block:
-      - shell: pwd
-      - name: Package up current working role
-        shell: "cd $(git rev-parse --show-toplevel); git ls-files -z | xargs -0 tar -czvf $OLDPWD/{{ role_name }}.tar.gz"
-      - name: Install packaged role
-        shell: "ansible-galaxy install {{ role_name }}.tar.gz,devel-$(git rev-parse HEAD),{{ role_name }} --force"
-      - name: Remove packaged role artifact
-        file:
-          dest: "{{ role_name }}.tar.gz"
-          state: absent
-      delegate_to: localhost
-      run_once: True
-
 - hosts: all
   become: True
   pre_tasks:
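Because `library/ceph_volume.py` ships with the role, it can also be called directly from a play, the same way `tasks/ceph.yml` uses it to detect devices that already carry an OSD before running `pveceph osd create`. A minimal sketch (the device path and variable names are illustrative):

```
- name: Check whether a device already hosts a Ceph OSD
  ceph_volume:
    action: list
    data: /dev/vdb            # illustrative device
  register: _osd_probe

- name: Show detected Ceph LVM metadata
  debug:
    msg: "{{ _osd_probe.stdout | from_json }}"
```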