summary refs log tree commit diff
path: root/roles/vm/guest/install
diff options
context:
space:
mode:
Diffstat (limited to 'roles/vm/guest/install')
-rw-r--r--  roles/vm/guest/install/library/wait_for_virt.py         179
-rw-r--r--  roles/vm/guest/install/tasks/installer-debian.yml        21
-rw-r--r--  roles/vm/guest/install/tasks/installer-openbsd.yml       19
-rw-r--r--  roles/vm/guest/install/tasks/main.yml                    90
4 files changed, 309 insertions, 0 deletions
diff --git a/roles/vm/guest/install/library/wait_for_virt.py b/roles/vm/guest/install/library/wait_for_virt.py
new file mode 100644
index 00000000..6c49fae1
--- /dev/null
+++ b/roles/vm/guest/install/library/wait_for_virt.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import traceback
+import time
+
+try:
+ import libvirt
+except ImportError:
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
+# Map libvirt domain state codes (virDomainState, as returned by
+# virDomain.info()[0]) to the coarse names accepted by the `states`
+# module parameter.  Codes 0-2 (nostate/running/blocked) are all
+# reported as "running"; codes 4-5 (shutdown/shutoff) as "shutdown".
+VIRT_STATE_NAME_MAP = {
+    0: "running",
+    1: "running",
+    2: "running",
+    3: "paused",
+    4: "shutdown",
+    5: "shutdown",
+    6: "crashed"
+}
+
+
+class VMNotFound(Exception):
+    """Raised when the requested guest is not known to the hypervisor."""
+    pass
+
+
+class LibvirtConnection(object):
+
+ def __init__(self, uri, module):
+
+ self.module = module
+
+ cmd = "uname -r"
+ rc, stdout, stderr = self.module.run_command(cmd)
+
+ if "xen" in stdout:
+ conn = libvirt.open(None)
+ elif "esx" in uri:
+ auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
+ conn = libvirt.openAuth(uri, auth)
+ else:
+ conn = libvirt.open(uri)
+
+ if not conn:
+ raise Exception("hypervisor connection failure")
+
+ self.conn = conn
+
+ def find_vm(self, vmid):
+ """
+ Extra bonus feature: vmid = -1 returns a list of everything
+ """
+ conn = self.conn
+
+ vms = []
+
+ # this block of code borrowed from virt-manager:
+ # get working domain's name
+ ids = conn.listDomainsID()
+ for id in ids:
+ vm = conn.lookupByID(id)
+ vms.append(vm)
+ # get defined domain
+ names = conn.listDefinedDomains()
+ for name in names:
+ vm = conn.lookupByName(name)
+ vms.append(vm)
+
+ if vmid == -1:
+ return vms
+
+ for vm in vms:
+ if vm.name() == vmid:
+ return vm
+
+ raise VMNotFound("virtual machine %s not found" % vmid)
+
+ def get_status(self, vmid):
+ state = self.find_vm(vmid).info()[0]
+ return VIRT_STATE_NAME_MAP.get(state, "unknown")
+
+
+class Virt(object):
+
+ def __init__(self, uri, module):
+ self.module = module
+ self.uri = uri
+
+ def __get_conn(self):
+ self.conn = LibvirtConnection(self.uri, self.module)
+ return self.conn
+
+ def status(self, vmid):
+ """
+ Return a state suitable for server consumption. Aka, codes.py values, not XM output.
+ """
+ self.__get_conn()
+ return self.conn.get_status(vmid)
+
+
+def core(module):
+
+ states = module.params.get('states', None)
+ guest = module.params.get('name', None)
+ uri = module.params.get('uri', None)
+ delay = module.params.get('delay', None)
+ sleep = module.params.get('sleep', None)
+ timeout = module.params.get('timeout', None)
+
+ v = Virt(uri, module)
+ res = {'changed': False, 'failed': True}
+
+ if delay > 0:
+ time.sleep(delay)
+
+ for _ in range(0, timeout, sleep):
+ state = v.status(guest)
+ if state in states:
+ res['state'] = state
+ res['failed'] = False
+ res['msg'] = "guest '%s' has reached state: %s" % (guest, state)
+ return VIRT_SUCCESS, res
+
+ time.sleep(sleep)
+
+ res['msg'] = "timeout waiting for guest '%s' to reach one of states: %s" % (guest, ', '.join(states))
+ return VIRT_FAILED, res
+
+
+def main():
+
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(aliases=['guest'], required=True),
+ states=dict(type='list', required=True),
+ uri=dict(default='qemu:///system'),
+ delay=dict(type='int', default=0),
+ sleep=dict(type='int', default=1),
+ timeout=dict(type='int', default=300),
+ ))
+
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
+ for state in module.params.get('states', None):
+ if state not in set(VIRT_STATE_NAME_MAP.values()):
+ module.fail_json(
+ msg="states contains invalid state '%s', must be one of %s" % (state, ', '.join(set(VIRT_STATE_NAME_MAP.values())))
+ )
+
+ rc = VIRT_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/vm/guest/install/tasks/installer-debian.yml b/roles/vm/guest/install/tasks/installer-debian.yml
new file mode 100644
index 00000000..e0492969
--- /dev/null
+++ b/roles/vm/guest/install/tasks/installer-debian.yml
@@ -0,0 +1,21 @@
+---
+# Fetch the Debian/Ubuntu netboot installer for the target host's
+# architecture (defaults to amd64 when install_cooked.arch is unset).
+- name: fetch debian installer files
+  vars:
+    debian_installer_distro: "{{ install_distro }}"
+    debian_installer_codename: "{{ install_codename }}"
+    debian_installer_arch: "{{ hostvars[install_hostname].install_cooked.arch | default('amd64') }}"
+    debian_installer_variant: netboot
+  import_role:
+    name: installer/debian/fetch
+
+# Repack the fetched initrd with a host-specific preseed configuration
+# into the caller-provided temporary directory (tmpdir is registered by
+# the including play).
+- name: generate host specific initial ramdisk
+  vars:
+    ssh_keys_root: "{{ hostvars[install_hostname].ssh_keys_root }}"
+    preseed_orig_initrd: "{{ installer_base_path }}/{{ install_distro }}-{{ install_codename }}/{{ hostvars[install_hostname].install_cooked.arch | default('amd64') }}-netboot/initrd.gz"
+    preseed_tmpdir: "{{ tmpdir.path }}"
+    preseed_virtual_machine: yes
+    preseed_force_net_ifnames_policy: path
+    preseed_no_netplan: yes
+    install_interface: enp1s1
+  import_role:
+    name: installer/debian/preseed
diff --git a/roles/vm/guest/install/tasks/installer-openbsd.yml b/roles/vm/guest/install/tasks/installer-openbsd.yml
new file mode 100644
index 00000000..afa17c45
--- /dev/null
+++ b/roles/vm/guest/install/tasks/installer-openbsd.yml
@@ -0,0 +1,19 @@
+---
+# Fetch the OpenBSD installer image for the target host's architecture
+# (defaults to amd64 when install_cooked.arch is unset).
+- name: fetch openbsd installer files
+  vars:
+    openbsd_installer_version: "{{ install_codename }}"
+    openbsd_installer_arch: "{{ hostvars[install_hostname].install_cooked.arch | default('amd64') }}"
+  import_role:
+    name: installer/openbsd/fetch
+
+# Build a host-specific autoinstall ISO in the caller-provided temporary
+# directory; console is forced to the serial port (com0) for headless VMs.
+# NOTE(review): openbsd_installer_version_short is expected to be set by
+# the fetch role above — confirm against installer/openbsd/fetch.
+- name: generate host specific autoinstall iso
+  vars:
+    ssh_keys_root: "{{ hostvars[install_hostname].ssh_keys_root }}"
+    obsd_autoinstall_orig_iso: "{{ installer_base_path }}/openbsd-{{ install_codename }}/{{ hostvars[install_hostname].install_cooked.arch | default('amd64') }}/install{{ openbsd_installer_version_short }}.iso"
+    obsd_autoinstall_tmpdir: "{{ tmpdir.path }}"
+    obsd_autoinstall_version: "{{ install_codename }}"
+    obsd_autoinstall_arch: "{{ hostvars[install_hostname].install_cooked.arch | default('amd64') }}"
+    obsd_autoinstall_serial_device: com0
+    install_interface: vio0
+  import_role:
+    name: installer/openbsd/autoinstall
diff --git a/roles/vm/guest/install/tasks/main.yml b/roles/vm/guest/install/tasks/main.yml
new file mode 100644
index 00000000..21a13b4d
--- /dev/null
+++ b/roles/vm/guest/install/tasks/main.yml
@@ -0,0 +1,90 @@
+---
+# Provision backing storage for the guest.  Disk declarations come from
+# install_cooked.disks on the guest's hostvars; virtio and scsi buses are
+# merged and then filtered by backing type (lvm vs. zfs).
+- name: create lvm-based disks for vm
+  loop: "{{ hostvars[install_hostname].install_cooked.disks.virtio | default({}) | combine(hostvars[install_hostname].install_cooked.disks.scsi | default({})) | dict2items | selectattr('value.type', 'eq', 'lvm') | list }}"
+  loop_control:
+    label: "{{ item.value.vg }} / {{ item.value.lv }} ({{ item.value.size }})"
+  lvol:
+    vg: "{{ item.value.vg }}"
+    lv: "{{ item.value.lv }}"
+    size: "{{ item.value.size }}"
+    state: present
+
+# One parent dataset per distinct zfs backend; not mountable itself, it
+# only namespaces the per-disk volumes created below.
+- name: create zfs base datasets for vm
+  loop: "{{ hostvars[install_hostname].install_cooked.disks.virtio | default({}) | combine(hostvars[install_hostname].install_cooked.disks.scsi | default({})) | dict2items | selectattr('value.type', 'eq', 'zfs') | map(attribute='value.backend') | map('default', 'default') | unique | list }}"
+  zfs:
+    name: "{{ vm_host_cooked.zfs[item].pool }}/{{ vm_host_cooked.zfs[item].name }}/{{ install_hostname }}"
+    state: present
+    extra_zfs_properties:
+      canmount: no
+      mountpoint: none
+
+# Each zfs-backed disk becomes a zvol (volsize forced from the declared
+# disk size; other properties pass through from the declaration).
+- name: create zfs-based disk volumes for vm
+  loop: "{{ hostvars[install_hostname].install_cooked.disks.virtio | default({}) | combine(hostvars[install_hostname].install_cooked.disks.scsi | default({})) | dict2items | selectattr('value.type', 'eq', 'zfs') | list }}"
+  loop_control:
+    label: "{{ item.value.name }} on backend {{ item.value.backend | default('default') }} ({{ item.value.size }})"
+  zfs:
+    name: "{{ vm_host_cooked.zfs[item.value.backend | default('default')].pool }}/{{ vm_host_cooked.zfs[item.value.backend | default('default')].name }}/{{ install_hostname }}/{{ item.value.name }}"
+    state: present
+    extra_zfs_properties: "{{ item.value.properties | default({}) | combine({'volsize': item.value.size}) }}"
+
+# Run the distro installer inside a transient VM.  The temporary workdir
+# holds the generated installer artifacts and is removed in `always`,
+# whether the install succeeded or not.
+- block:
+    - name: create a temporary workdir
+      tempfile:
+        path: "{{ installer_base_path }}/"
+        prefix: ".{{ install_hostname }}."
+        state: directory
+      register: tmpdir
+
+    - when: install_distro in ['debian', 'ubuntu']
+      import_tasks: installer-debian.yml
+
+    - when: install_distro in ['openbsd']
+      import_tasks: installer-openbsd.yml
+
+    # qemu runs as its own user and must be able to traverse/read the
+    # workdir to load the generated initrd/iso
+    - name: Make installer workdir readable by qemu
+      acl:
+        path: "{{ tmpdir.path }}"
+        state: present
+        entity: libvirt-qemu
+        etype: user
+        permissions: rx
+
+    - name: define installer vm
+      vars:
+        vm_define_installer: yes
+        installer_tmpdir: "{{ tmpdir.path }}"
+      import_role:
+        name: vm/guest/define
+
+    - debug:
+        msg: "you can check on the status of the installer running this command 'virsh console {{ install_hostname }}' on host {{ inventory_hostname }}."
+
+    - when: installer_manual_steps_msg is defined
+      pause:
+        prompt: |
+          Mind that this installer needs manual steps to be performed:
+
+          {{ installer_manual_steps_msg | indent(2) }}
+
+          When done press enter to continue or Ctrl-C + 'A' to abort.
+
+    # uses the bundled wait_for_virt library module; a crashed guest is
+    # treated as an installation failure, a clean shutdown as success
+    - name: wait for installer to finish or crash
+      wait_for_virt:
+        name: "{{ install_hostname }}"
+        states: shutdown,crashed
+        timeout: 1800
+      register: installer_result
+      failed_when: installer_result.failed or installer_result.state == "crashed"
+
+  always:
+    - name: cleanup temporary workdir
+      file:
+        path: "{{ tmpdir.path }}"
+        state: absent
+
+# Re-define the guest without the installer media so the final VM boots
+# from its freshly installed disks.
+- name: define vm
+  vars:
+    vm_define_installer: no
+  import_role:
+    name: vm/guest/define