---
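# create the disks for a vm, generate the distro installer configuration,
# then boot the vm with the installer attached and wait for it to finish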
- name: create lvm-based disks for vm
  loop: >-
    {{ hostvars[install_hostname].install_cooked.disks.virtio | default({}) |
    combine(hostvars[install_hostname].install_cooked.disks.scsi | default({})) |
    dict2items | selectattr('value.type', 'eq', 'lvm') | list }}
  loop_control:
    label: "{{ item.value.vg }} / {{ item.value.lv }} ({{ item.value.size }})"
  lvol:
    vg: "{{ item.value.vg }}"
    lv: "{{ item.value.lv }}"
    size: "{{ item.value.size }}"
    state: present
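# each zfs backend gets a per-vm parent dataset; the actual disk volumes are
# created beneath it, so the parent itself is neither mounted nor mountable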
- name: create zfs base datasets for vm
  loop: >-
    {{ hostvars[install_hostname].install_cooked.disks.virtio | default({}) |
    combine(hostvars[install_hostname].install_cooked.disks.scsi | default({})) |
    dict2items | selectattr('value.type', 'eq', 'zfs') |
    map(attribute='value.backend') | map('default', 'default') | unique | list }}
  zfs:
    name: "{{ vm_host_cooked.zfs[item].pool }}/{{ vm_host_cooked.zfs[item].name }}/{{ install_hostname }}"
    state: present
    extra_zfs_properties:
      canmount: no
      mountpoint: none
- name: create zfs-based disk volumes for vm
  loop: >-
    {{ hostvars[install_hostname].install_cooked.disks.virtio | default({}) |
    combine(hostvars[install_hostname].install_cooked.disks.scsi | default({})) |
    dict2items | selectattr('value.type', 'eq', 'zfs') | list }}
  loop_control:
    label: "{{ item.value.name }} on backend {{ item.value.backend | default('default') }} ({{ item.value.size }})"
  zfs:
    name: "{{ vm_host_cooked.zfs[item.value.backend | default('default')].pool }}/{{ vm_host_cooked.zfs[item.value.backend | default('default')].name }}/{{ install_hostname }}/{{ item.value.name }}"
    state: present
    extra_zfs_properties: "{{ item.value.properties | default({}) | combine({'volsize': item.value.size}) }}"
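# run the installer inside a block so the temporary workdir gets cleaned up
# in the always-section no matter whether the installation succeeds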
- block:
    - name: create a temporary workdir
      tempfile:
        state: directory
      register: tmpdir
    - when: install_distro in ['debian', 'ubuntu']
      vars:
        ssh_keys_root: "{{ hostvars[install_hostname].ssh_keys_root }}"
        preseed_tmpdir: "{{ tmpdir.path }}"
        preseed_virtual_machine: yes
        preseed_force_net_ifnames_policy: path
        preseed_no_netplan: yes
        install_interface: enp1s1
      import_role:
        name: installer/debian/preseed
    - when: install_distro in ['openbsd']
      vars:
        ssh_keys_root: "{{ hostvars[install_hostname].ssh_keys_root }}"
        obsd_autoinstall_tmpdir: "{{ tmpdir.path }}"
        obsd_autoinstall_serial_device: com0
        install_interface: vio0
      import_role:
        name: installer/openbsd/autoinstall
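    # qemu (running as the libvirt-qemu user) must be able to read the
    # generated installer files from the workdir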
    - name: make installer workdir readable by qemu
      acl:
        path: "{{ tmpdir.path }}"
        state: present
        entity: libvirt-qemu
        etype: user
        permissions: rx
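    # define and boot the vm in installer mode; vm_define_installer switches
    # the vm/define role to attach the installer from the workdir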
    - import_role:
        name: vm/define
      vars:
        vm_define_installer: yes
        preseed_tmpdir: "{{ tmpdir.path }}"
    - debug:
        msg: "you can check on the status of the installer by running 'virsh console {{ install_hostname }}' on host {{ inventory_hostname }}."
    - when: installer_manual_steps_msg is defined
      pause:
        prompt: |
          Note that this installer requires manual steps to be performed:
          {{ installer_manual_steps_msg | indent(2) }}
          When done, press Enter to continue or Ctrl-C followed by 'A' to abort.
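    # the installer is expected to shut the vm down when it finishes; a
    # crashed domain or hitting the 30-minute timeout is treated as a failure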
    - name: wait for installer to finish or crash
      wait_for_virt:
        name: "{{ install_hostname }}"
        states: shutdown,crashed
        timeout: 1800
      register: installer_result
      failed_when: installer_result.failed or installer_result.state == "crashed"
  always:
    - name: cleanup temporary workdir
      file:
        path: "{{ tmpdir.path }}"
        state: absent
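    # redefine the vm without the installer so subsequent boots start the
    # freshly installed system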
    - import_role:
        name: vm/define
      vars:
        vm_define_installer: no