59 files changed, 445 insertions, 260 deletions
diff --git a/chaos-at-home/mz-ap.yml b/chaos-at-home/ch-mz-ap.yml index 46b0aa88..545d8c4a 100644 --- a/chaos-at-home/mz-ap.yml +++ b/chaos-at-home/ch-mz-ap.yml @@ -1,6 +1,6 @@ --- - name: Basic Setup - hosts: mz-ap + hosts: ch-mz-ap connection: local gather_facts: no roles: diff --git a/chaos-at-home/ch-mz-router.yml b/chaos-at-home/ch-mz-router.yml new file mode 100644 index 00000000..65c0c2b0 --- /dev/null +++ b/chaos-at-home/ch-mz-router.yml @@ -0,0 +1,15 @@ +--- +- name: Basic Setup + hosts: ch-mz-router + connection: local + gather_facts: no + roles: + - role: installer/openwrt/image + post_tasks: + - pause: + prompt: | + * scp -O {{ output_images[0] }} ch-mz-router:/tmp/openwrt.bin + * ssh ch-mz-router sysupgrade -n /tmp/openwrt.bin + * ssh ch-mz-router dropbearkey -t ed25519 -f /etc/dyndns/id_ed25519 + replace the key at the dyndns server (ch-pan: /var/lib/dyndns/.ssh/authorized_keys) + after that run the dyndns update script manually to accept the ssh host-key diff --git a/chaos-at-home/host_vars/mz-router.yml b/chaos-at-home/host_vars/mz-router.yml deleted file mode 100644 index 9d5b814b..00000000 --- a/chaos-at-home/host_vars/mz-router.yml +++ /dev/null @@ -1,61 +0,0 @@ -$ANSIBLE_VAULT;1.2;AES256;chaos-at-home -65643339366566643435323363386430633134636135383962623132373433393832663837376539 -3235323334643539356336333737646438393664336265660a393134323731336665386165613435 -33393233666434643462323235656163373365333565373566616666666339616632663464326436 -3061343337356139330a653463376366343835616237646239643338333866653530613364323638 -35336561633037366437333866306231613738336339646538373261656365386231393265363130 -37303830386562646335353462353662383636393233623962376565363435643366633733626334 -35643363306163666662353962393231643939313230343961666661333334313438653234373733 -37376530633163323462366434623532626536323830333562316239306634303731643965386233 -32383466356366613262653731663665343036373136343731393332616435636165393639643165 -30363663376236613533393333663163376332326536396465656162653961316563373861323662 -64393265636566306631323937333164613165616232393633386438316362656635383062303337 -39333932616535613230346666373635653363333761373765346237313731343166666136323734 -31383930646434306137333262376264323539383365303931353666333738666639386537353831 -63616366346336326331663938383161373837356331633265303266353738633233303039383066 -62633738376139626662366632373435373337323737336639306339653231336433333863303130 -64663964393562616635633738333139646334636433316638393835306366363238623562626134 -39643465303936633564373933343163643637616239663534666631633536613165326663663431 -37623931303461376336653562646366383836343534386366306334666330306635396561303661 -62353830666234616438383565636638663436303830356535323935653034646366396530313336 -61646137336435313138326535376339333735393931373333323561373936396664333537336361 -61646332623639663264646362393133356562616338303835336330393265663432323139356233 -66356161366564316339623835613266343233373434666462326531303361313230633638353963 -34303262653534326562623138313566646631343136393766316434663735326661623930626539 -66363066363236363965613765666362616137333035383331666163623266316434353731306366 -32623733613165653265386430663361373466396430306262353631326238396130613165656332 -34373139313063336636626461646563373531383935376436653933306333346431393833656366 -61356437333031346634616539326438613931346666346234333365303463626465353039616437 -37613433396138636534326638393966356661386662396330623234616638633333333161653735 
-38386261306561383632613065653538376136306239336663356662386638623338613462353663 -62373666633333333461333963386632613137326165396433633439363938623838656665326339 -30323765613437373539333339646136633263323061653764306264316437353832313263323139 -32323633323562626661313534616263326561613030656363616461393334363833396133323266 -66386139383163386537383433396261373766653164373736323235643631656161393262383738 -61316533336662646232303936356236366436663265646131363237366463363732343964363366 -37653037303630613330333663623535663739643430333263636539613632303738653031663936 -38623665643939353733386335356161336531663333623538343332336264376663623261656633 -30333638646363356236303532363532353039323862366135653166316336623062333537366335 -31633839396461336361643465636664646164663762346236363763396263383163326465653964 -34663134623430353432646130633661636237613435323836386262333363373139376462363765 -63623638366136646265396432333339653234643532336233383461386361616630313936303162 -35626366366262623934333961653363616135313836643365613836343438353365383264623037 -31663231316462616137373435663039633434623466356266633235313865323362393636393862 -64343062336433343137316565373535666337653833353136376635666539656662373763623238 -30666532633965386264323565353431306633666364656662333631646139386138393066356238 -62653837656664333462363334373664373937333932313465353237636134626466343735633466 -31643039333866303233613762323866333264313135373130623166393339613131323537373537 -35626633373838363766623233626130646332336435316333323439613636373536343233633137 -30363863656465636635633936356165386633653637333932396164653835313163376363616133 -35376637376630636336386538353235353364313464313231633663616536323532336432376232 -64396234303332313134366133643664643165393932323361616666383162303337626663396131 -35613865373635303834373062666539386462663238383332616565303866316239613361373661 -37346162623764336332663431303664343430366562633361623566356266616534656562363833 -63366238656261646564306133623433306663376531373563363032303938303538356630636466 -30616630306334616237346661346235376133303538306638663631376163383138636365326230 -32376139373030303239376631316166393363613465323436633932376463303531386161313264 -65323261326232366332396335386639313735353135356139343937386232653737393565376639 -31363530313038306131383236396364666165393837343538316539336263333663643031623136 -30316436633662353162363836633238613833613530613762383662653435393263626161373938 -61613133643937346433643862326165326233363335656431663064336165383462623636383334 -63313438346136633461 diff --git a/chaos-at-home/mz-router.yml b/chaos-at-home/mz-router.yml deleted file mode 100644 index 8f4f056f..00000000 --- a/chaos-at-home/mz-router.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Basic Setup - hosts: mz-router - connection: local - gather_facts: no - roles: - - role: installer/openwrt/image - post_tasks: - - pause: - prompt: "\n****** copy and install image onto router and wait for it to come back ******\n" - - - shell: "base64 -d | ssh chmz-router \"/bin/sh -c 'umask 077; cat > /etc/dyndns/id_rsa'\"" - args: - stdin: "{{ vault_dyndns_ssh_key_b64 }}" diff --git a/inventory/group_vars/promzone-chaos-at-home/vars.yml b/inventory/group_vars/promzone-chaos-at-home/vars.yml index 7969d26f..752bada7 100644 --- a/inventory/group_vars/promzone-chaos-at-home/vars.yml +++ b/inventory/group_vars/promzone-chaos-at-home/vars.yml @@ -14,9 +14,9 @@ prometheus_exporter_smokeping_version: 0.8.1 prometheus_exporter_bind_version: 0.7.0 prometheus_exporter_chrony_version: 
0.10.1 prometheus_exporter_chrony_disable_dns_lookups: yes -prometheus_exporter_mosquitto_version: 0.7.2 +prometheus_exporter_mosquitto_version: 0.7.3 -prometheus_server_version: 2.53.2 +prometheus_server_version: 2.54.1 prometheus_alertmanager_version: 0.27.0 prometheus_server: ch-mon diff --git a/inventory/group_vars/promzone-elevate-festival/vars.yml b/inventory/group_vars/promzone-elevate-festival/vars.yml index 8cb3ebbb..398f1511 100644 --- a/inventory/group_vars/promzone-elevate-festival/vars.yml +++ b/inventory/group_vars/promzone-elevate-festival/vars.yml @@ -13,7 +13,7 @@ prometheus_exporter_nut_version: 3.1.1 prometheus_exporter_chrony_version: 0.10.1 prometheus_exporter_chrony_disable_dns_lookups: yes -prometheus_server_version: 2.53.2 +prometheus_server_version: 2.54.1 prometheus_alertmanager_version: 0.27.0 prometheus_server: ele-calypso diff --git a/inventory/host_vars/ch-apps/vars.yml b/inventory/host_vars/ch-apps/vars.yml index a3f34992..57a7e485 100644 --- a/inventory/host_vars/ch-apps/vars.yml +++ b/inventory/host_vars/ch-apps/vars.yml @@ -141,7 +141,7 @@ kubelet_storage: quota: 10G 'syncoid:sync': 'false' -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 42 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ch-companion-raspi.yml b/inventory/host_vars/ch-companion-raspi.yml index d8134f27..d82f5b8e 100644 --- a/inventory/host_vars/ch-companion-raspi.yml +++ b/inventory/host_vars/ch-companion-raspi.yml @@ -29,7 +29,7 @@ docker_pkg_provider: docker-com docker_plugins: - buildx -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 42 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ch-dione.yml b/inventory/host_vars/ch-dione.yml index 8534d2fb..1782ceea 100644 --- a/inventory/host_vars/ch-dione.yml +++ b/inventory/host_vars/ch-dione.yml @@ -49,7 +49,7 @@ kubelet_storage: size: 5G fs: ext4 -# kubernetes_version: 1.30.3 +# kubernetes_version: 1.30.4 # kubernetes_container_runtime: docker # kubernetes_standalone_max_pods: 42 # kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ch-equinox-t450s.yml b/inventory/host_vars/ch-equinox-t450s.yml index b34d1ae6..869bcac8 100644 --- a/inventory/host_vars/ch-equinox-t450s.yml +++ b/inventory/host_vars/ch-equinox-t450s.yml @@ -262,6 +262,8 @@ ws_base_extra_packages: - texlive-lang-german - texlive-latex-extra - tlp + - tor + - tor-geoipdb - torbrowser-launcher - totem - unrar @@ -296,9 +298,6 @@ ws_base_extra_packages: - grype # needs apt-repo/ansible - ansible - ### needs apt-repo/tor-project -> https://gitlab.torproject.org/tpo/core/tor/-/issues/40946 - ##- tor - ##- tor-geoipdb # needs apt-repo/kubernetes - kubectl # needs apt-repo/element diff --git a/inventory/host_vars/ch-equinox-ws.yml b/inventory/host_vars/ch-equinox-ws.yml index 8c55d382..70faf7be 100644 --- a/inventory/host_vars/ch-equinox-ws.yml +++ b/inventory/host_vars/ch-equinox-ws.yml @@ -26,7 +26,7 @@ network: nameservers: "{{ network_zones.lan.dns }}" domain: "{{ host_domain }}" primary: &_network_primary_ - name: enp9s0 + name: enp11s0 address: "{{ network_zones.lan.prefix | ansible.utils.ipaddr(network_zones.lan.offsets[inventory_hostname]) }}" gateway: "{{ network_zones.lan.gateway }}" interfaces: @@ -264,6 +264,8 @@ ws_base_extra_packages: - texlive - texlive-lang-german - texlive-latex-extra + - tor + - tor-geoipdb - 
torbrowser-launcher - totem - unrar @@ -297,9 +299,6 @@ ws_base_extra_packages: - grype # needs apt-repo/ansible - ansible - ### needs apt-repo/tor-project -> https://gitlab.torproject.org/tpo/core/tor/-/issues/40946 - ##- tor - ##- tor-geoipdb # needs apt-repo/kubernetes - kubectl # needs apt-repo/element diff --git a/inventory/host_vars/ch-helene.yml b/inventory/host_vars/ch-helene.yml index 61d4f7ef..52b3a3f9 100644 --- a/inventory/host_vars/ch-helene.yml +++ b/inventory/host_vars/ch-helene.yml @@ -49,7 +49,7 @@ kubelet_storage: size: 5G fs: ext4 -# kubernetes_version: 1.30.3 +# kubernetes_version: 1.30.4 # kubernetes_container_runtime: docker # kubernetes_standalone_max_pods: 42 # kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ch-mimas.yml b/inventory/host_vars/ch-mimas.yml index bc09509d..d2ad251b 100644 --- a/inventory/host_vars/ch-mimas.yml +++ b/inventory/host_vars/ch-mimas.yml @@ -63,7 +63,7 @@ wireguard_p2p_peers: - pub_key: "9pUDet+les5aI9UnHHVgyw95hNBxlAX8DBCxTjigpEI=" endpoint: host: "{{ network_zones.magenta.prefix | ansible.utils.ipaddr(network_zones.magenta.offsets['ch-router']) | ansible.utils.ipaddr('address') }}" - port: 51820 + port: 5182 allowed_ips: - "{{ network_zones.remote.prefix }}" - "{{ network_zones.svc.prefix }}" diff --git a/inventory/host_vars/mz-ap.yml b/inventory/host_vars/ch-mz-ap.yml index 044f41f9..044f41f9 100644 --- a/inventory/host_vars/mz-ap.yml +++ b/inventory/host_vars/ch-mz-ap.yml diff --git a/inventory/host_vars/mz-router.yml b/inventory/host_vars/ch-mz-router.yml index 254aaf02..c798623b 100644 --- a/inventory/host_vars/mz-router.yml +++ b/inventory/host_vars/ch-mz-router.yml @@ -1,10 +1,4 @@ --- -## TOOD: -# After router upgrades run this command to generate a new dyndns ssh key -# $ dropbearkey -t ed25519 -f /etc/dyndns/id_ed25519 -# Then replace the key at the dyndns server (/var/lib/dyndns/.ssh/authorized_keys) -# after that run the dyndns update script manually to accept the ssh host-key - openwrt_arch: ath79 openwrt_target: generic openwrt_profile: tplink_tl-wdr4300-v1 diff --git a/inventory/host_vars/ch-pan.yml b/inventory/host_vars/ch-pan.yml index 29ec85ae..2b7fc39b 100644 --- a/inventory/host_vars/ch-pan.yml +++ b/inventory/host_vars/ch-pan.yml @@ -58,7 +58,7 @@ wireguard_p2p_peers: - pub_key: "9pUDet+les5aI9UnHHVgyw95hNBxlAX8DBCxTjigpEI=" endpoint: host: "{{ network_zones.magenta.prefix | ansible.utils.ipaddr(network_zones.magenta.offsets['ch-router']) | ansible.utils.ipaddr('address') }}" - port: 51820 + port: 5182 allowed_ips: - "{{ network_zones.remote.prefix }}" - "{{ network_zones.svc.prefix }}" @@ -88,7 +88,7 @@ dyndns: - "dyn.schaaas.at. 7200 IN AAAA 2a02:3e0:407::19" - "captive.schaaas.at. 7200 IN CNAME dyn.schaaas.at." 
clients: - mz-router: mzl + ch-mz-router: mzl ch-equinox-t450s: equinox ele-media: elemedia diff --git a/inventory/host_vars/ch-router.yml b/inventory/host_vars/ch-router.yml index 3f31bcbe..aaa46bb2 100644 --- a/inventory/host_vars/ch-router.yml +++ b/inventory/host_vars/ch-router.yml @@ -185,7 +185,7 @@ openwrt_mixin: ip protocol icmp accept ip6 nexthdr ipv6-icmp accept tcp dport { {{ ansible_port }} } accept - udp dport { openvpn, 51820 } accept + udp dport { openvpn, 5182 } accept } chain input_openvpn { @@ -368,7 +368,7 @@ openwrt_uci: options: proto: wireguard private_key: "{{ vault_wireguard_remote_private_key }}" - listen_port: 51820 + listen_port: 5182 addresses: - "{{ network_zones.remote.prefix | ansible.utils.ipaddr(network_zones.remote.offsets[inventory_hostname]) }}" nohostroute: 1 @@ -404,8 +404,8 @@ openwrt_uci: options: enabled: '1' interface: 'eth1' - download: '147000' - upload: '20000' + download: '510000' + upload: '72000' qdisc: 'cake' script: 'piece_of_cake.qos' qdisc_advanced: '0' diff --git a/inventory/host_vars/ele-calypso.yml b/inventory/host_vars/ele-calypso.yml index 90e92097..8da4c4af 100644 --- a/inventory/host_vars/ele-calypso.yml +++ b/inventory/host_vars/ele-calypso.yml @@ -74,7 +74,7 @@ kubelet_storage: size: 5G fs: ext4 -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 42 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ele-companion-raspi.yml b/inventory/host_vars/ele-companion-raspi.yml index 7d130639..b25acb27 100644 --- a/inventory/host_vars/ele-companion-raspi.yml +++ b/inventory/host_vars/ele-companion-raspi.yml @@ -29,7 +29,7 @@ docker_pkg_provider: docker-com docker_plugins: - buildx -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 42 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ele-coturn.yml b/inventory/host_vars/ele-coturn.yml index f966ac9e..1cbc2767 100644 --- a/inventory/host_vars/ele-coturn.yml +++ b/inventory/host_vars/ele-coturn.yml @@ -27,7 +27,7 @@ acme_directory_server: "{{ acme_directory_server_le_live_v2 }}" acme_client: acmetool -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 100 kubernetes_standalone_pod_cidr: 192.168.255.0/24 diff --git a/inventory/host_vars/ele-helene.yml b/inventory/host_vars/ele-helene.yml index 6eef576d..76f7978c 100644 --- a/inventory/host_vars/ele-helene.yml +++ b/inventory/host_vars/ele-helene.yml @@ -92,7 +92,7 @@ kubelet_storage: size: 5G fs: ext4 -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 42 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ele-jitsi.yml b/inventory/host_vars/ele-jitsi.yml index ea446019..4fe526c0 100644 --- a/inventory/host_vars/ele-jitsi.yml +++ b/inventory/host_vars/ele-jitsi.yml @@ -32,7 +32,7 @@ acme_directory_server: "{{ acme_directory_server_le_live_v2 }}" acme_client: acmetool -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 100 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ele-media.yml b/inventory/host_vars/ele-media.yml index e97e1671..d0fe5e2f 100644 --- a/inventory/host_vars/ele-media.yml +++ b/inventory/host_vars/ele-media.yml @@ -73,7 +73,7 @@ kubelet_storage: 
size: 5G fs: ext4 -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/ele-thetys.yml b/inventory/host_vars/ele-thetys.yml index 2911be57..8d00359e 100644 --- a/inventory/host_vars/ele-thetys.yml +++ b/inventory/host_vars/ele-thetys.yml @@ -77,7 +77,7 @@ kubelet_storage: size: 5G fs: ext4 -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 42 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/glt-jitsi.yml b/inventory/host_vars/glt-jitsi.yml index 2e36c347..69e51909 100644 --- a/inventory/host_vars/glt-jitsi.yml +++ b/inventory/host_vars/glt-jitsi.yml @@ -27,7 +27,7 @@ acme_directory_server: "{{ acme_directory_server_le_live_v2 }}" acme_client: acmetool -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 100 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/s2-thetys.yml b/inventory/host_vars/s2-thetys.yml index 5f2897c9..8f03e497 100644 --- a/inventory/host_vars/s2-thetys.yml +++ b/inventory/host_vars/s2-thetys.yml @@ -53,7 +53,7 @@ kubelet_storage: size: 5G fs: ext4 -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 42 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/host_vars/sk-cloudio/nextcloud.yml b/inventory/host_vars/sk-cloudio/nextcloud.yml index df947c89..3c5e5ae0 100644 --- a/inventory/host_vars/sk-cloudio/nextcloud.yml +++ b/inventory/host_vars/sk-cloudio/nextcloud.yml @@ -96,8 +96,6 @@ nextcloud_instances: # new: true version: 29.0.3 port: 8105 - hostnames: - - wolke.elev8.at storage: type: zfs parent: "{{ _nextcloud_zfs_base_ }}" @@ -110,3 +108,9 @@ nextcloud_instances: type: mariadb version: 10.11.5 password: "{{ vault_nextcloud_database_passwords['wolke.elev8.at'] }}" + publish: + zone: "{{ apps_publish_zone__sk_cloudio }}" + hostnames: + - wolke.elev8.at + tls: + certificate_provider: acmetool diff --git a/inventory/host_vars/sk-cloudio/vars.yml b/inventory/host_vars/sk-cloudio/vars.yml index 8c57def9..be136e82 100644 --- a/inventory/host_vars/sk-cloudio/vars.yml +++ b/inventory/host_vars/sk-cloudio/vars.yml @@ -84,6 +84,8 @@ zfs_sanoid_modules: docker_pkg_provider: docker-com +docker_plugins: + - buildx docker_storage: type: zfs diff --git a/inventory/host_vars/sk-testvm.yml b/inventory/host_vars/sk-testvm.yml index 0c45dfcb..d728464d 100644 --- a/inventory/host_vars/sk-testvm.yml +++ b/inventory/host_vars/sk-testvm.yml @@ -56,7 +56,7 @@ kubelet_storage: size: 1G fs: ext4 -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 100 kubernetes_standalone_pod_cidr: 192.168.255.0/24 diff --git a/inventory/host_vars/sk-tomnext-nc.yml b/inventory/host_vars/sk-tomnext-nc.yml index 4b6df0d7..01cf6e8c 100644 --- a/inventory/host_vars/sk-tomnext-nc.yml +++ b/inventory/host_vars/sk-tomnext-nc.yml @@ -88,7 +88,7 @@ kubelet_storage: properties: quota: 15G -kubernetes_version: 1.30.3 +kubernetes_version: 1.30.4 kubernetes_container_runtime: docker kubernetes_standalone_max_pods: 15 kubernetes_standalone_cni_variant: with-portmap diff --git a/inventory/hosts.ini b/inventory/hosts.ini index 60f41c10..f4c61e2a 100644 --- a/inventory/hosts.ini +++ b/inventory/hosts.ini @@ -71,8 +71,8 @@ 
chaos-at-home-sensors chaos-at-home-ups [chaos-at-home_mz] -mz-router ansible_host=chmz-router -mz-ap ansible_host=chmz-ap +ch-mz-router host_name=mz-router +ch-mz-ap host_name=mz-ap [chaos-at-home_mur-at] ch-atlas host_name=atlas @@ -328,8 +328,8 @@ ch-testvm-openwrt ch-installsmb ch-gw-c3voc ch-raspi-openwrt -mz-ap -mz-router +ch-mz-ap +ch-mz-router ele-router-hmtsaal ele-router-orpheum ele-router-emc diff --git a/roles/apps/nextcloud/base/defaults/main.yml b/roles/apps/nextcloud/base/defaults/main.yml new file mode 100644 index 00000000..1a8a6d52 --- /dev/null +++ b/roles/apps/nextcloud/base/defaults/main.yml @@ -0,0 +1,9 @@ +--- +nextcloud_app_uid: "950" +nextcloud_app_gid: "950" + +nextcloud_db_uid: "951" +nextcloud_db_gid: "951" + +nextcloud_redis_uid: "952" +nextcloud_redis_gid: "952" diff --git a/roles/apps/nextcloud/base/tasks/main.yml b/roles/apps/nextcloud/base/tasks/main.yml new file mode 100644 index 00000000..4c85a35c --- /dev/null +++ b/roles/apps/nextcloud/base/tasks/main.yml @@ -0,0 +1,51 @@ +--- +- name: add group for nextcloud app + group: + name: nc-app + gid: "{{ nextcloud_app_gid }}" + +- name: add user for nextcloud app + user: + name: nc-app + uid: "{{ nextcloud_app_uid }}" + group: nc-app + password: "!" + +- name: add group for nextcloud db + group: + name: nc-db + gid: "{{ nextcloud_db_gid }}" + +- name: add user for nextcloud db + user: + name: nc-db + uid: "{{ nextcloud_db_uid }}" + group: nc-db + password: "!" + +- name: add group for nextcloud redis + group: + name: nc-redis + gid: "{{ nextcloud_redis_gid }}" + +- name: add user for nextcloud redis + user: + name: nc-redis + uid: "{{ nextcloud_redis_uid }}" + group: nc-redis + password: "!" + +- name: install template systemd unit for cron trigger + template: + src: cron@.service.j2 + dest: /etc/systemd/system/nextcloud-cron@.service + +- name: install management scripts + loop: + - nextcloud-upgrade + - nextcloud-occ + - nextcloud-cron + template: + src: "{{ item }}.j2" + dest: "/usr/local/bin/{{ item }}" + mode: 0755 diff --git a/roles/apps/nextcloud/templates/cron@.service.j2 b/roles/apps/nextcloud/base/templates/cron@.service.j2 index d8cde0a3..d8cde0a3 100644 --- a/roles/apps/nextcloud/templates/cron@.service.j2 +++ b/roles/apps/nextcloud/base/templates/cron@.service.j2 diff --git a/roles/apps/nextcloud/templates/nextcloud-cron.j2 b/roles/apps/nextcloud/base/templates/nextcloud-cron.j2 index 355ae2c3..cf1d9715 100755 --- a/roles/apps/nextcloud/templates/nextcloud-cron.j2 +++ b/roles/apps/nextcloud/base/templates/nextcloud-cron.j2 @@ -16,4 +16,4 @@ if [ -z "$pod_id" ]; then echo "Pod not found"; exit 1; fi container_id=$(crictl ps -q --name '^nextcloud$' -p "$pod_id") if [ -z "$container_id" ]; then echo "Container not found"; exit 1; fi -exec crictl exec "$container_id" php -f /var/www/html/cron.php +exec crictl exec "$container_id" bash -c 'php -f /var/www/html/occ status -e; if [ $? 
-eq 0 ]; then php -f /var/www/html/cron.php; else echo "not running cron script when in maintenance mode"; fi' diff --git a/roles/apps/nextcloud/templates/nextcloud-occ.j2 b/roles/apps/nextcloud/base/templates/nextcloud-occ.j2 index f12f1259..01383c95 100755 --- a/roles/apps/nextcloud/templates/nextcloud-occ.j2 +++ b/roles/apps/nextcloud/base/templates/nextcloud-occ.j2 @@ -16,4 +16,4 @@ if [ -z "$pod_id" ]; then echo "Pod not found"; exit 1; fi container_id=$(crictl ps -q --name '^nextcloud$' -p "$pod_id") if [ -z "$container_id" ]; then echo "Container not found"; exit 1; fi -exec crictl exec -it "$container_id" php /var/www/html/occ $@ +exec crictl exec -it "$container_id" php -f /var/www/html/occ $@ diff --git a/roles/apps/nextcloud/templates/nextcloud-upgrade.j2 b/roles/apps/nextcloud/base/templates/nextcloud-upgrade.j2 index ffa912e8..f6edcb44 100755 --- a/roles/apps/nextcloud/templates/nextcloud-upgrade.j2 +++ b/roles/apps/nextcloud/base/templates/nextcloud-upgrade.j2 @@ -9,6 +9,13 @@ fi set -eu +CURRENT_VERSION=$(nextcloud-occ "$INST_NAME" status -n --no-warnings --output plain | tr -d '\r' | awk -F : '/versionstring/ { print($2) }' | tr -d ' ') +if [ "$CURRENT_VERSION" = "$VERSION" ]; then + echo "The current running version of nextcloud is already $CURRENT_VERSION, nothing to do here." + exit 0 +fi +echo "will upgrade nextcloud instance $INST_NAME from '$CURRENT_VERSION' to '$VERSION'" + K8S_CONFIG_HASH_D="/etc/kubernetes/config-hashes/" K8S_CONFIG_HASH_FILE="$K8S_CONFIG_HASH_D/nextcloud-$INST_NAME.yml" K8S_MANIFEST_D="/etc/kubernetes/manifests/" @@ -41,16 +48,8 @@ else echo "" fi -STORAGE_TYPE=$(findmnt -no fstype -T "$IMAGE_BUILD_D") -if [ $STORAGE_TYPE == "zfs" ]; then - echo "*** creating ZFS snapshot" - echo "" - - IMAGE_NAME_ESCAPED=${IMAGE_NAME/\//\\/} - CURRENT_VERSION=$(cat "$K8S_MANIFEST_FILE" | awk '/image: "'"$IMAGE_NAME_ESCAPED"':.*"/ { print($2) }' | tr -d '"' | cut -d ':' -f 2) - ZFS_VOLUME=$(findmnt -no source -T "$IMAGE_BUILD_D") - zfs snapshot "$ZFS_VOLUME@upgrade_$CURRENT_VERSION-to-$VERSION""_$(date '+%Y-%m-%m_%H:%M:%S')" -fi +INSTANCE_BASE_D=$(dirname "$IMAGE_BUILD_D") +"$INSTANCE_BASE_D/upgrade.sh" prepare "$CURRENT_VERSION" "$VERSION" echo "*** Rebuilding config-hash file" echo "" @@ -70,4 +69,6 @@ cat "$TMP_D/config-hash.yml" > "$K8S_CONFIG_HASH_FILE" cat "$TMP_D/manifest.yml" > "$K8S_MANIFEST_FILE" echo "" +"$INSTANCE_BASE_D/upgrade.sh" finalize "$CURRENT_VERSION" "$VERSION" + exit 0 diff --git a/roles/apps/nextcloud/defaults/main.yml b/roles/apps/nextcloud/defaults/main.yml index ac87de94..631b0a0a 100644 --- a/roles/apps/nextcloud/defaults/main.yml +++ b/roles/apps/nextcloud/defaults/main.yml @@ -1,21 +1,9 @@ --- -nextcloud_app_uid: "950" -nextcloud_app_gid: "950" - -nextcloud_db_uid: "951" -nextcloud_db_gid: "951" - -nextcloud_redis_uid: "952" -nextcloud_redis_gid: "952" - # nextcloud_instances: # example: # new: yes # version: 17.0.0 # port: 8100 -# hostnames: -# - wolke.example.com -# - cloud.example.com # storage: # type: ... # database: @@ -26,3 +14,10 @@ nextcloud_redis_gid: "952" # from: foo/bar:1.0 # optional # dockerfile: | # RUN apt-get install ... +# publish: +# zone: "{{ apps_publish_zone__foo }}" +# hostnames: +# - wolke.example.com +# - cloud.example.com +# tls: +# certificate_provider: ... 
diff --git a/roles/apps/nextcloud/instance/tasks/main.yml b/roles/apps/nextcloud/instance/tasks/main.yml index 373aa0a8..71a3ee79 100644 --- a/roles/apps/nextcloud/instance/tasks/main.yml +++ b/roles/apps/nextcloud/instance/tasks/main.yml @@ -47,6 +47,43 @@ dest: "{{ nextcloud_instance_basepath }}/config/ports.conf" +- name: create tls directory + file: + path: "{{ nextcloud_instance_basepath }}/tls" + owner: "{{ nextcloud_app_uid }}" + group: "{{ nextcloud_app_gid }}" + mode: 0500 + state: directory + +- name: generate/install TLS certificates for publishment + vars: + x509_certificate_name: "nextcloud-{{ nextcloud_instance }}_publish" + x509_certificate_hostnames: [] + x509_certificate_config: + ca: "{{ nextcloud_instances[nextcloud_instance].publish.zone.certificate_ca_config }}" + cert: + common_name: "nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }}" + extended_key_usage: + - serverAuth + extended_key_usage_critical: yes + create_subject_key_identifier: yes + not_after: +100w + x509_certificate_renewal: + install: + - dest: "{{ nextcloud_instance_basepath }}/tls/cert.pem" + src: + - cert + owner: "{{ nextcloud_app_uid }}" + mode: "0400" + - dest: "{{ nextcloud_instance_basepath }}/tls/key.pem" + src: + - key + owner: "{{ nextcloud_app_uid }}" + mode: "0400" + include_role: + name: "x509/{{ nextcloud_instances[nextcloud_instance].publish.zone.certificate_provider }}/cert" + + - name: build custom image # when: "'custom_image' in nextcloud_instances[nextcloud_instance]" include_tasks: custom-image.yml @@ -71,6 +108,13 @@ name: kubernetes/standalone/pod +- name: install upgrade helper script + template: + src: upgrade.sh.j2 + dest: "{{ nextcloud_instance_basepath }}/upgrade.sh" + mode: 0755 + + - name: install systemd timer unit template: src: cron-.timer.j2 @@ -84,29 +128,44 @@ enabled: yes -- name: configure nginx vhost +- name: configure nginx vhost for publishment vars: - nginx_vhost: - name: "nextcloud-{{ nextcloud_instance }}" + nginx_vhost__yaml: | + name: "nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }}" template: generic + {% if 'tls' in nextcloud_instances[nextcloud_instance].publish %} tls: - certificate_provider: "{{ acme_client }}" - hostnames: "{{ nextcloud_instances[nextcloud_instance].hostnames }}" + {{ nextcloud_instances[nextcloud_instance].publish.tls | to_nice_yaml(indent=2) | indent(2) }} + {% endif %} + hostnames: + {% for hostname in nextcloud_instances[nextcloud_instance].publish.hostnames %} + - {{ hostname }} + {% endfor %} locations: '/': - proxy_pass: "http://127.0.0.1:{{ nextcloud_instances[nextcloud_instance].port }}" + {% if nextcloud_instances[nextcloud_instance].publish.zone.publisher == inventory_hostname %} + proxy_pass: "https://127.0.0.1:{{ nextcloud_instances[nextcloud_instance].port }}" + {% else %} + proxy_pass: "https://{{ ansible_default_ipv4.address }}:{{ nextcloud_instances[nextcloud_instance].port }}" + {% endif %} proxy_redirect: - - redirect: "http://$host/" - replacement: "https://$host/" - - redirect: "http://$host:8080/" + - redirect: "https://$host:8080/" replacement: "https://$host/" + proxy_ssl: + trusted_certificate: "/etc/ssl/apps-publish-{{ nextcloud_instances[nextcloud_instance].publish.zone.name }}/apps-publish-{{ nextcloud_instances[nextcloud_instance].publish.zone.name }}-ca-crt.pem" + verify: "on" + name: "nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }}" + protocols: "TLSv1.3" extra_directives: |- client_max_body_size 0; types { text/javascript js mjs; } + nginx_vhost: "{{ nginx_vhost__yaml | 
from_yaml }}" include_role: name: nginx/vhost + apply: + delegate_to: "{{ nextcloud_instances[nextcloud_instance].publish.zone.publisher }}" # TODO: @@ -118,7 +177,7 @@ prompt: | ************* {{ nextcloud_instance }} is a new instance ** - ** Go to https://{{ nextcloud_instances[nextcloud_instance].hostnames[0] }} and finalize the + ** Go to https://{{ nextcloud_instances[nextcloud_instance].publish.hostnames[0] }} and finalize the ** installation. After that run the following commands: ** ** $ nextcloud-occ {{ nextcloud_instance }} config:system:set default_phone_region --value='at' diff --git a/roles/apps/nextcloud/instance/templates/apache-site.conf.j2 b/roles/apps/nextcloud/instance/templates/apache-site.conf.j2 index a52a7fc5..8df06113 100644 --- a/roles/apps/nextcloud/instance/templates/apache-site.conf.j2 +++ b/roles/apps/nextcloud/instance/templates/apache-site.conf.j2 @@ -1,3 +1,7 @@ +IncludeOptional mods-available/socache_shmcb.load +IncludeOptional mods-available/ssl.load +IncludeOptional mods-available/ssl.conf + <VirtualHost *:8080> ServerAdmin webmaster@localhost DocumentRoot /var/www/html @@ -5,6 +9,12 @@ UseCanonicalName Off UseCanonicalPhysicalPort Off + ServerName nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }} + SSLEngine on + SSLCertificateFile "/etc/ssl/publish/cert.pem" + SSLCertificateKeyFile "/etc/ssl/publish/key.pem" + SSLProtocol TLSv1.3 + ErrorLog ${APACHE_LOG_DIR}/error.log CustomLog ${APACHE_LOG_DIR}/access.log combined </VirtualHost> diff --git a/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2 b/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2 index 42d76757..c1a4f2ea 100644 --- a/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2 +++ b/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2 @@ -13,9 +13,7 @@ containers: {% if 'new' in nextcloud_instances[nextcloud_instance] and nextcloud_instances[nextcloud_instance].new %} env: - name: NEXTCLOUD_TRUSTED_DOMAINS - value: "{{ nextcloud_instances[nextcloud_instance].hostnames | join(' ') }}" - - name: OVERWRITEPROTOCOL - value: "https" + value: "{{ nextcloud_instances[nextcloud_instance].publish.hostnames | join(' ') }}" - name: MYSQL_HOST value: 127.0.0.1 - name: MYSQL_DATABASE @@ -36,6 +34,9 @@ containers: mountPath: /etc/apache2/ports.conf subPath: ports.conf readOnly: true + - name: tls + mountPath: /etc/ssl/publish + readOnly: true ports: - containerPort: 8080 hostPort: {{ nextcloud_instances[nextcloud_instance].port }} @@ -91,6 +92,10 @@ volumes: hostPath: path: "{{ nextcloud_instance_basepath }}/config/" type: Directory +- name: tls + hostPath: + path: "{{ nextcloud_instance_basepath }}/tls/" + type: Directory - name: nextcloud hostPath: path: "{{ nextcloud_instance_basepath }}/nextcloud" diff --git a/roles/apps/nextcloud/instance/templates/upgrade.sh.j2 b/roles/apps/nextcloud/instance/templates/upgrade.sh.j2 new file mode 100644 index 00000000..62f6641e --- /dev/null +++ b/roles/apps/nextcloud/instance/templates/upgrade.sh.j2 @@ -0,0 +1,77 @@ +#!/bin/bash + +set -e + +if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then + echo "Usage: $0 (preapre|finalize) <old-version> <new-version>" + exit 1 +fi + +COMMAND="$1" +OLD_VERSION="$2" +NEW_VERSION="$3" +POD_NAME="{{ nextcloud_instance }}-$(hostname)" + +maintenance_mode() { + POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q) + CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q) + crictl exec "$CONTAINER_ID" php -f /var/www/html/occ maintenance:mode "$1" +} + 
+wait_for_cronjobs() { + POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q) + CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q) + crictl exec "$CONTAINER_ID" bash -c 'echo -n "waiting for running cron script "; while [ -n "$(pgrep -a php | grep cron.php)" ]; do echo -n "."; sleep 1; done; echo ""' +} + +wait_for_upgrade_complete() { + NEW_VERSION="$1" + + set +e + echo -n "waiting for new version to be ready " + while true; do + POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q) + if [ -z $POD_ID ]; then continue; fi + CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q) + if [ -z $CONTAINER_ID ]; then continue; fi + STATUS_OUTPUT=$(crictl exec "$CONTAINER_ID" php -f /var/www/html/occ status -n --no-warnings --output plain) + if [ $? -eq 0 ]; then + RUNNING_VERSION=$(echo "$STATUS_OUTPUT" | awk -F : '/versionstring/ { print($2) }' | tr -d ' ') + if [ "$RUNNING_VERSION" = "$NEW_VERSION" ]; then + break + fi + echo -n "." + fi + sleep 1 + done + echo "" + set -e + crictl exec "$CONTAINER_ID" bash -c 'echo -n "waiting for apache to start "; while [ -z "$(pgrep apache2)" ]; do echo -n "."; sleep 1; done; echo ""' +} + +storage_snapshot() { + OLD_VERSION="$1" + NEW_VERSION="$2" + +{% if nextcloud_instances[nextcloud_instance].storage.type == 'zfs' %} + ZFS_VOLUME=$(findmnt -no source -T "{{ nextcloud_instance_basepath }}") + echo "creating snapshot for zfs volume: $ZFS_VOLUME" + zfs snapshot "$ZFS_VOLUME@upgrade_$OLD_VERSION-to-$NEW_VERSION""_$(date '+%Y-%m-%m_%H:%M:%S')" +{% endif %} +} + +case "$COMMAND" in + prepare) + maintenance_mode --on + wait_for_cronjobs + storage_snapshot "$OLD_VERSION" "$NEW_VERSION" + ;; + finalize) + wait_for_upgrade_complete "$NEW_VERSION" + maintenance_mode --off + ;; + *) + echo "unknown command: $COMMAND, must be prepare or finalize" + exit 1 + ;; +esac diff --git a/roles/apps/nextcloud/meta/main.yml b/roles/apps/nextcloud/meta/main.yml new file mode 100644 index 00000000..c00c47ce --- /dev/null +++ b/roles/apps/nextcloud/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: apps/nextcloud/base diff --git a/roles/apps/nextcloud/tasks/main.yml b/roles/apps/nextcloud/tasks/main.yml index 69bbba6a..6e81f351 100644 --- a/roles/apps/nextcloud/tasks/main.yml +++ b/roles/apps/nextcloud/tasks/main.yml @@ -1,55 +1,4 @@ --- -- name: add group for nextcloud app - group: - name: nc-app - gid: "{{ nextcloud_app_gid }}" - -- name: add user for nextcloud app - user: - name: nc-app - uid: "{{ nextcloud_app_uid }}" - group: nc-app - password: "!" - -- name: add group for nextcloud db - group: - name: nc-db - gid: "{{ nextcloud_db_gid }}" - -- name: add user for nextcloud db - user: - name: nc-db - uid: "{{ nextcloud_db_uid }}" - group: nc-db - password: "!" - -- name: add group for nextcloud redis - group: - name: nc-redis - gid: "{{ nextcloud_redis_gid }}" - -- name: add user for nextcloud redis - user: - name: nc-redis - uid: "{{ nextcloud_redis_uid }}" - group: nc-redis - password: "!" 
- -- name: install template systemd unit for cron trigger - template: - src: cron@.service.j2 - dest: /etc/systemd/system/nextcloud-cron@.service - -- name: install management scripts - loop: - - nextcloud-upgrade - - nextcloud-occ - - nextcloud-cron - template: - src: "{{ item }}.j2" - dest: "/usr/local/bin/{{ item }}" - mode: 0755 - - name: instance specific tasks loop: "{{ nextcloud_instances | list }}" loop_control: diff --git a/roles/core/base/tasks/Debian.yml b/roles/core/base/tasks/Debian.yml index 4d2abc17..43f1876e 100644 --- a/roles/core/base/tasks/Debian.yml +++ b/roles/core/base/tasks/Debian.yml @@ -156,6 +156,15 @@ state: "{{ base_enable_fstrim | ternary('started', 'stopped') }}" enabled: "{{ base_enable_fstrim }}" +- name: configure timezone + timezone: + name: "{{ base_timezone }}" + register: etc_localtime + +- name: make sure legacy /etc/timezone is in sync with /etc/localtime + when: etc_localtime is changed + command: dpkg-reconfigure -f noninteractive tzdata + - name: remove cloud-init bullshit loop: - /var/log/cloud-init.log diff --git a/roles/core/base/tasks/OpenBSD.yml b/roles/core/base/tasks/OpenBSD.yml index fbc06379..7ff4f324 100644 --- a/roles/core/base/tasks/OpenBSD.yml +++ b/roles/core/base/tasks/OpenBSD.yml @@ -13,3 +13,7 @@ openbsd_pkg: name: "{{ base_packages_extra }}" state: present + +- name: configure timezone + timezone: + name: "{{ base_timezone }}" diff --git a/roles/core/base/tasks/main.yml b/roles/core/base/tasks/main.yml index fe4a396c..a70bc7ad 100644 --- a/roles/core/base/tasks/main.yml +++ b/roles/core/base/tasks/main.yml @@ -34,7 +34,3 @@ copy: src: "{{ global_files_dir }}/common/htoprc" dest: "{{ item }}/.config/htop/" - -- name: configure timezone - timezone: - name: "{{ base_timezone }}" diff --git a/roles/greenbone/server/templates/docker-compose-22.4.yml.j2 b/roles/greenbone/server/templates/docker-compose-22.4.yml.j2 index 85742836..8c007ee9 100644 --- a/roles/greenbone/server/templates/docker-compose-22.4.yml.j2 +++ b/roles/greenbone/server/templates/docker-compose-22.4.yml.j2 @@ -1,65 +1,65 @@ services: vulnerability-tests: - image: greenbone/vulnerability-tests + image: registry.community.greenbone.net/community/vulnerability-tests environment: STORAGE_PATH: /var/lib/openvas/22.04/vt-data/nasl volumes: - vt_data_vol:/mnt notus-data: - image: greenbone/notus-data + image: registry.community.greenbone.net/community/notus-data volumes: - notus_data_vol:/mnt scap-data: - image: greenbone/scap-data + image: registry.community.greenbone.net/community/scap-data volumes: - scap_data_vol:/mnt cert-bund-data: - image: greenbone/cert-bund-data + image: registry.community.greenbone.net/community/cert-bund-data volumes: - cert_data_vol:/mnt dfn-cert-data: - image: greenbone/dfn-cert-data + image: registry.community.greenbone.net/community/dfn-cert-data volumes: - cert_data_vol:/mnt depends_on: - cert-bund-data data-objects: - image: greenbone/data-objects + image: registry.community.greenbone.net/community/data-objects volumes: - data_objects_vol:/mnt report-formats: - image: greenbone/report-formats + image: registry.community.greenbone.net/community/report-formats volumes: - data_objects_vol:/mnt depends_on: - data-objects gpg-data: - image: greenbone/gpg-data + image: registry.community.greenbone.net/community/gpg-data volumes: - gpg_data_vol:/mnt redis-server: - image: greenbone/redis-server + image: registry.community.greenbone.net/community/redis-server restart: on-failure volumes: - redis_socket_vol:/run/redis/ pg-gvm: - image: 
greenbone/pg-gvm:stable + image: registry.community.greenbone.net/community/pg-gvm:stable restart: on-failure volumes: - psql_data_vol:/var/lib/postgresql - psql_socket_vol:/var/run/postgresql gvmd: - image: greenbone/gvmd:stable + image: registry.community.greenbone.net/community/gvmd:stable restart: on-failure volumes: - gvmd_data_vol:/var/lib/gvm @@ -86,7 +86,7 @@ services: condition: service_completed_successfully gsa: - image: greenbone/gsa:stable + image: registry.community.greenbone.net/community/gsa:stable restart: on-failure ports: - 127.0.0.1:9392:80 @@ -94,9 +94,73 @@ services: - gvmd_socket_vol:/run/gvmd depends_on: - gvmd + # Sets log level of openvas to the set LOG_LEVEL within the env + # and changes log output to /var/log/openvas instead /var/log/gvm + # to reduce likelyhood of unwanted log interferences + configure-openvas: + image: registry.community.greenbone.net/community/openvas-scanner:stable + volumes: + - openvas_data_vol:/mnt + - openvas_log_data_vol:/var/log/openvas + command: + - /bin/sh + - -c + - | + printf "table_driven_lsc = yes\nopenvasd_server = http://openvasd:80\n" > /mnt/openvas.conf + sed "s/level=.*/level=INFO/" /etc/openvas/openvas_log.conf | sed 's/gvm/openvas/' > /mnt/openvas_log.conf + chmod 644 /mnt/openvas.conf + chmod 644 /mnt/openvas_log.conf + touch /var/log/openvas/openvas.log + chmod 666 /var/log/openvas/openvas.log + + # shows logs of openvas + openvas: + image: registry.community.greenbone.net/community/openvas-scanner:stable + restart: on-failure + volumes: + - openvas_data_vol:/etc/openvas + - openvas_log_data_vol:/var/log/openvas + command: + - /bin/sh + - -c + - | + cat /etc/openvas/openvas.conf + tail -f /var/log/openvas/openvas.log + depends_on: + configure-openvas: + condition: service_completed_successfully + + openvasd: + image: registry.community.greenbone.net/community/openvas-scanner:stable + restart: on-failure + environment: + # `service_notus` is set to disable everything but notus, + # if you want to utilize openvasd directly removed `OPENVASD_MODE` + OPENVASD_MODE: service_notus + GNUPGHOME: /etc/openvas/gnupg + LISTENING: 0.0.0.0:80 + volumes: + - openvas_data_vol:/etc/openvas + - openvas_log_data_vol:/var/log/openvas + - gpg_data_vol:/etc/openvas/gnupg + - notus_data_vol:/var/lib/notus + # enable port forwarding when you want to use the http api from your host machine + # ports: + # - 127.0.0.1:3000:80 + depends_on: + vulnerability-tests: + condition: service_completed_successfully + configure-openvas: + condition: service_completed_successfully + gpg-data: + condition: service_completed_successfully + networks: + default: + aliases: + - openvasd ospd-openvas: - image: greenbone/ospd-openvas:stable + image: registry.community.greenbone.net/community/ospd-openvas:stable restart: on-failure hostname: ospd-openvas.local cap_add: @@ -111,8 +175,6 @@ services: "-f", "--config", "/etc/gvm/ospd-openvas.conf", - "--mqtt-broker-address", - "mqtt-broker", "--notus-feed-dir", "/var/lib/notus/advisories", "-m", @@ -124,6 +186,8 @@ services: - notus_data_vol:/var/lib/notus - ospd_openvas_socket_vol:/run/ospd - redis_socket_vol:/run/redis/ + - openvas_data_vol:/etc/openvas/ + - openvas_log_data_vol:/var/log/openvas depends_on: redis-server: condition: service_started @@ -131,32 +195,11 @@ services: condition: service_completed_successfully vulnerability-tests: condition: service_completed_successfully - - mqtt-broker: - restart: on-failure - image: greenbone/mqtt-broker - networks: - default: - aliases: - - mqtt-broker - - broker - - 
notus-scanner: - restart: on-failure - image: greenbone/notus-scanner:stable - volumes: - - notus_data_vol:/var/lib/notus - - gpg_data_vol:/etc/openvas/gnupg - environment: - NOTUS_SCANNER_MQTT_BROKER_ADDRESS: mqtt-broker - NOTUS_SCANNER_PRODUCTS_DIRECTORY: /var/lib/notus/products - depends_on: - - mqtt-broker - - gpg-data - - vulnerability-tests + configure-openvas: + condition: service_completed_successfully gvm-tools: - image: greenbone/gvm-tools + image: registry.community.greenbone.net/community/gvm-tools volumes: - gvmd_socket_vol:/run/gvmd - ospd_openvas_socket_vol:/run/ospd @@ -177,3 +220,5 @@ volumes: gvmd_socket_vol: ospd_openvas_socket_vol: redis_socket_vol: + openvas_data_vol: + openvas_log_data_vol: diff --git a/roles/installer/debian/preseed/defaults/main.yml b/roles/installer/debian/preseed/defaults/main.yml index 1a8b5b67..ffc9521c 100644 --- a/roles/installer/debian/preseed/defaults/main.yml +++ b/roles/installer/debian/preseed/defaults/main.yml @@ -10,7 +10,7 @@ debian_preseed_locales: debian_preseed_keyboard_layout: de debian_preseed_keyboard_variant: nodeadkeys -debian_preseed_timezone: Europe/Vienna +debian_preseed_timezone: "{{ base_timezone | default('Europe/Vienna') }}" # debian_preseed_force_net_ifnames_policy: path debian_preseed_no_netplan: no diff --git a/roles/installer/openbsd/autoinstall/defaults/main.yml b/roles/installer/openbsd/autoinstall/defaults/main.yml index b166c191..4d8fd865 100644 --- a/roles/installer/openbsd/autoinstall/defaults/main.yml +++ b/roles/installer/openbsd/autoinstall/defaults/main.yml @@ -14,3 +14,5 @@ obsd_autoinstall_file_sets: #- xfont #- xserv #- xshare + +obsd_autoinstall_timezone: "{{ base_timezone | default('Europe/Vienna') }}" diff --git a/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2 b/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2 index 8c28d6e6..10fa17be 100644 --- a/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2 +++ b/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2 @@ -23,7 +23,7 @@ Setup a user = no Start sshd(8) by default = yes Allow root ssh login = prohibit-password -What timezone are you in = Europe/Vienna +What timezone are you in = {{ obsd_autoinstall_timezone }} Location of sets = cd0 Pathname to the sets = / diff --git a/roles/installer/raspios/image/defaults/main.yml b/roles/installer/raspios/image/defaults/main.yml index 3f6ab3a3..35a76e38 100644 --- a/roles/installer/raspios/image/defaults/main.yml +++ b/roles/installer/raspios/image/defaults/main.yml @@ -13,5 +13,5 @@ raspios_keep_boot_dir_mounted: no # line: 'dtparam=i2c_vc=on' raspios_locale: en_US.UTF-8 -raspios_timezone: Europe/Vienna +raspios_timezone: "{{ base_timezone | default('Europe/Vienna') }}" raspios_keyboard_layout: de diff --git a/roles/installer/ubuntu/autoinstall/defaults/main.yml b/roles/installer/ubuntu/autoinstall/defaults/main.yml index 50a35ae1..16d08a19 100644 --- a/roles/installer/ubuntu/autoinstall/defaults/main.yml +++ b/roles/installer/ubuntu/autoinstall/defaults/main.yml @@ -6,7 +6,7 @@ ubuntu_autoinstall_locale: en_US ubuntu_autoinstall_keyboard_layout: de ubuntu_autoinstall_keyboard_variant: nodeadkeys -ubuntu_autoinstall_timezone: Europe/Vienna +ubuntu_autoinstall_timezone: "{{ base_timezone | default('Europe/Vienna') }}" # ubuntu_autoinstall_kernel_image: ubuntu_autoinstall_virtual_machine: no diff --git a/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2 b/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2 index 
1de70b70..24cce9b7 100644 --- a/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2 +++ b/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2 @@ -237,6 +237,9 @@ autoinstall: dpkg -l | grep "^rc" | awk "{ print(\$2) }" | xargs -r dpkg -P sed '/^PasswordAuthentication /d' -i '/etc/ssh/sshd_config' rm -f '/etc/ssh/sshd_config.d/50-cloud-init.conf' + swapoff -a + sed -e '/^\/swapfile/d' -e '/^\/swap\.img/d' -i /etc/fstab + rm -f /swapfile /swap.img rm -f /root/post-cleanup.sh {% if ubuntu_autoinstall_poweroff_when_done %} poweroff @@ -262,7 +265,6 @@ autoinstall: {% endfor %} late-commands: - - curtin in-target --target=/target -- swapoff -a; sed -e '/^\/swapfile/d' -e '/^\/swap\.img/d' -i /etc/fstab; rm -f /swapfile /swap.img {% if ansible_port is defined %} - curtin in-target --target=/target -- sed -e 's/^\s*#*\s*Port\s\s*[0-9][0-9]*$/Port {{ ansible_port }}/' -i /etc/ssh/sshd_config - curtin in-target --target=/target -- bash -c "mkdir -p /etc/systemd/system/ssh.socket.d; echo -e '[Socket]\nListenStream=\nListenStream={{ ansible_port }}' > /etc/systemd/system/ssh.socket.d/port.conf" @@ -272,7 +274,7 @@ autoinstall: - curtin in-target --target=/target -- apt-get -y -q purge systemd-oomd {% endif %} {% if (install_codename | ubuntu_release_compare('>=', 'noble')) %} - - curtin in-target --target=/target -- apt-get -y -q purge ubuntu-kernel-accessories ubuntu-pro-client lxd-installer + - curtin in-target --target=/target -- apt-get -y -q purge ubuntu-kernel-accessories ubuntu-pro-client lxd-installer unminimize {% endif %} {% if ubuntu_autoinstall_desktop is undefined %} - curtin in-target --target=/target -- apt-mark manual iputils-ping isc-dhcp-client netcat-openbsd netplan.io sudo diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml index 6d114b87..252eb7e6 100644 --- a/roles/kubernetes/base/tasks/cri_docker.yml +++ b/roles/kubernetes/base/tasks/cri_docker.yml @@ -24,6 +24,7 @@ - "native.cgroupdriver=systemd" bridge: "none" iptables: false + ip6tables: false log-driver: "json-file" log-opts: max-size: "10M" diff --git a/roles/monitoring/prometheus/server/defaults/main/main.yml b/roles/monitoring/prometheus/server/defaults/main/main.yml index 25cffa5b..dd290e9e 100644 --- a/roles/monitoring/prometheus/server/defaults/main/main.yml +++ b/roles/monitoring/prometheus/server/defaults/main/main.yml @@ -75,5 +75,27 @@ prometheus_server_web_listen_address: 127.0.0.1:9090 # - node # - blackbox +prometheus_server_remote_write_receiver: no + +# prometheus_server_remote_write_destinations: +# example: +# url: "https://mon.example.com/prometheus/api/v1/write" +# basic_auth: +# username: remote +# password_file: /etc/prometheus/prometheus-remote.secret +# tls_config: +# ca: | +# -----BEGIN CERTIFICATE----- +# ... 
+# -----END CERTIFICATE----- +# write_relabel_configs: +# - source_labels: ['__name__'] +# regex: 'go_gc_.*' +# action: 'drop' +# - source_labels: ['job'] +# regex: 'alertmanager' +# action: 'drop' + # prometheus_server_secret_files: # user: secret +# remote: othersecret diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml index 4db6cd17..5cb27264 100644 --- a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml +++ b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml @@ -93,13 +93,13 @@ prometheus_server_rules_prometheus: description: "Prometheus has no target in service discovery\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: PrometheusTargetScrapingSlow - expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60 + expr: prometheus_target_interval_length_seconds{quantile="0.9"} / on (interval, instance, job) prometheus_target_interval_length_seconds{quantile="0.5"} > 1.05 for: 5m labels: severity: warning annotations: summary: Prometheus target scraping slow (instance {{ '{{' }} $labels.instance {{ '}}' }}) - description: "Prometheus is scraping exporters slowly\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + description: "Prometheus is scraping exporters slowly since it exceeded the requested interval time\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: PrometheusLargeScrape expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10 diff --git a/roles/monitoring/prometheus/server/templates/prometheus.service.j2 b/roles/monitoring/prometheus/server/templates/prometheus.service.j2 index e65e9425..86c30cbd 100644 --- a/roles/monitoring/prometheus/server/templates/prometheus.service.j2 +++ b/roles/monitoring/prometheus/server/templates/prometheus.service.j2 @@ -6,7 +6,7 @@ After=time-sync.target [Service] Restart=on-failure User=prometheus -ExecStart=/usr/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus/metrics2/ --storage.tsdb.retention.time={{ prometheus_server_retention }}{% if prometheus_server_web_external_url is defined %} --web.external-url={{ prometheus_server_web_external_url }}{% endif %}{% if prometheus_server_web_route_prefix is defined %} --web.route-prefix={{ prometheus_server_web_route_prefix }}{% endif %}{% if prometheus_server_auth_users is defined %} --web.config.file=/etc/prometheus/prometheus-web.yml{% endif %} --web.listen-address={{ prometheus_server_web_listen_address }} +ExecStart=/usr/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus/metrics2/ --storage.tsdb.retention.time={{ prometheus_server_retention }}{% if prometheus_server_web_external_url is defined %} --web.external-url={{ prometheus_server_web_external_url }}{% endif %}{% if prometheus_server_web_route_prefix is defined %} --web.route-prefix={{ prometheus_server_web_route_prefix }}{% endif %}{% if prometheus_server_auth_users is defined %} --web.config.file=/etc/prometheus/prometheus-web.yml{% endif %}{% if prometheus_server_remote_write_receiver %} --web.enable-remote-write-receiver{% endif %} --web.listen-address={{ prometheus_server_web_listen_address }} ExecReload=/bin/kill -HUP $MAINPID TimeoutStopSec=20s SendSIGKILL=no diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 
b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 index 85adfa52..d72a4815 100644 --- a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 +++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 @@ -96,3 +96,10 @@ scrape_configs: - targets: ['{{ config.url }}'] {% endfor %} {% endfor %} +{% if prometheus_server_remote_write_destinations is defined %} + +remote_write: +{% for name, config in prometheus_server_remote_write_destinations.items() %} + - {{ config | combine({'name': name }) | to_nice_yaml(indent=2) | indent(4) }} +{% endfor %} +{% endif %}
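
For reference, a minimal sketch of what the new remote_write section would render to, assuming the commented-out "example" destination from the updated server defaults is enabled (tls_config from that example omitted here for brevity); to_nice_yaml sorts keys alphabetically, so the exact key order in the generated prometheus.yml may differ:

remote_write:
  - basic_auth:
      password_file: /etc/prometheus/prometheus-remote.secret
      username: remote
    name: example
    url: https://mon.example.com/prometheus/api/v1/write
    write_relabel_configs:
      - action: 'drop'
        regex: 'go_gc_.*'
        source_labels: ['__name__']

On the receiving side, setting prometheus_server_remote_write_receiver: yes adds --web.enable-remote-write-receiver to the prometheus ExecStart line, which is what allows that server to accept the pushed samples.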