diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 0000000..d03cb8f
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,2 @@
+skip_list:
+  - '301'
diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 0000000..416e400
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,19 @@
+---
+kind: pipeline
+type: docker
+name: check
+
+steps:
+  - name: yamllint
+    image: python:3.9-alpine
+    commands:
+      - pip install yamllint==1.25.0
+      - yamllint -c .yamllint.yml .
+
+  - name: ansible-lint
+    image: python:3.9-alpine
+    commands:
+      - apk add --no-cache gcc libc-dev libffi-dev openssl-dev
+      - pip install ansible-lint==4.3.7
+      - ansible-lint *.yml
+...
diff --git a/.gitignore b/.gitignore
index a8b42eb..ea2eabf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
 *.retry
+tmp
+ldap-password.txt
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 4cf864e..c62f35b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,6 +1,19 @@
 ---
-image: quay.io/ansible/molecule:2.19
+image: python:3.9-alpine
+
+stages:
+  - lint
 
 yamllint:
-  script: yamllint -c .yamllint.yml .
+  stage: lint
+  script:
+    - pip install yamllint==1.25.0
+    - yamllint -c .yamllint.yml .
+
+ansible-lint:
+  stage: lint
+  script:
+    - apk add gcc libc-dev libffi-dev openssl-dev
+    - pip install ansible-lint==4.3.7
+    - ansible-lint *.yml
 ...
diff --git a/.yamllint.yml b/.yamllint.yml
index bcc5101..3a7ea3d 100644
--- a/.yamllint.yml
+++ b/.yamllint.yml
@@ -3,5 +3,6 @@
 extends: default
 
 rules:
   line-length:
+    max: 120
     level: warning
 ...
diff --git a/README.md b/README.md
index d91d960..00897a4 100644
--- a/README.md
+++ b/README.md
@@ -1,49 +1,140 @@
-# Playbook et rôles Ansible d'Aurore
+# Recettes Ansible d'Aurore
-## Exécution d'un playbook
+Ensemble des recettes de déploiement Ansible pour les serveurs d'Aurore.
+Pour les utiliser, vérifiez que vous avez au moins Ansible 2.7.
-Pour appliquer le playbook `base.yml` :
-```bash
-ansible-playbook --ask-vault-pass base.yml
-```
+## Ansible 101
-Il est souhaitable de faire un test avant avec `--check` si on a des doutes !
+Si vous n'avez jamais touché à Ansible avant, voilà une rapide introduction.
-## FAQ
+**Inventory** : c'est le fichier `hosts` d'inventaire.
+Il contient la définition de chaque machine et le regroupement.
-### Mettre sa clé SSH sur une machine
+Quand on regroupe avec un `:children`, en réalité on groupe des groupes.
-```
-ssh-copy-id -i ~/.ssh/id_rsa_aurore.pub virtu.fede-aurore.net
-```
+Chaque machine est annoncée avec son hostname. Il faut pouvoir SSH sur cette machine
+avec ce hostname, car c'est ce qu'Ansible fera.
-### Automatiquement ajouter fingerprint ECDSA (dangereux !)
+**Playbook** : c'est une politique de déploiement.
+Il contient les associations des rôles avec les machines.
-Il faut changer la variable d'environnement suivante :
-`ANSIBLE_HOST_KEY_CHECKING=0`.
+L'idée au Crans est de regrouper par thème. Par exemple, le playbook `monitoring.yml`
+va contenir toutes les définitions machines-rôles qui touchent au monitoring.
+Cela permet de déployer manuellement tout le monitoring sans toucher au reste.
-### Configurer la connexion au bastion
+**Rôle** : un playbook donne des rôles à des machines. Ces rôles sont tous dans
+le dossier `roles/`. Un rôle installe un service précis sur un serveur.
-Envoyer son agent SSH peut être dangereux ([source](https://heipei.io/2015/02/26/SSH-Agent-Forwarding-considered-harmful/)).
+Il est préférable d'être atomique sur les rôles plutôt que d'en coder un énorme
+qui sera difficilement maintenable.
-On va utiliser plutôt ProxyJump.
-Dans la configuration SSH :
+*Exemples de rôle* : activer les backports pour ma version de Debian, installer NodeJS,
-```
-# Use a key to log on all Aurore servers
-# and use a bastion
-Host 10.128.0.* *.adm.auro.re
+déployer un serveur prometheus, déployer une node prometheus…
-  IdentityFile ~/.ssh/id_rsa_aurore
-  ProxyJump proxy.auro.re
-```
+**Tâche** : un rôle est composé de tâches. Une tâche effectue une et une seule
+action. Elle est associée à un module Ansible.
-Il faut savoir que depuis Ansible 2.5, des connexions persistantes sont créées
-vers les serveurs puis détruites à la fin de l'exécution.
-Il faut donc éviter de lancer une connexion SSH persistante pendant l'exécution
-d'Ansible.
+*Exemples de tâche* : installer un paquet avec le module `apt`, ajouter une ligne dans
+un fichier avec le module `lineinfile`, copier une template avec le module `template`…
+
+Une tâche peut avoir des paramètres supplémentaires pour la réessayer quand elle plante,
+récupérer son résultat dans une variable, mettre une boucle dessus, mettre des
+conditions… (un exemple complet est donné plus bas).
+
+N'oubliez pas d'aller lire l'excellente documentation de RedHat sur tous les modules
+d'Ansible !
+
+### Gestion des groupes de machines
+
+Pour la liste complète, je vous invite à lire le fichier `hosts`.
+
+ * pour tester les versions de Debian,
+
+   ```YAML
+   ansible_lsb.codename == 'stretch'
+   ```
+
+ * pour tester si c'est un CPU Intel x86_64,
+
+   ```YAML
+   ansible_processor[0].find('Intel') != -1
+   and ansible_architecture == 'x86_64'
+   ```
+
+Pour les fonctions (`proxy-server`, `dhcp-dynamique`…) il a été choisi
+de ne pas faire de groupe particulier mais plutôt de sélectionner/enlever
+les machines pertinentes directement dans les playbooks.
 
 ### Lister tout ce que sait Ansible sur un hôte
 
+Lors de son lancement, Ansible collecte un ensemble de faits sur les serveurs
+qui peuvent ensuite être utilisés dans des variables.
+Pour lister tous les faits qu'Ansible collecte nativement d'un serveur,
+on peut exécuter le module `setup` manuellement.
+
 ```
-ansible -i hosts ldap-replica-fleming1.adm.auro.re -m setup --ask-vault-pass
+ansible proxy.adm.auro.re -m setup --ask-vault-pass
 ```
+
+## Exécution d'Ansible
+
+### Configurer la connexion au vlan adm
+
+Envoyer son agent SSH peut être dangereux
+([source](https://heipei.io/2015/02/26/SSH-Agent-Forwarding-considered-harmful/)).
+
+On va plutôt utiliser `ProxyJump`.
+Dans la configuration SSH :
+
+```
+# Use a proxy jump server to log on all Aurore inventory
+Host 10.128.0.* *.adm.auro.re
+  ProxyJump passerelle.auro.re
+```
+
+Il faut avoir sa clé SSH configurée sur le serveur que l'on déploie.
+```bash
+ssh-copy-id proxy.adm.auro.re
+```
+
+### Lancer Ansible
+
+Il faut `python3-netaddr` sur sa machine.
+
+Pour tester le playbook `base.yml` :
+```bash
+ansible-playbook --ask-vault-pass base.yml --check
+```
+
+Vous pouvez ensuite enlever `--check` si vous voulez appliquer les changements !
+
+Si vous avez des soucis de fingerprint ECDSA, vous pouvez ignorer une
+première fois (dangereux !) : `ANSIBLE_HOST_KEY_CHECKING=0 ansible-playbook...`.
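+
+### Exemple de tâche
+
+Pour illustrer la partie « Ansible 101 » ci-dessus, voici une esquisse de tâche
+(exemple hypothétique : les noms de paquets sont arbitraires) qui combine
+registre, réessais, boucle et condition :
+
+```yaml
+# Installe des paquets en réessayant si le dépôt est momentanément indisponible
+- name: Install some packages
+  apt:
+    name: "{{ item }}"
+    state: present
+    update_cache: true
+  register: apt_result
+  retries: 3
+  delay: 10
+  until: apt_result is succeeded
+  loop:
+    - curl
+    - htop
+  when: ansible_os_family == 'Debian'
+```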
+
+### Ajouter toutes les empreintes des serveurs
+
+```bash
+#!/bin/bash
+for ip in $(grep -v '^#' hosts | grep adm.auro.re); do
+  ssh-copy-id -i ~/.ssh/id_rsa.pub "$ip"
+done
+```
+
+
+### Passage à Ansible 2.10 (release: 30 juillet)
+
+Installez la version de développement d'Ansible pour faire fonctionner les
+playbooks de ce repo, ainsi que les collections suivantes :
+
+```bash
+ansible-galaxy collection install community.general
+ansible-galaxy collection install ansible.posix
+```
+
+
+Si vous n'arrivez pas à entrer votre _become password_ (bug dans Ansible ?), un
+workaround est le suivant :
+
+`$ export ANSIBLE_BECOME_PASS=''`
+
+Notez l'espace au début de la commande (avant `export`) pour qu'elle ne soit pas
+enregistrée dans votre historique shell.
diff --git a/ansible.cfg b/ansible.cfg
index 560f008..e2d6a32 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -1,19 +1,25 @@
-# Aurore Ansible configuration
+# Ansible configuration
 
 [defaults]
 
-# Use Aurore inventory
+# Do not create .retry files
+retry_files_enabled = False
+
+# Use inventory
 inventory = ./hosts
 
 # Custom header in templates
-ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
-
-# Do not create retry files
-retry_files_enabled = False
+ansible_managed = Ansible managed, modified on %Y-%m-%d %H:%M:%S by {uid}
 
 # Do not use cows (with cowsay)
 nocows = 1
 
+# Do more parallelism
+forks = 15
+
+# Some SSH connections will take time
+timeout = 60
+
 [privilege_escalation]
 
 # Use sudo to get priviledge access
@@ -27,3 +33,6 @@ become_ask_pass = True
 
 # TO know what changed
 always = yes
+
+[ssh_connection]
+pipelining = True
diff --git a/base.yml b/base.yml
old mode 100644
new mode 100755
index 90e258b..5aee2d2
--- a/base.yml
+++ b/base.yml
@@ -1,5 +1,12 @@
+#!/usr/bin/env ansible-playbook
 ---
 # Put a common configuration on all servers
-- hosts: all
+- hosts: all,!unifi
   roles:
-    - baseconfig
+    - baseconfig
+    - basesecurity
+
+# Plug LDAP on all servers
+- hosts: all,!unifi
+  roles:
+    - ldap_client
diff --git a/codimd.yml b/codimd.yml
deleted file mode 100644
index ac330c8..0000000
--- a/codimd.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Install CodiMD on CodiMD containers
-- hosts: codimd.adm.auro.re
-  roles:
-    - debian-backports
-    - codimd
diff --git a/copy-keys.sh b/copy-keys.sh
new file mode 100755
index 0000000..6aa1bb6
--- /dev/null
+++ b/copy-keys.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+set -e
+
+# Grab valid unique hostnames from the Ansible inventory.
+HOSTS=$(grep -ve '^[#\[]' hosts \
+| grep -F adm.auro.re \
+| sort -u)
+
+# Ask password
+read -s -p "Hello adventurer, what is your LDAP password? " passwd
+echo
+
+for host in $HOSTS; do
+  echo "[+] Handling host $host"
+
+  # sshpass can be used for non-interactive password authentication.
+  # place your password in ldap-password.txt.
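+  # Note: the password read above is handed to sshpass through the SSHPASS
+  # environment variable (`sshpass -e`), so it is only set for this one
+  # command and never appears in the process arguments.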
+ SSHPASS=${passwd} sshpass -v -e ssh-copy-id -i ~/.ssh/id_rsa "$host" +done + diff --git a/dokuwiki.yml b/dokuwiki.yml deleted file mode 100644 index a27d148..0000000 --- a/dokuwiki.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# Install DokuWiki on the wiki TEST container -# When it will be operational, horus-wikitest will replace horus-wiki -- hosts: horus-wikitest - roles: - - dokuwiki diff --git a/etherpad.yml b/etherpad.yml deleted file mode 100644 index 65f1f26..0000000 --- a/etherpad.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# Install EtherPad on EtherPad containers -- hosts: pad.adm.auro.re - roles: - - debian-backports - - etherpad diff --git a/group_vars/all/vars.yml b/group_vars/all/vars.yml index 05828fc..599e834 100644 --- a/group_vars/all/vars.yml +++ b/group_vars/all/vars.yml @@ -1,20 +1,19 @@ --- # Use Python 3 -ansible_python_interpreter: '/usr/bin/env python3' +ansible_python_interpreter: /usr/bin/python3 # LDAP binding # You can hash LDAP passwords with `slappasswd` tool ldap_base: 'dc=auro,dc=re' -ldap_master_ipv4: '10.128.0.11' -ldap_master_uri: "ldap://{{ ldap_master_ipv4 }}" +ldap_master_ipv4: '10.128.0.21' +ldap_master_uri: "ldap://re2o-ldap.adm.auro.re" ldap_user_tree: "cn=Utilisateurs,{{ ldap_base }}" ldap_nslcd_bind_dn: "cn=nslcd,ou=service-users,{{ ldap_base }}" ldap_nslcd_passwd: "{{ vault_ldap_nslcd_passwd }}" -ldap_codimd_bind_dn: "cn=codimd,ou=service-users,{{ ldap_base }}" -ldap_codimd_password: "{{ vault_ldap_codimd_password }}" ldap_matrix_bind_dn: "cn=matrix,ou=service-users,{{ ldap_base }}" ldap_matrix_password: "{{ vault_ldap_matrix_password }}" ldap_replica_password: "{{ vault_ldap_replica_password }}" +ldap_admin_password: "{{ vault_ldap_admin_password }}" ldap_admin_hashed_passwd: "{{ vault_ldap_admin_hashed_passwd }}" # Databases @@ -33,3 +32,60 @@ ssh_pub_keys: "{{ vault_ssh_pub_keys }}" # Monitoring monitoring_mail: 'monitoring.aurore@lists.crans.org' + +# Matrix +matrix_webhooks_secret: "{{ vault_matrix_webhooks_secret }}" +matrix_discord_client_id: "559305991494303747" +matrix_discord_bot_token: "{{ vault_matrix_discord_bot_token }}" + +### +# DNS +### + +# Dernier octet (en décimal) de l'addresse des serveurs DNS récursifs de chaque +# résidence. +dns_host_suffix_main: 253 +dns_host_suffix_backup: 153 + +backup_dns_servers: + - "80.67.169.12" # French Data Network (FDN) (ns0.fdn.fr) + +# Finally raised! +mtu: 1500 + +subnet_ids: + ap: "14{{ apartment_block_id }}" + users_wired: "{{ apartment_block_id }}0" + users_wifi: "{{ apartment_block_id }}1" + users_banni: "{{ apartment_block_id }}2" + users_accueil: "{{ apartment_block_id }}3" + +# Keepalived +keepalived_password: "{{ vault_keepalived_password[apartment_block] }}" + + +# Re2o config +re2o_secret_key: "{{ vault_re2o_secret_key }}" +re2o_db_password: "{{ vault_re2o_db_password }}" +re2o_aes_key: "{{ vault_re2o_aes_key }}" + +# Radius +radius_secret_aurore: "{{ vault_radius_secrets.aurore }}" +radius_secret_wifi: "{{ vault_radius_secrets.wifi }}" +radius_secret_wired: "{{ vault_radius_secrets.wired[apartment_block] }}" +radius_secret_federez: "{{ vault_radius_secrets.federez }}" + +radius_pg_replication_password: "{{ vault_re2o_db_user_passwords.replication }}" +radius_pg_re2o_ro_password: "{{ vault_re2o_db_user_passwords.re2o_ro }}" + + +# DHCP +dhcp_failover_enabled: true +apartment_block_dhcp: "{{ apartment_block }}" + + +# Careful, this is not byte-aligned, just nibble-aligned (RIPE gave us a /28). +# However, we ALWAYS keep the trailing 0 to have byte alignment. 
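+# Concretely: the RIPE allocation is 2a09:6840::/28 (7 hex digits = 28 bits),
+# and the 8th digit is written as 0 so the prefix below stays on a byte
+# (32-bit) boundary.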
+ipv6_base_prefix: "2a09:6840" + +is_aurore_host: "{{ 'aurore_vm' in group_names }}" diff --git a/group_vars/all/vault.yml b/group_vars/all/vault.yml index 2b8e8ab..b813ba3 100644 --- a/group_vars/all/vault.yml +++ b/group_vars/all/vault.yml @@ -1,106 +1,174 @@ $ANSIBLE_VAULT;1.1;AES256 -38616339383037366465353638383665643938633061323863386539323861633135363964303364 -6335326537383039316566623031306136633364336337320a343434623066313535363430303866 -61613635663336376530333463363037316230323339393463393465313366643438316565306564 -3263613463653764330a363330623331323762306330336465633538353637373838396231643762 -35353137306132653861343736323934326161663436653737333462333938393330393861363764 -38333837353564396563623039636438663662626330323632383234333662613835633138326161 -33626537633863393864373937306332386131626562323762616439643830626339633833313638 -65363930353162323635356430313030303734386339393730383135323937623939393233663335 -39316564396463643134636363326262343538666234333837373164646434643232623666636162 -61616637396561323166313131393163353166353764396564373330326262346666373034663465 -35326563306538653562333864383662336338656566313432633831313363663131393930336234 -61336230643539303532656437616638323266623434393863623661366237386537386232383332 -38643433626466343633626665386633663964306566643436363839363462326632393863616230 -62386662386363613737613839386436383734623461386363313832636661636334633132363433 -30383135646465353232623936366337663130376232343231376565313563303534313462336462 -39353861663230623536613735663865373238303832306636323532393734303538383837336237 -36613732623539616237663962666263393138303064646335623336343432323266663335363061 -36306361653035653334343031303663373334613365393635616362343461616535343964306666 -36663030373133346663663839373363663163653961323533356361373533643636626339356332 -34663063326363646638646330626666633564373763326636363764653831336134393131653864 -62353163613534386364323430346435316339363338666563366262336536373162346265353165 -66363834306536383465376339323462303265373263333065623837656133623035366366396335 -34313566326565333930386336353333396237633333613336666566393465616563393563633335 -33313232646466313333633564653535343564626566353863613964303732616436626535656135 -39313632346530636639333435643034666138633831313864316266376330336334333263356132 -30613865336164353433383232316637663131653231393235343262666636623461653332323664 -63323637396132666333633863393131363935626435313366646563366366356265346130373162 -66306261303539323763336130616165323863646466643336316437663266303730313433343932 -39643639376531303839636632333063323837306638373863663132386533353735343234666139 -33633462346438356338613830313533306263383263383036363566323437356235386636333432 -31383462626338346339353536653437623731643036363366666437316362356266396634383865 -65663332383837373334393863386531663831643366613131663666303436306336313935643065 -61336661313562326465393936643130393465346636363234386436616165613436393436623664 -33396165376363316334613737613261396633306233643337393936393465313763343439646136 -38643135346234653234316536363964303331303932656134323165333239623362333931323334 -62323634373764663464343031626432653131306664336130396265363964663866663538376239 -33653430366265623733376536613632646662646632653132336537383336303335623966396131 -62303136613934656664386162373433323962336134663435373730366632666135663030653133 -38613632623765616465383266356238313333336434376238396339343031323465343833323665 
-61666366326363303832663164363838353739653534313235336530396330346466656432386636 -36613239613239613835633265363064316134346464616338333532376132373637356665346230 -38343461613335663061393037643466383664323263336364356364363262346236326161653362 -63653963303139616430333738373038333661333562376130363061366338346430303231363138 -66373866303331333035316533373336376466346630326166373734353834306237383462356430 -33633332366636613534313933323065633736656432656236653930623663616133336435313337 -36623031663333386362306138343231363130626131323735316562363730656230376132303439 -36323932666362613237666164653037353631353730346466303764326162366266376130316166 -32363739373437333731643936366334376463383664373139363465666538666331636535626462 -32323834616338376264393461626665393635336638383333653466336332316364363330333738 -61303161303564383563653039653263343161336362373563616237373865653965343639343465 -35363536613233313534633665373264303139393731626339646466663431363361373938623334 -38613766643861326538663563656662383665376137393333616566646136336339306333653066 -34373864353230343032386236386165616462316562363062613330636264303632396339646632 -37316630303536316436356138626533653865366137666131363064643964333838633266366365 -64663661333764616635383263376662616239323930643261613161313265633236623834376263 -31623565633964376632346533373037333166346332313266316138613665353530336562643739 -32393161396338353630363838643130323431343339376431373165313238383064616335323064 -36313366633161303635383062656538343538363039343931623736623766343762343563646134 -33626264643639333364343131636163646361626130646237326435373731613237663736393439 -37376639333930396337333938373866383133613730613162356364323265373431633836653838 -34323532613437313064383035313030306636346435396666653933626232313638656531366234 -34313837623136656361393230653637633535333063333662363137323962353232386366656264 -30303164363531343261626262653962383664313131333535313038343461303464326464373337 -34646537353031386265313066313136613533663335306636346237613264313061666233376131 -62346537666362373539626334376630633539343932623133653431393731356266623730353036 -64626133623638303132356639663538363537623739386166333337333431363939666362663137 -62303664306663376136353235613031623435393065376635353032666136396332613861376630 -65633061623466313838383664613839663033333736396536333863636435306263666466313265 -62633539623037616535333164316538616439333838633730313066366232616566646435376530 -66316164323737396230373339333936313532343266303765356530633534643439626236313136 -65373833633461393734353737363566353634353764333961643033393932353236633336393436 -61333537666263666238303736316462356138396232373839343434643730643766333535643565 -38333163353634633637393766346261613931663134363230353633336533643339353832653632 -64343438353266343937373138643635616331336164316166343536643434616665326331353161 -39303732663037653634303263386134366536333830363366663639383033323832653364393766 -30333438313361613764633239373036336230353333343262613761323930656265313031323065 -39666562393565326139373061313061396265386239343462346535623766313364343136343038 -39373462663434633731636662653966383233396163643137373530303134633465643130383265 -61376538626365396236643139336366643966306131393635623438393932313164623766353233 -61643732303837366139356236353937303036343232363537623231303661653035383831343830 -64663763383563303461633335313663643161336435343735333536393661663235636665383662 -37393139333361323136366564383132633830386335376137396563343937306431653665316161 
-38323137643437383636373033326236613936396564626466386432393733633361623032326135 -61346166393964313637356330653339646365326365326130386537343535353038643538393336 -66366335316538393333613631346365393038623261303536363732383637643430346632343139 -61643265333466323666633834333462613161653165663432613934353036343937343939303033 -35636134343761666534656636383632363562633634383364323039613431666239346136616231 -37383838316338666462376133613632663633663935643563346333356464663434393734366433 -37376539366139396536323535316539643261636533303065326164643132313439313632306664 -33386136393734613739396336396635353439393531393363326137363562646363333762386135 -32376162373766393763343634326434323034336533336166666635636135393066353238626161 -37376132306339626263313663376638633337656162333431313439643930303332666138643962 -37313331616563643638343464653535653038613136313637336534636537343763376634626538 -64633133656237346461346330333061636663336262646264326237373736366337663331323764 -65353931373266336232366434356631383338386537616535656664323334363963666631363331 -37653666316236613931386161346439663935656466396137643765626632643263323163623138 -34326363373633303365353530383633623936313436616635353733643837656233613635383966 -37356665626530626532356632323338623062386332616332643061623031366366613433383734 -33623732636637363237336439373432616332643638303131363332393230663565323139303163 -36643134363861383834656539626166386164326236336266633435626163383437366338353930 -33373932333832373237343532623537363931366636346132616162343839303965386337316164 -61656233616633323933306165643666306661366632613933383932616664613663663234653431 -30306163346630653239323966396638643162303766363131646430303366303435343866353639 -66373465393764396335346131346331353561643434633364656530626238326333333731383361 -35653232373235383633616634643966636336333461663237653764626534346336 +34336231623938346631313932323131336439623837626366646338396137633436646365386639 +6332383765386235396331373836366230663563376665380a616436373136633933376435653230 +64333963663436393265666434653164643164616134353665306462326666623530383838343135 +3531343533656332350a343432336636316131386132306238653736633966363235623833343638 +38643061383963396466346536343061653034333037393664356661376565643765306462626231 +39326233363962373839303464333833306532343834306232653731326135653934643836323639 +36343937626536346331613263663865346634666534646266623061303639626636393230616261 +32336366356439353738633234326138656464656630303362623664616634306230623538373965 +32346439306337623737616666353830626630373562366436653131393532313035303836326430 +64613235646366616533313065396663366434363832333535336631323366336437396664303834 +30336466313064636565326564356435306136396363373464326534303366323262303732626661 +38326663313332633530353739346538343434316133343066313530366637376135323564306537 +65626261303231656432333364333965663065346436626631666466643934623064333163626339 +32633565303734303862326365336339346133393431636266303530626564326361653230626536 +32313231373037633134623761663832393666353732613965613436323939343233613433343538 +37326438383130303861316663396333376662386337353964633930353536653437653061356635 +35646232343535313130646237643835376162623639333961323964353830653366626438346237 +36343663346332656537363434396633336161373730663364306239306432343930643230656465 +37633537616232656661313764626232303535383563353861396431643735326162383866626231 +61383165613332666537656137636430323332326335323763303537386662646263353539613964 
+37323966306364306436653033393931663239383435613836356164633135306233356364313036 +39356661613434633930633066646437636535313565356366303732613731333062643231313035 +65333461396131663764626665393562623030343561313136363964393664376136303839333664 +65313465623331333538393734373264313562643232666130303930333662616465656432363039 +66616530336666343861336434633063343561323931323931346132376263376565313366306639 +64646465303432333136353661323936633965666364356633653861363139616562653834313861 +63306133613066373462383236613939316130623937643939323134343936356638376335323836 +39383334656236633037633230313138326238303863623231353465346661663162623138353461 +33343738613137366364633730346261366564646161373837613865393233663431636361663962 +38313230363737306265636435353533666262333666383639343364633464396566333433333538 +39643934646537653234336361613664333434623739353831316531313666396638333136343638 +33653034366362363562633462303165626333306664326366353334363964663936616430643662 +30616334326638323133366632663237356238353934323361376237613632396134663536336364 +39363439326335363437373939353564646663616464663763353931323233316135656634343137 +34396130386134386331643534353461663963323435656337653032376565313635623231343135 +34303130316239303065386134663332393938636332363665643832326439653733633231346537 +63383634333034323434376237663932613638363835393837613632663265616363303233653539 +61333765313463616665613136303533343230303735626437343635303934613365326166333966 +66613538393466666630363333643730653239393435616634303430396635383631613439623433 +36646431393865666162373232343335356366366633633264326639643434396234313863333163 +63396534623931633833656565396635333133376165613031663831633564663061656131303564 +61303132666264636139313738643161313134643733633366376538366135663135333333333564 +64366262353837363061653663616265393264373230346330636465336439623063636639356136 +65383638643961326661396336373163643832366561363764626461623662333436373136616437 +30316537653432356133616338353165633462643634323563306366343965326635363863316232 +61633135643861333635383464383937306236626632366235363433313335663431366531356337 +37303465323638383930336138356665343966336137356137656564303733373565366162343330 +38326366653733376138356339313564616165626235356363343430353239616339656239323964 +31643734653263653461333135386261646265323134633334376262323330396634643764323635 +30336262323035613338333166353364333836623865393132613338393237363734616330366463 +64646163303337323531636532383438356237306337656439663565643032633462316366663164 +33613039326337353531303831313136653539353261373930613030383134653261363833653439 +31343662623035393238646263633066653362323434306137633339393330376462356139333362 +35363436356530363134663064653031376561343732346262383333353733363136396262643135 +31326566303535343833326562376464643632363434323839366366626134303830323563633237 +37313964353033316163303738636632346137353437333463303135323631383132623133663130 +32373163393861366137303138363134653534613236636439623731393837306130626638343134 +39313532386338343662333134353761653162663665396664366239633536613132313735373334 +37613161383633653861376433633632333163653439633938386137313632396137616337373465 +65383238396439666537313833663364333731613434333739393161363437306665363834653761 +34303464386633633163353636643964393233383232623765373239376633393139326630653765 +62646439646534376234323661383063656463313437323231333165626163626262626562376338 +62646362346261313738323830613037663035666361386139666432613230346334323063326239 
+65303065343061613736343663363630336333623439383032313137616131623933323636306331 +34636130626338303039356137353532346562363531623936316162336663306437386532363236 +36333661316161613237343032623764396435346632363963643438316430666539393566353939 +33333234313839636537366465356364303438313830663261373563346538626432313139303030 +33333066626463663663643833323764643737386162663766356665643064313263376434353038 +37643630643737663566653562353261333734636262626437393239383063613661643166626630 +31313564346239396561326162333534376264616435313762623032636432363832383630343964 +30343663643935633465393465626131633931623930653962303830333065363435383237653566 +65646632376330306437663334313932653230653562356338663366616463303466366263366137 +64633934626339633235386630396561376130373763313137386531356637633863393035306634 +65353432323235363135633832373032623837376333346131303162303464616234313062316563 +64646634633963663032613533636665333335656539323238623362306363313835626632306236 +30663637356463363530316434316639326639633539333335633330333834643035353932313638 +64356565653065666131373538356462306633343161376537323762313666373235353236313963 +65613561633266306632616538616461626532666435663038646138386430376164663766363138 +35316262393065653739323035666531333330326235386133383834383865356635666537333533 +31376138353231313262646334386566376264323066373934666363313431643738383064666437 +36656437313039656666373530346534393735353163646635663839326366643333393665626464 +36616637303631653661373433653865323634363065303433386534363064356564636465366265 +31333064383233636538393032376234663663353162343530376631356533653231303730396465 +33366162376464633633313664303939306330613865663431653037303061633130626635653638 +66626264363333376463386666313663333964333137333231303361616533393236373861656534 +32326335306566623332396638383133353434363565316432353963353062313662326361336537 +34396632656234333263663831326566353434316234613365316132363730643665373761666562 +31393565653663653731633333633730326265376135666162656132623238333765333363653130 +61353632313532616266363139336162336565356365316531336364623930636430353831623233 +61616131313438306633333066613764313161333934316139633738623164623564646365663566 +66356464376133363137313036623930373362306166623838373131313330393837396261656561 +66396233313530643164353264656563383632363139333262626532376562613630643437666266 +66656335656634613138316138643666623430363833663035616138336461303035633731636262 +36393939333765346239666433323032323361343934656463396365333366623337316663396263 +36616431626633663963636135643833666234613830366434636532373031343263316436306162 +39356365376561643665323866656465313434623138326238353662653735613565623264333336 +61393763363862613766653064636130323732663466366133666361636339356464313037353462 +63633936653235656538383433393065393162643034393538666433616131343462346235393164 +39353663373338626665663563663162633430343330373430376336326432346233663365376533 +32656465343538643137326366653232343530363834383831386634366262303333636261353863 +32633437343432653936643766363338636535613532323362656435613363393238626466303861 +38633861333638613466306338613932353964393365356637306261626535323732316362623731 +33313963623439613939333639346461663338373334396165636231666266613065323731373964 +64313133383435333935376531313432663766633133633863356563663535333263636237386136 +61653963633166383135333436646465383536373039383538326366636634313061613730653962 +37623962643866396637336231363038373465393637356463656566666661313130313863383233 
+37343636346535363832626365396262303862393535336565393635663637323730373564336634 +37363036323733306535336366373630356531353737303165376530656433626634343365626239 +64346136363030663862313431653761666432393933366665346361626361623039326434633835 +32666538653037613361343536383634643762356234366433663639653461303933306434333864 +37386436393465323139306161333738383265323436376536656264356230303163326134323864 +63396331666431666464656161633466333764653631623131646566303366333030653834333335 +31323365353239366232643863386365633861376235643034303563613363663661616564363663 +63326562613365653539383336383339646164623864323830653434623365393432666466323134 +33626330373361393734656632393232363866613863373135636537613934343065306265623964 +34643765636165393336356630353663343065333431656164363638646233663762346536343362 +65653364343537383336373933313464663464653465383830363631316336303464313731356230 +34336130323766386465373162346535396565346630353734303937396130656132376331326563 +36386339383338346533646331666262396432336434646333653664326635386238333763626637 +31363464306465666339316436323265623437636533643431363161323139653065323534636533 +64386334353439373133313937343234373963353331646233346432646430636530663336316134 +66303337313034396232643531643262343036313762633165353665653938313665386363353865 +66333166303636626565613136653365313763303263313239333033353638616566656134396131 +38356434343931303134303362313363343634613361353538636634336332373132356165326163 +30386130326239366532363962316435663862393836326439623862366166376234343439306465 +36346639623939353232366333643963646336383833386565643435393734653936313638663930 +32323065343737663564333961373034393261613862333431663562353964666561643831316432 +35313832356639333937333266306166656538643065386639346337306134613536356137316331 +38376434666332366531393639303561663934353130333161636530383932653236313530616531 +61656664626663373164343863333039356362343034326131376666623264663732303734366363 +30306430353732616131346637626332656434393163313661356465393263393235396662623962 +62643538623331646265643561623366383937313136383939366164613235666234663137653432 +34316138643139336331356663333632656539653632626136613431393736613630353237356164 +33623632643335663163656236633134343464353837346237316162346634633336663564656531 +39373730346130363963376463326238366235613539613466653139306237343164336462353236 +39323361636333353661633863663162633563343937366461346338363061623730633537626562 +30353938383664333861366431343033313961376436363065373430353736343563313531386663 +37313534303564333237616331396437376436383833373936376664666366373235613533663239 +64653863613531356666646233393533646131333961343730663461346235633961306263343831 +64386332653330323937643266373437633465363933653833343930616134626566363339366362 +36356163333730656233653431326430326566386264343330666131393166323537623137396237 +65386234653231666631366533383762643830333261363532666138386263643662633932626335 +66303363613035643931393933303035323566373634663037313338616132373162366334373962 +33666463613435396331326565353433336361303562326562663035313639333232333430373266 +65383235356132353838636565636436356361653831356430663935613766613237366564316566 +37396130393363386566306162346466326165353863636633306335383265306139396339383866 +34326335323962633032386162623033353036643437313832323166363764653339343638343964 +66626662326234306362656162336538353131366337643761643930306163333661653062663832 +61303963623433313565633235306132366663336662616232613339366363373934613631623431 
+34323736383366333032343364373533363761323338346163323836653235653136646162306166 +65333734623663346233343961396566313838653036396430396134393839326535363237363638 +38333232333863396334366561303136333863356666656335633630616531363766343535616533 +35656166303837653365303436623431613931336331356531666665346562613263363666626238 +62626236323863383366643162356462306163653032626130333863656337623136646439316337 +33306432663134383038646133346131333732633932383239643733643138303434646565663266 +34616265383733343963323538656138656331396438616133393063356638633965323363653066 +65353837333363613762333839313631373137363064383830353565333832356162323862393030 +35373038613133643466636537626437393837633865363566343565626633376262373766613738 +39343334336238363131373762646564653839623531323066356430326263376534373664363331 +64373735383933303638303661333964333464306338613363326261623438336530636262373766 +35346339643939666162386232666236326131366366303432393838326239313730323431376231 +39363032616666393431326533643865643937363937356431623763363037373333653266376561 +63323462363063343234373534663063353865363037383932386231313338343239653131633561 +34623439396232633265616438623562666333303932396366663330326565363736633461333463 +66346537323061306662323062393061353565393165363532306439343262343632616465363364 +30376331346430313536313963333136663833323064633631653935326366633862336163316538 +33383434336666303434363236396662366664393637656462363331356631613332353766636663 +62323264336235306532343065323834313730353237616463373766303439663533336366363565 +35646461636263646633343634323735383235376330616334373937646165623639363663353361 +65613034353736633332663333616564356265323731613537393430633137333337643663323137 +31623732663331653935316337306433333633353565343265666333363864346562363961333439 +30656136636661396335623566386362333861616663393738626632633537613564636261383138 +3233 diff --git a/group_vars/aurore/main.yml b/group_vars/aurore/main.yml new file mode 100644 index 0000000..e49d40d --- /dev/null +++ b/group_vars/aurore/main.yml @@ -0,0 +1,8 @@ +--- +apartment_block: aurore +apartment_block_id: 0 +router_ip_suffix: 254 + +# We have two -aurore DHCP servers, but no failover peer functionality is needed +# because they only give out IPs assigned by re2o. 
+dhcp_failover_enabled: false diff --git a/group_vars/edc/ldap_local_replica.yml b/group_vars/edc/ldap_local_replica.yml new file mode 100644 index 0000000..63cfeb4 --- /dev/null +++ b/group_vars/edc/ldap_local_replica.yml @@ -0,0 +1,3 @@ +--- +ldap_local_replica_uri: + - 'ldap://ldap-replica-edc.adm.auro.re' diff --git a/group_vars/edc/main.yml b/group_vars/edc/main.yml new file mode 100644 index 0000000..942e068 --- /dev/null +++ b/group_vars/edc/main.yml @@ -0,0 +1,7 @@ +--- +apartment_block: edc +apartment_block_id: 4 + +router_ip_suffix: 254 + +mtu: 1500 diff --git a/group_vars/edc/sudo_location_group.yml b/group_vars/edc/sudo_location_group.yml new file mode 100644 index 0000000..a7aec2e --- /dev/null +++ b/group_vars/edc/sudo_location_group.yml @@ -0,0 +1,3 @@ +--- +# Users in that group will be able to `sudo` +sudo_group_location: 'sudoedc' diff --git a/group_vars/fleming/main.yml b/group_vars/fleming/main.yml new file mode 100644 index 0000000..94f9cc8 --- /dev/null +++ b/group_vars/fleming/main.yml @@ -0,0 +1,7 @@ +--- +apartment_block: fleming +apartment_block_id: 1 + +router_ip_suffix: 254 + +mtu: 1500 diff --git a/group_vars/fleming/sudo_location_group.yml b/group_vars/fleming/sudo_location_group.yml new file mode 100644 index 0000000..664d024 --- /dev/null +++ b/group_vars/fleming/sudo_location_group.yml @@ -0,0 +1,3 @@ +--- +# Users in that group will be able to `sudo` +sudo_group_location: 'sudofleming' diff --git a/group_vars/gs/main.yml b/group_vars/gs/main.yml new file mode 100644 index 0000000..25c3139 --- /dev/null +++ b/group_vars/gs/main.yml @@ -0,0 +1,7 @@ +--- +apartment_block: gs +apartment_block_dhcp: sand + +apartment_block_id: 5 + +router_ip_suffix: 254 diff --git a/group_vars/gs/sudo_location_group.yml b/group_vars/gs/sudo_location_group.yml new file mode 100644 index 0000000..4ca3c9d --- /dev/null +++ b/group_vars/gs/sudo_location_group.yml @@ -0,0 +1,3 @@ +--- +# Users in that group will be able to `sudo` +sudo_group_location: 'sudogeorgesand' diff --git a/group_vars/pacaterie/main.yml b/group_vars/pacaterie/main.yml new file mode 100644 index 0000000..8ddb5ff --- /dev/null +++ b/group_vars/pacaterie/main.yml @@ -0,0 +1,7 @@ +--- +apartment_block: pacaterie +apartment_block_id: 2 + +router_ip_suffix: 254 + +mtu: 1500 diff --git a/group_vars/pacaterie/sudo_location_group.yml b/group_vars/pacaterie/sudo_location_group.yml new file mode 100644 index 0000000..6244736 --- /dev/null +++ b/group_vars/pacaterie/sudo_location_group.yml @@ -0,0 +1,3 @@ +--- +# Users in that group will be able to `sudo` +sudo_group_location: 'sudopacaterie' diff --git a/group_vars/rives/main.yml b/group_vars/rives/main.yml new file mode 100644 index 0000000..034b19c --- /dev/null +++ b/group_vars/rives/main.yml @@ -0,0 +1,7 @@ +--- +apartment_block: rives +apartment_block_id: 3 + +router_ip_suffix: 254 + +mtu: 1500 diff --git a/group_vars/rives/sudo_location_group.yml b/group_vars/rives/sudo_location_group.yml new file mode 100644 index 0000000..081a6bf --- /dev/null +++ b/group_vars/rives/sudo_location_group.yml @@ -0,0 +1,3 @@ +--- +# Users in that group will be able to `sudo` +sudo_group_location: 'sudorives' diff --git a/host_vars/proxy-ovh.adm.auro.re.yml b/host_vars/proxy-ovh.adm.auro.re.yml new file mode 100644 index 0000000..3b427e8 --- /dev/null +++ b/host_vars/proxy-ovh.adm.auro.re.yml @@ -0,0 +1,64 @@ +--- +certbot: + domains: + - auro.re + - cas.auro.re + - codimd.auro.re + - grafana.auro.re + - pad.auro.re + - passbolt.auro.re + - phabricator.auro.re + - privatebin.auro.re 
+ - riot.auro.re + - sharelatex.auro.re + - status.auro.re + - wiki.auro.re + - www.auro.re + mail: tech.aurore@lists.crans.org + certname: auro.re + +nginx: + ssl: + cert: /etc/letsencrypt/live/auro.re/fullchain.pem + cert_key: /etc/letsencrypt/live/auro.re/privkey.pem + trusted_cert: /etc/letsencrypt/live/auro.re/chain.pem + + redirect_dnames: + - aurores.net + - fede-aurore.net + + redirect_tcp: {} + + redirect_sites: + - from: www.auro.re + to: auro.re + - from: 92.222.211.195 + to: auro.re + + reverseproxy_sites: + - from: phabricator.auro.re + to: 10.128.0.50 + + - from: wiki.auro.re + to: 10.128.0.51 + + - from: www.auro.re + to: 10.128.0.52 + + - from: passbolt.auro.re + to: 10.128.0.53 + + - from: riot.auro.re + to: "10.128.0.150:8080" + - from: codimd.auro.re + to: "10.128.0.150:8081" + - from: grafana.auro.re + to: "10.128.0.150:8082" + - from: privatebin.auro.re + to: "10.128.0.150:8083" + - from: pad.auro.re + to: "10.128.0.150:8084" + - from: cas.auro.re + to: "10.128.0.150:8085" + - from: status.auro.re + to: "10.128.0.150:8086" diff --git a/host_vars/proxy.adm.auro.re.yml b/host_vars/proxy.adm.auro.re.yml new file mode 100644 index 0000000..b8fb2c3 --- /dev/null +++ b/host_vars/proxy.adm.auro.re.yml @@ -0,0 +1,63 @@ +--- +certbot: + domains: + - bbb.auro.re + - drone.auro.re + - gitea.auro.re + - intranet.auro.re + - litl.auro.re + - nextcloud.auro.re + - re2o.auro.re + - vote.auro.re + - re2o-server.auro.re + - re2o-test.auro.re + - wikijs.auro.re + + mail: tech.aurore@lists.crans.org + certname: auro.re + +nginx: + ssl: + cert: /etc/letsencrypt/live/auro.re/fullchain.pem + cert_key: /etc/letsencrypt/live/auro.re/privkey.pem + trusted_cert: /etc/letsencrypt/live/auro.re/chain.pem + + redirect_dnames: + - aurores.net + - fede-aurore.net + + redirect_tcp: + - name: Gitea + port: 2222 + destination: "10.128.0.60:2222" + + redirect_sites: + - from: 45.66.111.61 + to: auro.re + + reverseproxy_sites: + - from: re2o.auro.re + to: 10.128.0.20 + - from: intranet.auro.re + to: 10.128.0.20 + + - from: bbb.auro.re + to: 10.128.0.54 + + - from: nextcloud.auro.re + to: "10.128.0.58:8080" + + - from: gitea.auro.re + to: "10.128.0.60:3000" + + - from: drone.auro.re + to: "10.128.0.64:8000" + + - from: litl.auro.re + to: 10.128.0.35 + + - from: re2o-test.auro.re + to: 10.128.0.80 + + - from: wikijs.auro.re + to: "10.128.0.66:3000" diff --git a/hosts b/hosts index 34cdde3..7920433 100644 --- a/hosts +++ b/hosts @@ -1,70 +1,260 @@ # Aurore servers inventory # How to name your server ? -# > We name servers according to location, then type. -# > So all containers at OVH are in ovh-container. -# > Then we regroup everything in global geographic and type groups. +# > We name servers according to location, then type, then function. +# > Then we regroup everything in global geographic, type and function groups. 
-[ovh-pve] -horus ansible_host=10.128.0.1 -[ovh-container] -riot.adm.auro.re +############################################################################### +# Aurore : main services + +viviane.adm.auro.re + +[aurore_pve] +merlin.adm.auro.re + +[aurore_vm] +routeur-aurore.adm.auro.re +routeur-aurore-backup.adm.auro.re +radius-aurore.adm.auro.re +dhcp-aurore.adm.auro.re +dhcp-aurore-backup.adm.auro.re +dns-aurore.adm.auro.re +proxy.adm.auro.re +camelot.adm.auro.re +gitea.adm.auro.re +drone.adm.auro.re +nextcloud.adm.auro.re +stream.adm.auro.re +re2o-server.adm.auro.re +re2o-ldap.adm.auro.re +re2o-db.adm.auro.re +pendragon.adm.auro.re +services-bdd-local.adm.auro.re +backup.adm.auro.re +services-web.adm.auro.re +mail.adm.auro.re +wikijs.adm.auro.re + + +############################################################################### +# OVH + +[ovh_pve] +horus.adm.auro.re + +[ovh_container] synapse.adm.auro.re -codimd.adm.auro.re services-bdd.adm.auro.re phabricator.adm.auro.re wiki.adm.auro.re www.adm.auro.re -pad.adm.auro.re -proxy.adm.auro.re +proxy-ovh.adm.auro.re +matrix-services.adm.auro.re -[ovh-vm] -re2o-server.adm.auro.re -re2o-ldap.adm.auro.re -re2o-db.adm.auro.re +[ovh_vm] serge.adm.auro.re +passbolt.adm.auro.re +vpn-ovh.adm.auro.re +docker-ovh.adm.auro.re +switchs-manager.adm.auro.re +ldap-replica-ovh.adm.auro.re -[ovh-testing-vm] -re2o-test.adm.auro.re +[ovh_testing_vm] +#re2o-test.adm.auro.re -[fleming-pve] -#freya.adm.auro.re -#odin.adm.auro.re -[fleming-vm-ldap-replica] -#ldap-replica-fleming1.adm.auro.re -#ldap-replica-fleming2.adm.auro.re +############################################################################### +# Les Jardins de Fleming + +[fleming_pve] +freya.adm.auro.re +marki.adm.auro.re + +[fleming_vm] +ldap-replica-fleming.adm.auro.re +dhcp-fleming.adm.auro.re +dhcp-fleming-backup.adm.auro.re +dns-fleming.adm.auro.re +dns-fleming-backup.adm.auro.re +prometheus-fleming.adm.auro.re +#prometheus-fleming-fo.adm.auro.re +radius-fleming.adm.auro.re +radius-fleming-backup.adm.auro.re +unifi-fleming.adm.auro.re +routeur-fleming.adm.auro.re +routeur-fleming-backup.adm.auro.re + + +############################################################################### +# Pacaterie + +[pacaterie_pve] +mordred.adm.auro.re +titan.adm.auro.re + +[pacaterie_vm] +ldap-replica-pacaterie.adm.auro.re +dhcp-pacaterie.adm.auro.re +dhcp-pacaterie-backup.adm.auro.re +dns-pacaterie.adm.auro.re +dns-pacaterie-backup.adm.auro.re +prometheus-pacaterie.adm.auro.re +#prometheus-pacaterie-fo.adm.auro.re +radius-pacaterie.adm.auro.re +radius-pacaterie-backup.adm.auro.re +unifi-pacaterie.adm.auro.re +routeur-pacaterie.adm.auro.re +routeur-pacaterie-backup.adm.auro.re + + +############################################################################### +# Emilie du Chatelet + +[edc_server] +perceval.adm.auro.re + +[edc_pve] +chapalux.adm.auro.re +escalope.adm.auro.re + +[edc_vm] +routeur-edc.adm.auro.re +routeur-edc-backup.adm.auro.re +dns-edc.adm.auro.re +dns-edc-backup.adm.auro.re +dhcp-edc.adm.auro.re +dhcp-edc-backup.adm.auro.re +unifi-edc.adm.auro.re +radius-edc.adm.auro.re +radius-edc-backup.adm.auro.re +ldap-replica-edc.adm.auro.re + + +############################################################################### +# George Sand + +[gs_pve] +lancelot.adm.auro.re +odin.adm.auro.re + +[gs_vm] +dhcp-gs.adm.auro.re +dhcp-gs-backup.adm.auro.re +dns-gs.adm.auro.re +dns-gs-backup.adm.auro.re +routeur-gs.adm.auro.re +routeur-gs-backup.adm.auro.re +unifi-gs.adm.auro.re +radius-gs.adm.auro.re 
+radius-gs-backup.adm.auro.re +prometheus-gs.adm.auro.re +ldap-replica-gs.adm.auro.re + +############################################################################### +# Les Rives +[rives_pve] +thor.adm.auro.re + +[rives_vm] +dhcp-rives-backup.adm.auro.re +unifi-rives.adm.auro.re +dns-rives-backup.adm.auro.re +radius-rives-backup.adm.auro.re +routeur-rives-backup.adm.auro.re +ldap-replica-rives.adm.auro.re + +# -aurore services +[aurore:children] +aurore_vm + # everything at ovh [ovh:children] -ovh-pve -ovh-container -ovh-vm - -# everything at ovh-testing -[ovh-testing:children] -ovh-testing-vm +ovh_pve +ovh_container +ovh_vm # everything at fleming [fleming:children] -fleming-pve -fleming-vm-ldap-replica +fleming_pve +fleming_vm +#fleming_unifi + +# everything at pacaterie +[pacaterie:children] +pacaterie_pve +pacaterie_vm +#pacaterie_unifi + +# everything at edc +[edc:children] +edc_pve +edc_vm + +# everything at georgesand +[gs:children] +gs_pve +gs_vm + +# everything at Les Rives +[rives:children] +rives_pve +rives_vm + + +############################################################################### +# Groups by type # every LXC container [container:children] -ovh-container +ovh_container # every virtual machine [vm:children] -ovh-vm -fleming-vm-ldap-replica +ovh_vm +fleming_vm +pacaterie_vm +edc_vm +gs_vm +rives_vm # every PVE [pve:children] -ovh-pve -fleming-pve +ovh_pve +fleming_pve +pacaterie_pve +edc_pve +gs_pve +rives_pve + + +############################################################################### +# Groups by service + +[ldap_replica:children] +ldap_replica_fleming +ldap_replica_pacaterie +ldap_replica_edc +ldap_replica_gs +ldap_replica_ovh +ldap_replica_rives + +[ldap_replica_fleming] +ldap-replica-fleming.adm.auro.re + +[ldap_replica_pacaterie] +ldap-replica-pacaterie.adm.auro.re + +[ldap_replica_edc] +ldap-replica-edc.adm.auro.re + +[ldap_replica_gs] +ldap-replica-gs.adm.auro.re + +[ldap_replica_ovh] +ldap-replica-ovh.adm.auro.re + +[ldap_replica_rives] +ldap-replica-rives.adm.auro.re -# every LDAP replica -[ldap-replica:children] -fleming-vm-ldap-replica diff --git a/ldap.yml b/ldap.yml deleted file mode 100644 index 59d33f3..0000000 --- a/ldap.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Plug LDAP on all servers -- hosts: all - roles: - - ldap-client - -# Clone LDAP on local geographic location -# DON'T DO THIS AS IT RECREATES THE REPLICA -# - hosts: ldap-replica -# roles: -# - ldap-replica diff --git a/ldap_replica.yml b/ldap_replica.yml new file mode 100755 index 0000000..b921957 --- /dev/null +++ b/ldap_replica.yml @@ -0,0 +1,7 @@ +#!/usr/bin/env ansible-playbook +--- +# Clone LDAP on local geographic location +# DON'T DO THIS AS IT RECREATES THE REPLICA +- hosts: ldap_replica + roles: + - ldap_replica diff --git a/matrix.yml b/matrix.yml old mode 100644 new mode 100755 index 1975e50..be54c53 --- a/matrix.yml +++ b/matrix.yml @@ -1,15 +1,18 @@ +#!/usr/bin/env ansible-playbook --- # Install Matrix Synapse on corresponding containers - hosts: synapse.adm.auro.re vars: - mxisd_version: 1.3.1 - synapse_rest_auth_url: https://raw.githubusercontent.com/kamax-matrix/matrix-synapse-rest-auth/master/rest_auth_provider.py + mxisd_releases: https://github.com/kamax-matrix/mxisd/releases + mxisd_deb: "{{ mxisd_releases }}/download/v1.3.1/mxisd_1.3.1_all.deb" roles: - - debian-backports - - matrix-synapse - - matrix-mxisd + - debian_backports + - nodejs + - matrix_synapse + - matrix_appservice_irc + - matrix_appservice_webhooks -# Install Matrix Riot on corresponding 
containers -- hosts: riot.adm.auro.re +# Install Matrix services +- hosts: matrix-services.adm.auro.re roles: - - matrix-riot + - debian_backports diff --git a/monitoring.yml b/monitoring.yml new file mode 100755 index 0000000..c0c58c8 --- /dev/null +++ b/monitoring.yml @@ -0,0 +1,62 @@ +#!/usr/bin/env ansible-playbook +--- +- hosts: prometheus-fleming.adm.auro.re,prometheus-fleming-fo.adm.auro.re + vars: + prometheus_alertmanager: docker-ovh.adm.auro.re:9093 + snmp_unifi_password: "{{ vault_snmp_unifi_password }}" + + # Prometheus targets.json + prometheus_targets: + - targets: | + {{ groups['fleming_physical'] + groups['fleming_vm'] | list | sort }} + prometheus_unifi_snmp_targets: + - targets: "{{ groups['fleming_unifi'] | list | sort }}" + roles: + - prometheus + +- hosts: prometheus-pacaterie.adm.auro.re,prometheus-pacaterie-fo.adm.auro.re + vars: + prometheus_alertmanager: docker-ovh.adm.auro.re:9093 + snmp_unifi_password: "{{ vault_snmp_unifi_password }}" + + # Prometheus targets.json + prometheus_targets: + - targets: | + {{ groups['pacaterie_physical'] + groups['pacaterie_vm'] | list | sort }} + prometheus_unifi_snmp_targets: + - targets: "{{ groups['pacaterie_unifi'] | list | sort }}" + roles: + - prometheus + +- hosts: prometheus-edc.adm.auro.re,prometheus-edc-fo.adm.auro.re + vars: + prometheus_alertmanager: docker-ovh.adm.auro.re:9093 + snmp_unifi_password: "{{ vault_snmp_unifi_password }}" + + # Prometheus targets.json + prometheus_targets: + - targets: | + {{ groups['edc_physical'] + groups['edc_vm'] | list | sort }} + prometheus_unifi_snmp_targets: + - targets: "{{ groups['edc_unifi'] | list | sort }}" + roles: + - prometheus + +- hosts: prometheus-georgesand.adm.auro.re,prometheus-georgesand-fo.adm.auro.re + vars: + prometheus_alertmanager: docker-ovh.adm.auro.re:9093 + snmp_unifi_password: "{{ vault_snmp_unifi_password }}" + + # Prometheus targets.json + prometheus_targets: + - targets: | + {{ groups['georgesand_physical'] + groups['georgesand_vm'] | list | sort }} + prometheus_unifi_snmp_targets: + - targets: "{{ groups['georgesand_unifi'] | list | sort }}" + roles: + - prometheus + +# Monitor all hosts +- hosts: all,!unifi,!ovh + roles: + - prometheus_node diff --git a/network.yml b/network.yml new file mode 100755 index 0000000..e64d8ff --- /dev/null +++ b/network.yml @@ -0,0 +1,65 @@ +#!/usr/bin/env ansible-playbook +--- +# Set up DHCP servers. +- hosts: dhcp-*.adm.auro.re + roles: + - isc_dhcp_server + + +# Deploy unbound DNS server (recursive). +- hosts: dns-*.adm.auro.re,!dns-aurore*.adm.auro.re + roles: + - unbound + + +# Déploiement du service re2o aurore-firewall et keepalived +# radvd: IPv6 SLAAC (/64 subnets, private IPs). +# Must NOT be on routeur-aurore-*, or will with DHCPv6! 
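+# (The leading "~" below makes Ansible treat the host pattern as a regular
+# expression, so this play targets the residence routers but not
+# routeur-aurore*.)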
+- hosts: ~routeur-(pacaterie|edc|fleming|gs|rives).*\.adm\.auro\.re + roles: + - router + - radvd + +# No radvd here +- hosts: ~routeur-aurore.*\.adm\.auro\.re + roles: + - router + - ipv6_edge_router + +# Radius (backup only for now) +- hosts: radius-*.adm.auro.re + roles: + - radius + + +# WIP: Deploy authoritative DNS servers +# - hosts: authoritative_dns +# vars: +# service_repo: https://gitlab.crans.org/nounous/re2o-dns.git +# service_name: dns +# service_version: crans +# service_config: +# hostname: re2o-server.adm.auro.re +# username: service-user +# password: "{{ vault_serviceuser_passwd }}" +# roles: +# - re2o-service + + +# Deploy Unifi Controller +# - hosts: unifi-fleming.adm.auro.re,unifi-pacaterie.adm.auro.re +# roles: +# - unifi-controller + +# Deploy Re2o switch service +# - hosts: switchs-manager.adm.auro.re +# vars: +# service_repo: https://gitlab.federez.net/re2o/switchs.git +# service_name: switchs +# service_version: master +# service_config: +# hostname: re2o-server.adm.auro.re +# username: service-user +# password: "{{ vault_serviceuser_passwd }}" +# roles: +# - re2o-service diff --git a/nginx-reverse-proxy.yml b/nginx-reverse-proxy.yml deleted file mode 100644 index 426e66b..0000000 --- a/nginx-reverse-proxy.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -# Install NGINX with reverse proxy conf -- hosts: proxy.adm.auro.re - vars: - reversed_proxy_subdomains: - - name: re2o - from: re2o.auro.re - to: re2o-server.adm.auro.re - - name: intranet - from: intranet.auro.re - to: re2o-server.adm.auro.re - - name: pad - from: pad.auro.re - to: pad.adm.auro.re:9001 - - name: phabricator - from: phabricator.auro.re - to: phabricator.adm.auro.re - - name: wiki - from: wiki.auro.re - to: wiki.adm.auro.re - - name: www - from: www.auro.re - to: www.adm.auro.re - - name: re2o-test - from: re2o-test.auro.re - to: re2o-test.adm.auro.re - - name: riot - from: riot.auro.re - to: riot.adm.auro.re - - name: codimd - from: codimd.auro.re - to: codimd.adm.auro.re:8080 - roles: - - nginx-reverse-proxy diff --git a/nuke_radius_dbs.yml b/nuke_radius_dbs.yml new file mode 100755 index 0000000..4af58c2 --- /dev/null +++ b/nuke_radius_dbs.yml @@ -0,0 +1,7 @@ +#!/usr/bin/env ansible-playbook +--- +- hosts: radius-*.adm.auro.re + roles: + - radius + vars: + nuke_radius: true diff --git a/proxmox.yml b/proxmox.yml new file mode 100755 index 0000000..15b62ca --- /dev/null +++ b/proxmox.yml @@ -0,0 +1,432 @@ +#!/usr/bin/env ansible-playbook +--- +# This is a special playbook to create a new VM ! 
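+# It drives the Proxmox API through the proxmox_kvm module from a host that has
+# python-proxmoxer and python-requests installed, and prompts for your LDAP
+# password at runtime (no become/root needed).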
+- hosts: proxy.adm.auro.re # Host with python-proxmoxer and python-requests + become: false # We do not need root as we use Proxmox API + + vars: + vm_definitions: + + # Réseau Pacaterie + - name: ldap-replica-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + + # Réseau Fleming + - name: ldap-replica-fleming1 + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + + # Réseau EdC + - name: ldap-replica-edc1 + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + + # Réseau George Sand + - name: ldap-replica-gs1 + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-gs + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-gs + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + 
disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-gs + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-gs + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-gs + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + + vars_prompt: + - name: "password" + prompt: "Enter LDAP password for your user" + private: true + + tasks: + - name: Define a virtual machine in Proxmox + proxmox_kvm: + api_user: "{{ ansible_user_id }}@pam" + api_password: "{{ password }}" + api_host: "{{ item.virtu }}.adm.auro.re" + name: "{{ item.name }}" + node: "{{ item.virtu }}" + scsihw: virtio-scsi-pci + scsi: '{"scsi0":"{{ item.virtu }}:{{ item.disksize }},format=raw"}' + sata: '{"sata0":"local:iso/{{ item.installiso }},media=cdrom"}' + net: '{"net0":"virtio,bridge=vmbr2"}' # Adm only by default + cores: "{{ item.cores }}" + memory: "{{ item.memory }}" + balloon: "{{ item.memory // 2 }}" + bios: seabios # Ansible module doesn't support UEFI boot disk + loop: + # Réseau Fleming + - name: ldap-replica-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + - name: routeur-fleming + virtu: freya + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + + - name: ldap-replica-fleming-fo + virtu: marki + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-fleming-fo + virtu: marki + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-fleming-fo + virtu: marki + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-fleming-fo + virtu: marki + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-fleming-fo + virtu: marki + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: routeur-fleming-fo + virtu: marki + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + + # Réseau Pacaterie + - name: ldap-replica-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: 
debian-10.0.0-amd64-netinst.iso + - name: dhcp-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + - name: routeur-pacaterie + virtu: mordred + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + + - name: ldap-replica-pacaterie-fo + virtu: titan + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-pacaterie-fo + virtu: titan + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-pacaterie-fo + virtu: titan + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-pacaterie-fo + virtu: titan + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-pacaterie-fo + virtu: titan + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: routeur-pacaterie-fo + virtu: titan + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + + # Réseau EDC + - name: ldap-replica-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + - name: routeur-edc + virtu: chapalux + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + + # Réseau George Sand + - name: ldap-replica-georgesand + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dhcp-georgesand + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: dns-georgesand + virtu: perceval + cores: 2 # 2 mimimum, 
10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: prometheus-georgesand + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: radius-georgesand + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso + - name: unifi-georgesand + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-9.9.0-amd64-netinst.iso + - name: routeur-georgesand + virtu: perceval + cores: 2 # 2 mimimum, 10 maximum + memory: 1024 # M + disksize: 16 # G + installiso: debian-10.0.0-amd64-netinst.iso diff --git a/roles/baseconfig/files/update-motd.d/00-logo b/roles/baseconfig/files/update-motd.d/00-logo index 0a78ea0..025257f 100755 --- a/roles/baseconfig/files/update-motd.d/00-logo +++ b/roles/baseconfig/files/update-motd.d/00-logo @@ -1,6 +1,5 @@ #!/bin/sh -# /etc/update-motd.d/00-logo -# Deployed with Aurore Ansible ! +# {{ ansible_managed }} # Pretty uptime upSeconds="$(/usr/bin/cut -d. -f1 /proc/uptime)" diff --git a/roles/baseconfig/files/update-motd.d/10-uname b/roles/baseconfig/files/update-motd.d/10-uname new file mode 100755 index 0000000..4586095 --- /dev/null +++ b/roles/baseconfig/files/update-motd.d/10-uname @@ -0,0 +1,3 @@ +#!/bin/sh +# {{ ansible_managed }} +uname -snrvm diff --git a/roles/baseconfig/tasks/apt-listchanges.yml b/roles/baseconfig/tasks/apt-listchanges.yml index 56925b2..b4d6214 100644 --- a/roles/baseconfig/tasks/apt-listchanges.yml +++ b/roles/baseconfig/tasks/apt-listchanges.yml @@ -6,14 +6,27 @@ name: apt-listchanges state: present update_cache: true + register: apt_result + retries: 3 + until: apt_result is succeeded # Send email when there is something new - name: Configure apt-listchanges - lineinfile: - dest: /etc/apt/listchanges.conf - regexp: "^{{ item.key }}=" - line: "{{ item.value }}" - with_dict: - confirm: 'confirm=true' - email_address: "email_address={{ monitoring_mail }}" - which: 'which=both' + ini_file: + path: /etc/apt/listchanges.conf + no_extra_spaces: true + section: apt + option: "{{ item.option }}" + value: "{{ item.value }}" + state: present + mode: 0644 + loop: + - option: confirm + value: "true" + + - option: email_address + value: "{{ monitoring_mail }}" + + - option: which + value: both +... 
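Two idioms introduced in this hunk come back throughout the rest of the diff: APT tasks now register their result and retry up to three times, so a transient mirror or network failure does not abort the whole run, and key=value edits move from `lineinfile` to `ini_file`, which manages one option per loop item idempotently. The retry idiom in isolation looks like this (the package name is only an illustration):

```yaml
# Generic retry idiom used by the apt tasks in these roles.
# "htop" is just an example package; with no explicit delay set,
# Ansible waits 5 seconds between attempts by default.
- name: Install a package, retrying on transient APT failures
  apt:
    name: htop
    state: present
    update_cache: true
  register: apt_result
  retries: 3
  until: apt_result is succeeded
```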
diff --git a/roles/baseconfig/tasks/main.yml b/roles/baseconfig/tasks/main.yml index 27fbd03..eb62226 100644 --- a/roles/baseconfig/tasks/main.yml +++ b/roles/baseconfig/tasks/main.yml @@ -3,29 +3,48 @@ - name: Install basic tools when: ansible_os_family == "Debian" apt: - name: "{{ packages }}" - state: present + name: + - acl # advanced ACL + - apt # better than apt-get + - aptitude # nice to have for Ansible + - bash-completion # because bash + - curl # Better that wget + - emacs-nox # for maman + - fish # to motivate @edpibu + - git # code versioning + - less # i like cats + - lsb-release + - htop # better than top + - iotop # monitor i/o + - oidentd # postgresql identification + - molly-guard # prevent reboot + - nano # for vulcain + - net-tools + - ntp # network time sync + - screen # Vulcain asked for this + - sudo + - tree # create a graphical tree of files + - vim # better than nano + - zsh # to be able to ssh @erdnaxe update_cache: true - vars: - packages: - - bash-completion # for bash users - - zsh # alternative shell - - sudo # to gain root access - - git # code versioning - - nano # basic text editor - - vim # like nano but more powerful and complex - - htop # better than top - - less # i like cats - - tree # create a graphical tree of files - - ipython # better Python shell - - acl # for Ansible become support + register: apt_result + retries: 3 + until: apt_result is succeeded # Pimp my server - name: Customize motd copy: - src: 'update-motd.d/00-logo' - dest: '/etc/update-motd.d/00-logo' + src: "update-motd.d/{{ item }}" + dest: "/etc/update-motd.d/{{ item }}" mode: 0755 + loop: + - 00-logo + - 10-uname + +- name: Remove Debian warranty motd + file: + path: /etc/motd + state: absent # Configure APT mirrors on Debian Stretch - name: Configure APT mirrors @@ -33,8 +52,8 @@ - ansible_distribution == 'Debian' - ansible_distribution_release == 'stretch' template: - src: 'apt/sources.list.j2' - dest: '/etc/apt/sources.list' + src: apt/sources.list.j2 + dest: /etc/apt/sources.list mode: 0644 # Patriotisme @@ -50,11 +69,7 @@ question: locales/default_environment_locale value: fr_FR.UTF-8 vtype: select - notify: - - Reconfigure locales - -# Molly-Guard : prevent accidental shutdowns -- include_tasks: molly-guard.yml + notify: Reconfigure locales # APT-List Changes : send email with changelog - include_tasks: apt-listchanges.yml @@ -62,8 +77,15 @@ # User skeleton - name: Configure user skeleton copy: - src: skel/{{ item.key }} - dest: /etc/skel/{{ item.value }} - with_dict: - dot_zshrc: .zshrc - dot_zshrc.local: .zshrc.local + src: "skel/dot_{{ item }}" + dest: "/etc/skel/.{{ item }}" + mode: 0644 + loop: + - zshrc + - zshrc.local + +- name: Configure resolvconf + template: + src: resolv.conf + dest: /etc/resolv.conf + mode: 0644 diff --git a/roles/baseconfig/tasks/molly-guard.yml b/roles/baseconfig/tasks/molly-guard.yml deleted file mode 100644 index bee8e12..0000000 --- a/roles/baseconfig/tasks/molly-guard.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -# Install molly-guard -- name: Install molly-guard - when: ansible_os_family == "Debian" - apt: - name: molly-guard - state: present - update_cache: true - -# Always ask for hostname -- name: Configure molly-guard - lineinfile: - dest: /etc/molly-guard/rc - regexp: '^#*\s*ALWAYS_QUERY_HOSTNAME.*$' - line: 'ALWAYS_QUERY_HOSTNAME=true' diff --git a/roles/baseconfig/templates/resolv.conf b/roles/baseconfig/templates/resolv.conf new file mode 100644 index 0000000..935eeeb --- /dev/null +++ b/roles/baseconfig/templates/resolv.conf @@ -0,0 +1,4 
@@ +domain adm.auro.re +nameserver 10.128.0.253 +nameserver 2a09:6840:128::253 +nameserver 80.67.169.12 diff --git a/roles/basesecurity/handlers/main.yml b/roles/basesecurity/handlers/main.yml new file mode 100644 index 0000000..4891585 --- /dev/null +++ b/roles/basesecurity/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Restart sshd service + service: + name: sshd + state: restarted + +- name: Restart fail2ban service + service: + name: fail2ban + state: restarted diff --git a/roles/basesecurity/tasks/main.yml b/roles/basesecurity/tasks/main.yml new file mode 100644 index 0000000..a0c15b6 --- /dev/null +++ b/roles/basesecurity/tasks/main.yml @@ -0,0 +1,87 @@ +--- +- name: Configure sysctl + template: + src: sysctl.d/local.conf.j2 + dest: /etc/sysctl.d/local.conf + mode: 0644 + +# Use this command to list setuid or setgid executables +# find / -type f -perm /6000 -ls 2>/dev/null +- name: Desactivate setuid/setgid on unused binaries + file: + path: "{{ item }}" + mode: u-s,g-s + loop: + - /usr/lib/openssh/sshkeysign # Not used + - /usr/bin/gpasswd # No group auth + - /usr/bin/passwd # Only root should change passwd + - /usr/bin/expiry # With re2o + - /usr/bin/newgrp # No group auth + - /usr/bin/chage # With re2o + - /usr/bin/chsh # With re2o + - /usr/bin/chfn # With re2o + - /bin/mount # Only root should mount + - /bin/umount # Only root should umount + ignore_errors: true # Sometimes file won't exist + +# Only SSH keys to log on root +- name: Prohibit root SSH with password + lineinfile: + dest: /etc/ssh/sshd_config + regexp: '^{{ item.0 }}' + insertafter: '^#{{ item.0 }}' + line: '{{ item.0 }} {{ item.1 }}' + loop: + - ["PermitRootLogin", "prohibit-password"] + - ["AllowAgentForwarding", "no"] + - ["X11Forwarding", "no"] + - ["TCPKeepAlive", "yes"] + notify: Restart sshd service + +# See banned client with `fail2ban-client status sshd` +- name: Install fail2ban + apt: + name: fail2ban + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded + +- name: Configure fail2ban + ini_file: + path: /etc/fail2ban/jail.d/local.conf + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + state: present + mode: 0644 + notify: Restart fail2ban service + loop: + - section: sshd + option: ignoreip + value: 10.128.0.254 # Whitelist bastion + + - section: sshd + option: enabled + value: "true" + + - section: sshd + option: bantime + value: 600 + + - section: sshd + option: findtime + value: 600 + + - section: sshd + option: maxretry + value: 5 + +# See altered packages and configurations with `debsums -ca` +- name: Install debsums + apt: + name: debsums + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded diff --git a/roles/basesecurity/templates/sysctl.d/local.conf.j2 b/roles/basesecurity/templates/sysctl.d/local.conf.j2 new file mode 100644 index 0000000..663a4e9 --- /dev/null +++ b/roles/basesecurity/templates/sysctl.d/local.conf.j2 @@ -0,0 +1,13 @@ +# {{ ansible_managed }} +# See https://www.ssi.gouv.fr/uploads/2016/01/linux_configuration-fr-v1.2.pdf + +# Disable core dump of setuid executables +# So an user can't read privileged information in memory +fs.suid_dumpable = 0 + +# Obfuscate kernel memory addresses +kernel.kptr_restrict = 1 + +# Restrict dmesg access +# This can leak specific harware failures to exploit +kernel.dmesg_restrict = 1 diff --git a/roles/certbot/tasks/main.yml b/roles/certbot/tasks/main.yml new file mode 100644 index 0000000..f29d557 --- /dev/null +++ 
b/roles/certbot/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Install certbot and nginx plugin + apt: + update_cache: true + name: + - certbot + - python3-certbot-nginx + register: pkg_result + retries: 3 + until: pkg_result is succeeded + +- name: Create /etc/letsencrypt/conf.d + file: + path: /etc/letsencrypt/conf.d + state: directory + mode: 0755 + +- name: Add Certbot configuration + template: + src: "letsencrypt/conf.d/certname.ini.j2" + dest: "/etc/letsencrypt/conf.d/{{ certbot.certname }}.ini" + mode: 0644 + register: certbot_config + +- name: Stop services to allow certbot to generate a cert. + service: + name: nginx + state: stopped + when: certbot_config.changed + +- name: Generate new certificate if the configuration changed + shell: "certbot certonly --non-interactive --config /etc/letsencrypt/conf.d/{{ certbot.certname }}.ini" + when: certbot_config.changed + +- name: Restart services to allow certbot to generate a cert. + service: + name: nginx + state: started + when: certbot_config.changed diff --git a/roles/certbot/templates/letsencrypt/conf.d/certname.ini.j2 b/roles/certbot/templates/letsencrypt/conf.d/certname.ini.j2 new file mode 100644 index 0000000..c23d930 --- /dev/null +++ b/roles/certbot/templates/letsencrypt/conf.d/certname.ini.j2 @@ -0,0 +1,23 @@ +# {{ ansible_managed }} + +# Pour appliquer cette conf et générer la conf de renewal : +# certbot --config /etc/letsencrypt/conf.d/{{ certbot.certname }}.ini certonly + +# Use a 4096 bit RSA key instead of 2048 +rsa-key-size = 4096 + +# Always use the staging/testing server +# server = https://acme-staging.api.letsencrypt.org/directory + +# Uncomment and update to register with the specified e-mail address +email = {{ certbot.mail }} + +# Uncomment to use a text interface instead of ncurses +text = True + +# Use nginx challenge +authenticator = nginx + +# Wildcard the domain +cert-name = {{ certbot.certname }} +domains = {{ ", ".join(certbot.domains) }} diff --git a/roles/codimd/handlers/main.yml b/roles/codimd/handlers/main.yml deleted file mode 100644 index 3056062..0000000 --- a/roles/codimd/handlers/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Build front-end bundle -# This can take very long and requires > 2GB of RAM -- name: Build front-end for CodiMD - command: NODE_ENV="production" yarn run build - args: - chdir: /var/local/codimd/codimd - become: true - become_user: codimd - -# Reload systemd daemons when a service file changes -- name: Reload systemd daemons - command: systemctl daemon-reload diff --git a/roles/codimd/tasks/0_apt_dependencies.yml b/roles/codimd/tasks/0_apt_dependencies.yml deleted file mode 100644 index d9bc109..0000000 --- a/roles/codimd/tasks/0_apt_dependencies.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# For NodeJS package -- name: Configure NodeJS pin - when: - - ansible_distribution == 'Debian' - - ansible_distribution_release == 'stretch' - template: - src: apt/nodejs.j2 - dest: /etc/apt/preferences.d/nodejs - mode: 0644 - -# TODO -# apt-transport-https -# curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - -# echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list - -# Install CodiMD dependencies -- name: Install required packages - apt: - name: "{{ item }}" - state: present - update_cache: true - with_items: - - git - - nodejs - - npm - - build-essential - - yarn diff --git a/roles/codimd/tasks/1_user_group.yml b/roles/codimd/tasks/1_user_group.yml deleted file mode 100644 index 4d79ee0..0000000 --- 
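The certbot role above is driven entirely by a `certbot` dictionary (`certname`, `mail`, `domains`) read by both the tasks and the .ini template. A sketch of what the matching host or group variables could look like (the keys follow the role, the values are invented for illustration):

```yaml
# Hypothetical group_vars entry consumed by roles/certbot.
# Key names match the variables referenced in the role; values are examples only.
certbot:
  certname: example-cert
  mail: monitoring@example.org
  domains:
    - service.example.org
    - www.example.org
```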
a/roles/codimd/tasks/1_user_group.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Security #1 -- name: Create CodiMD system group - group: - name: codimd - system: yes - state: present - -# Security #2 -- name: Create CodiMD user - user: - name: codimd - group: codimd - home: /var/local/codimd - comment: CodiMD - system: yes - state: present - -# Security #3 -- name: Secure CodiMD home directory - file: - path: /var/local/codimd - state: directory - owner: codimd - group: codimd - mode: 0750 diff --git a/roles/codimd/tasks/main.yml b/roles/codimd/tasks/main.yml deleted file mode 100644 index 4e80f3c..0000000 --- a/roles/codimd/tasks/main.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -# Install APT dependencies -- include_tasks: 0_apt_dependencies.yml - -# Create CodiMD user and group -- include_tasks: 1_user_group.yml - -# Download CodiMD -- name: Clone CodiMD project - git: - repo: https://github.com/hackmdio/codimd.git - dest: /var/local/codimd/codimd - version: 1.3.0 - become: true - become_user: codimd - notify: Build front-end for CodiMD - -# Setup dependencies and configs -- name: Install CodiMD depedencies - command: NODE_ENV="production" bin/setup - args: - chdir: /var/local/codimd/codimd - become: true - become_user: codimd - -# Connection to database -- name: Connect CodiMD to PostgreSQL db - template: - src: sequelizerc.j2 - dest: /var/local/codimd/codimd/.sequelizerc - owner: codimd - group: codimd - mode: 0600 - -# Configure -- name: Configure CodiMD - template: - src: config.json.j2 - dest: /var/local/codimd/codimd/config.json - owner: codimd - group: codimd - mode: 0600 - -# Service file -- name: Install CodiMD systemd unit - template: - src: 'systemd/codimd.service.j2' - dest: '/etc/systemd/system/codimd.service' - owner: root - group: root - mode: 0644 - notify: Reload systemd daemons - -# Run -- name: Ensure that CodiMD is started - service: - name: codimd - state: started - enabled: true diff --git a/roles/codimd/templates/config.json.j2 b/roles/codimd/templates/config.json.j2 deleted file mode 100644 index fc02978..0000000 --- a/roles/codimd/templates/config.json.j2 +++ /dev/null @@ -1,39 +0,0 @@ -{ - "production": { - "domain": "codimd.auro.re", - "debug": false, - "port": 8080, - "useSSL": false, - "protocolUseSSL": true, - "useCDN": false, - "csp": { - "enable": true, - "directives": { - }, - "upgradeInsecureRequests": "auto", - "addDefaults": true - }, - "db": { - "username": "codimd", - "password": "{{ postgresql_codimd_passwd }}", - "database": "codimd", - "host": "{{ postgresql_services_url }}", - "port": "5432", - "dialect": "postgres" - }, - "email": false, - "ldap": { - "url": "{{ ldap_master_uri }}", - "bindDn": "{{ ldap_codimd_bind_dn }}", - "bindCredentials": "{{ ldap_codimd_password }}", - "searchBase": "cn=Utilisateurs,dc=auro,dc=re", - "searchFilter": "(uid={% raw %}{{username}}{% endraw %})", - "searchAttributes": ["uid", "givenName", "mail"], - "usernameField": "uid", - "useridField": "uid", - "providerName": "Compte Aurore" - }, - "allowFreeURL": true - } -} - diff --git a/roles/codimd/templates/sequelizerc.j2 b/roles/codimd/templates/sequelizerc.j2 deleted file mode 100644 index e497bf6..0000000 --- a/roles/codimd/templates/sequelizerc.j2 +++ /dev/null @@ -1,8 +0,0 @@ -var path = require('path'); - -module.exports = { - 'config': path.resolve('config.json'), - 'migrations-path': path.resolve('lib', 'migrations'), - 'models-path': path.resolve('lib', 'models'), - 'url': 'postgres://codimd:{{ postgresql_codimd_passwd }}@{{ postgresql_services_url }}:5432/codimd' 
-} diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml new file mode 100644 index 0000000..66eed8c --- /dev/null +++ b/roles/docker/tasks/main.yml @@ -0,0 +1,58 @@ +--- +# Install HTTPS support for APT +- name: Install apt-transport-https + apt: + update_cache: true + name: + - apt-transport-https + - ca-certificates + - curl + - gnupg2 + - software-properties-common + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded + +# Add the key +- name: Configure the apt key + apt_key: + url: https://download.docker.com/linux/debian/gpg + id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 + state: present + register: apt_key_result + retries: 3 + until: apt_key_result is succeeded + +# Add the repository into source list +- name: Configure docker repository + apt_repository: + repo: "{{ item }}" + state: present + loop: + - deb https://download.docker.com/linux/debian buster stable + +- name: Install docker + apt: + update_cache: true + name: + - docker-ce + - docker-ce-cli + - containerd.io + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded + +# Compose need to be updated to use new Docker features +- name: Install Docker Compose + get_url: + url: https://github.com/docker/compose/releases/download/1.24.1/docker-compose-Linux-x86_64 + dest: /usr/local/bin/docker-compose + mode: "0755" + +- name: Indicate role in motd + template: + src: update-motd.d/05-service.j2 + dest: /etc/update-motd.d/05-docker + mode: 0755 diff --git a/roles/docker/templates/update-motd.d/05-service.j2 b/roles/docker/templates/update-motd.d/05-service.j2 new file mode 100755 index 0000000..ce5faaa --- /dev/null +++ b/roles/docker/templates/update-motd.d/05-service.j2 @@ -0,0 +1,3 @@ +#!/bin/sh +# {{ ansible_managed }} +echo "> Les recettes Docker-compose se trouvent dans /var/local/ansible-docker" diff --git a/roles/dokuwiki/tasks/main.yml b/roles/dokuwiki/tasks/main.yml index 0da632d..76321ba 100644 --- a/roles/dokuwiki/tasks/main.yml +++ b/roles/dokuwiki/tasks/main.yml @@ -22,5 +22,9 @@ # Install - name: Install DokuWiki apt: - name: dokuwiki update_cache: true + name: dokuwiki + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded diff --git a/roles/etherpad/handlers/main.yml b/roles/etherpad/handlers/main.yml deleted file mode 100644 index ac3842d..0000000 --- a/roles/etherpad/handlers/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# Reload systemd daemons when a service file changes -- name: Reload systemd daemons - command: systemctl daemon-reload diff --git a/roles/etherpad/tasks/1_user_group.yml b/roles/etherpad/tasks/1_user_group.yml deleted file mode 100644 index 8c8110c..0000000 --- a/roles/etherpad/tasks/1_user_group.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Security #1 -- name: Create EtherPad system group - group: - name: etherpad - system: yes - state: present - -# Security #2 -- name: Create EtherPad user - user: - name: etherpad - group: etherpad - home: /var/local/etherpad - comment: EtherPad - system: yes - state: present - -# Security #3 -- name: Secure Etherpad home directory - file: - path: /var/local/etherpad - state: directory - owner: etherpad - group: etherpad - mode: 0750 diff --git a/roles/etherpad/tasks/main.yml b/roles/etherpad/tasks/main.yml deleted file mode 100644 index a6dea45..0000000 --- a/roles/etherpad/tasks/main.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -# Install APT dependencies -- include_tasks: 0_apt_dependencies.yml - -# Create EtherPad user and group -- include_tasks: 
1_user_group.yml - -# Download EtherPad -- name: Clone EtherPad project - git: - repo: https://github.com/ether/etherpad-lite.git - dest: /var/local/etherpad/etherpad-lite - version: master - become: true - become_user: etherpad - -# Installation script -# TODO: move this in a handler -- name: Install Etherpad dependencies - command: bin/installDeps.sh - args: - chdir: /var/local/etherpad/etherpad-lite - become: true - become_user: etherpad - -# Configuration -- name: Configure EtherPad - lineinfile: - dest: /var/local/etherpad/etherpad-lite/settings.json - regexp: '^\s*"{{ item.key }}"' - line: "{{ item.value }}" - with_dict: - title: " \"title\": \"Etherpad Aurore\"," - dbType: " \"dbType\" : \"postgres\"," - defaultPadText: " \"defaultPadText\" : \"Bienvenue sur l'EtherPad d'Aurore !\\n\\nCe pad est synchronisé avec les autres utilisateur·rice·s présent·e·s sur cette page.\\n\"," - lang: " \"lang\": \"fr-fr\"" - -# Service file -- name: Install EtherPad systemd unit - template: - src: systemd/etherpad-lite.service.j2 - dest: /etc/systemd/system/etherpad-lite.service - owner: root - group: root - mode: 0644 - notify: Reload systemd daemons - -# Run -- name: Ensure that EtherPad is started - service: - name: etherpad-lite - state: started - enabled: True - -# La configuration de la clé `dbSettings` n'est pas encore automatisé ! - -# TODO-list -# * Configure admin user, logs -# Plugins : https://framacloud.org/fr/cultiver-son-jardin/etherpad.html#concernant-framapad diff --git a/roles/etherpad/templates/apt/nodejs.j2 b/roles/etherpad/templates/apt/nodejs.j2 deleted file mode 100644 index 65e5110..0000000 --- a/roles/etherpad/templates/apt/nodejs.j2 +++ /dev/null @@ -1,5 +0,0 @@ -# {{ ansible_managed }} - -Package: node* libuv1* -Pin: release a=stretch-backports -Pin-Priority: 600 diff --git a/roles/etherpad/templates/systemd/etherpad-lite.service.j2 b/roles/etherpad/templates/systemd/etherpad-lite.service.j2 deleted file mode 100644 index fcaf95d..0000000 --- a/roles/etherpad/templates/systemd/etherpad-lite.service.j2 +++ /dev/null @@ -1,17 +0,0 @@ -# {{ ansible_managed }} - -[Unit] -Description=Etherpad-lite, the collaborative editor. 
-After=syslog.target network-online.target mysql.service postgresql.service -Conflicts=shutdown.target - -[Service] -Type=simple -User=etherpad -Group=etherpad -WorkingDirectory=/var/local/etherpad/etherpad-lite -ExecStart=/usr/bin/nodejs /var/local/etherpad/etherpad-lite/node_modules/ep_etherpad-lite/node/server.js -Restart=always - -[Install] -WantedBy=multi-user.target diff --git a/roles/ipv6_edge_router/frr-apt-key.asc b/roles/ipv6_edge_router/frr-apt-key.asc new file mode 100644 index 0000000..3c311f8 --- /dev/null +++ b/roles/ipv6_edge_router/frr-apt-key.asc @@ -0,0 +1,186 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFvRo7oBEADH/lEeQBaRW4Lpmzhpn7W53hhMUefgj1bJ7ISpMC3qOlgSIeof +sQjZ5Hr0RHxz5bRVRtcOhPhKRvL0wCmTpROvKBVyrOHDn4AAh+D7bqhzrEZezJwu +on2fBRA5prT97r99WKpIPjyqeKHWY3GsbkKMYAcFMGNwYZudEm9bqFaZ9F1CX96i +VHTArZiZZgPPycOW6fZzrdPDa5/07WA4tJ4PXnMFEd3bLpRDW/t46XqBeNOitBcN +TrRY7LY/rLnfAUfTWlQVm1wb5gl1E0e4LDlaAysqZCVDriAUwNzk9aRnLQw14h18 +af3sIi649fQ/uv/JwQ9hc1os/gu23N4wKSwSvQGYo3V6oqbxkhIQ5TR0MgXIxfF9 +LoSFgnrXvUpUc+V4qXJJV+hLbTEoAKrHaON0f7BQHAiTsKB1R7FLVCMFIRtuZ1RD +iUCL9jFFXmAikHsTUFE2EOCW7+kqRSQ5ICu3IqMbXXA1dHz4tN2ji5LPZ4OKh/1O +zZQCBev4IZ3KWibFZwNxDwWFSoFQeuNKnVujsfR31SuFRWmASqZGGpN/Jr+zVNsb +iUXUBrnSj8PXYs1zSLrfVVlaga6EI29o5ozUweZDvn5VnRycHaTVjVEmYynnf0ss +axkRKDgP4e0czNTbH9Rze+AL/Xfc5F0CVQ3jGZQwLgspqpj2UNicZhTzQwARAQAB +tCtEYXZpZCBMYW1wYXJ0ZXIgPGVxdWlub3gtZGViaWFuQGRpYWMyNC5uZXQ+iQJO +BBMBCAA4FiEEPZlorJrnvhFpKI3bH9WDmJX1f9oFAlvRo7oCGwEFCwkIBwIGFQgJ +CgsCBBYCAwECHgECF4AACgkQH9WDmJX1f9pNHg//VS3bICTNEjjmXRtHdsKyRs2s +Nl6BefYDuOPy6NWIra9oLZzo1G15Zt8wH1LLHIBND1d8QILa1739coQhfNJeeuyp +sYclgSoX85UqpLeHE0Ws/o1vjNmAlQX7qDR5q1iOxUfxLjyXAR7qaqOCBR0uGjxP +ZCI88ctu0bt9iI2rzmKwgyORDWwvKOmHovHxB8stPwdToyQK/eij94CVlf086pOz +eIrEjC54jE4pq7nae8w7RsWs5OmgBkrZoXIuLBfHa1ynbUjhE3okPiZDnZr9bPTH +FpJ4DnsQGhZGjFIiNVi5zbV+MxjavkfbshpzE1TK9EhNf1DdI8A+XzpiTfA5ifDL +sm/KnA9Z+4T2EswthB6YV1lcnacSGOrEI1CQUTHPSFwZc1WUDkX5aqwib8fCT6U7 +oEngVBwN+guj5l2ba50pt1bct86c8Rv0cnaeKt6boe9sLeHbAur/R7Smdp0yIwAa +pq52eSQvrxkV2sKlvOrBLX0v4hOut4LQKzresM0smjARYamh3ksj7oAaHJx1+RMZ +AK7i2AjcMR4BvALTerVd2oM4SNghSFubJTVoMUarzeM3XQ6mFGbdwsqo6ziPlr2r +vtX7syFclRXaeJw4VAQqXlBqbpZevld7A9/3G9CyuRSoQxgPv9p6fx3aE7R65O9U +YsBsMtj2oxhKnkNjoky5AQ0EW9Gj4AEIALrNBXS0J+LAtQjWfJUwp9KsXCYx/1fL +YDENUdkbwfCTDHPZFgZf0jvPFuQkvFl7SnoyjwbnDlFCn2kYeZJ1vS3ZidUwZbcE +QCrARSKBzovsHDdafQwuUi21GAGuBOmIUSY5RihozjLgZ/5h2/vbqmCucfoYsctb +tl3jpT8HTo6DJ4oQWSsHF5e4G8U5DCpCINbJnpqtfIFbm3yYGHm9Yzny4E2aMnzG +lHErxxAoYufGLh6Hfs1JeJSsWL07334NZMU/zgzUs9dBbhbJ0/QBnRVuU+YHje+x +9Ir+szHjKwHo29K6g3BV2BTjWpoW7IQG2d6baN1VgWepwpLnbzAG5wMAEQEAAYkD +bAQYAQgAIBYhBD2ZaKya574RaSiN2x/Vg5iV9X/aBQJb0aPgAhsCAUAJEB/Vg5iV +9X/awHQgBBkBCAAdFiEEN1UvZYKIwg4j4yPMVBjykdDUoaoFAlvRo+AACgkQVBjy +kdDUoarcggf/S3Vd9BqByRkCyuPLwgKWLt3KsIuKOKG9+lzoAy2VsKOomistO3g0 +itefSRUOGgSArVG/rarR0Dzva3LI7sFF9vS4XKlARSPJV0rY13buSR/LnagqmWUf +mQJTnh+MSWS6P37Burw0DqWioPd7VJQ67BfdrGUUeP8bChIPByo+ssi1qu2MFmLj +toYiLSYW0gRSKtn8+oz5hk1lzuQBBTZ14ykqwZH9L1kCo+3Q7O7e1dztJ6NX6jEm +QeHwLq27RqoUG15HR7CQvupa5CLbJ0Vja2tSkUnYb/ph8z7H9rkHz4qjKQWI1QoC +jLkiyrdDeWqVWfpwGhoAryBlWKn51T9j6NecEAC5WojJF6xqYFiiT/V7ekmMKZ0l +PA/IwW12U+ZP2EFVbqXjwBj3Mqx4NshNdRiWsl24ulIuNpmi6I3MJzx/1sfafGHl +mq7n2zv0Cky37M28tYoDOt5fzSLYn9cgo/OzhS3D05ARbHP+ofcXDz+So+mj8wQb +uW2sh9ToaiYOMzGqyMR0DFO6++FdIYzphN0sPyJBdfGeePNajV6+xhdS4zktWEGq +QaF2XukTGxodJ3J3poeCarfK9ubmkemLRJ1Q+ynlx5KNzvt4Ut1pEO+OXkYOxGfI +8gUuj3BXICVP3UVpB7RaqW5obz8zqQkskRqIBBrLoX+Dl+l4sID20BmW028xurkf +ef3lNfLGTat4RleypLrcVZ4CMvAM/KOLInrXEoFqIKLiwnlAp6RK3mRL1IURyOtO 
+WENn6w0DuD3yyQVglQfNft6TqaMjVxFjh2fDgWvISMe7x4Jp+EWljwBnpi+TtnG/ +P99J2sGb9Hwu6gC45mQ1Ufoe+suYuehSxAWNz00GzBS9XU1xRs00xLCjPNPhSjHO +MqmGdm3cSeFdcmp7JRM05RtDOeBYAZuDV/HZNQu4XG2gHUv1xbuIqwKqN4vRMrI2 +8fWRdN2sPNlULTjeeMpxy01lfwilvVkXRJKyCPCx9MWZfJ0qbFeEC/cDOonx34lK +mBW0B2Otoah+Em6d5bkBDQRb0aQLAQgAxXl4JTeK5v3xU8CxMG8IRLVrfT2XTWN+ +RvfnIoPPpvs1M9XXNnw2jVKaMJq/s9gKxpl3QaqcxR+zf+7L49ooAUoFodPg2Fbg +HoNLZYukSLyPyL4LgE/X1ZQpx78m51Yn+vzej0Va/dqa77W90GlDM4CIE/ikFFpn +oPO3c1SaqJv+bk3XNoP2l35ttsk3Y9if3r2LJRyn/ovVXZgQD+Ulb+klYugOBiKl +ezuq/v2tnySQJ7ouXuWyoQrcuTUS05GbFdhlbr4xJHE2HLxmqn1aSV7TQb8Uk9zQ +0SmSTinnlSlAgoDeq1veDLeMnYo6No2V2IOcXOLv9hOa3sNV+FnsaQARAQABiQI2 +BBgBCAAgFiEEPZlorJrnvhFpKI3bH9WDmJX1f9oFAlvRpAsCGwwACgkQH9WDmJX1 +f9p6+Q/+N97F+PW383hTi84JMyiQsX0mJrvDjt5hkkdN+7u0tUNL0l3AACQ7b85/ +ofJsGnfh8kYlB2nCP+gaNQU03qqbcyMLHsuwB+ULG0izbREb7aK02RBluFpIbgdV +rFrgrUkLiSsuQLdReQYRTP1tU0peosBPxhhb1alAGhkPebWx+MLlbtiyg/j4pu8+ +oFirrJ5WEltamGt8OSbdLGNS22PuwxV8VDo/Xbi57P1VBglCpgG1nWDEN8+i5nHh +8OKWZmvRhih1F89BR7U14OET+EENrZd8YRF2KOvOAM0eR1aIK/AilbINVZV0girt +B/rYFhwi9i7Fyo3gEtPRRZpzcQ7V0VZiBlpEAbjgqwe1XDVNJYquM7E4S2jBidR6 +XJaYQImiwzMcyFopZZgD0F46xSI3O8zZp21g7Dq4pv6wRXGU+L639u+X5INDtJ7s +kykwrYzmeGg/Mp0Mseiqq7iIJXrbP4dL1+Ck9alSGCe0p5vd3CIeBR3pFeSDG6yI +2DiRzDfzbkIuUdIOAjXWjIl+XWfsFc/Znnux3UcAGec4Nhe3JvKEy5keDpXZGSaZ +JaFJ3WJl8uQfJjO8n8M+P2lxmrpaErqkMk0+SC3DcSSZFEDigD6flMvfdVnOqdLa +R1K6skDZkO+PQYqSydf9erO6+YgEjJB0/uCMXgHDVsmO3uKLOTg= +=IWDv +-----END PGP PUBLIC KEY BLOCK----- +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFyFXCQBEADengbfRCSixqjsBj7hnRsjDMihbgfolZe4asVd/JNh4xWqs5+z +Q+vZUNwluJa6hYrgFW66xPfQeAbo11pS9r2RNkemLWi+8gf2vUwGlp8ZPXb/hYsR +7URWJo+4GvfqH5RiTdLlJbPQnLSlCAMhwaAl1ko+p2zY/ImiAL+yaO8YYYN4sG31 ++67gG3t7AnbH+QjeoEU8heg+fYBiQXSmJ3nTvmYB0lgY/Cybh2Fge90JIZWoeWGp +fX1zhpCoJGXIW2GyOFQRMYQCbKqtrmicDkgQTocItDfUSwGBr4EFHM+mO0DwfZ8R +Lq+hzkLdAyJwWGNmiHbk8zFIBnktenmgslkoawvNOkGIz7mL0wqkkw6FYCojSnuj +ndlYg/XAKrr5RpSDwxwvzWhjyuA+0g2nXBFKWQ/SVZH5niXHTgXBjKfbXjF85eOu +bVx+82T7KV+aSAr7d0vAbSQO/XK6YrcXTJXZZbjIo/1eauT/FPQCBAejgOAle9wq +aN04IE5+XPnRkqe5jodDyf3c8hHRL0xWthtj0kupV/7VWNKBLlMESPVfSKN9kjkR +aTO6dH8jM0K1QWo5/mzEHNv4O2j8kyHDKJdRi+8bJSRKpToFmaLSe5gSA8vp/Fwg +rY/eLT/5GQ0XOkqtonLYkHbLu9m8H8IrYRgCBVuLCa3cEbYc0mktmm3ExQARAQAB +tDVGUlJvdXRpbmcgRGViaWFuIFJlcG9zaXRvcnkgPGRldkBsaXN0cy5mcnJvdXRp +bmcub3JnPokCTgQTAQgAOBYhBEpWx3OLs/gVlagF0qgydpkI8T7RBQJchVwkAhsB +BQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEKgydpkI8T7Rj20P/3Or3Vi/k6jj +qb9hQKJgmX0taDG+FF1+X1WgmYOdW01jGK5jj1k3nKYQf4VGzR+eyUBTN54IpFsm +aW5DwKoMLcaMQ7OAbl+AtpW700Q5hAtz4aWB5Stl7wyjVl600INES5DOuIuEmqB5 +60uUePNqF/+XKUgDgbQFc3E/Tb+c+Z7ADIhbIYPLUcSwJtRLhbnPGjt+pTLmPCO1 +i/NPRjzwzFgyHJaHqlGFNUUFfqHqGWXLxlO0A9m+r1gOiAevV3ZTzC2izkjOhJHT +5XWFW7pS9jD78XgSN310glGYmWHZ0hxDgNR4V0oxOZCma1TDjfWVnUfK5pkm/78i +qRHc8tjt+tor47iOsml4J95Qr5Qxvf+iDgThXcYMAVDRULITmiqZqA1pkheCBugl +vQ7tvdjXf61ZxOZnDaqtLAHaSB/EynHaBaCzI2obQDHSgC1AU74f9SSN0j4HD01V +McFh9H0YZeR0eo4I16HHYUcExQNWJGfeuAC1XBGiNixHXy4c1PdxPFGPDnFtH3sX +2I5X4sKRVhZEbhRe111B93OFdkvWXmSyK6afu6qJBqB52zEe7F6UcNLP7ZnzafJV +bTCSZeF3Nzt58byiO5jt7nj1wKxv9HpQVy2P7V0CXJB4EK42tGhejjEOY8FevWKF +0OHX9RWKktlKAsLYFNPbHGPSo/ULj+sFuQINBFyI47kBEADLx3nZ+mFReBN4/E4C +Gl+B8bKPJ+gaFSdcw8GWV2NFMlJvOqg7Fa4djrqGaOA2YomnNpddS62jAUNdlgDJ +qRlZVK+Mqctdqgz7Gsuj4l7G/XjnUpQzPaEjxMXzCdFbP19lHa6GvyTgf1TewaNv +uLBe+oaObdgiAXCUyS3RUtLc9L7KU2+BlnX2JKeQK5K7sRromFfPc27qN+hsWgpy +xPvWYMGMHA0RjRwXOitjszXVZGUEPxwhX1kFOuFKnIcKG2jSbX/KLtcV1DNZro8s +Q9hb2UZWZxrwVIIh8FTL8esf0zM64HLo1sZ6yUaVzzeETuWZFMMKaF1dn+KtmKkL 
+KjgplRzJSE4QDP+48F6l9RCnrpIg33/rfN/M5Lbx5g2fhfT84+wQD6cKHypYfFng +GJbmpUCgITcxGFmpetCTpYkxsVMzikudFe2YSJ7TO0aVBgiHfBoXU9g5AXuDYVKi +8ZpaWcRSu4O0H58Kh/hk/8yiVa8e1nTMjsZuXMle8N52rF2G7vrMhva7uccgbbY+ +ZlOtWpZ7MJzIn/vKeWxXNDcvG7CVHn4BiSLXRcrNgw/I4UjhqpeRdx0l+j36HdDN +0yaSZu6uP9SnsB5wkm1jN3uoNMFAdvpIqoaK9+2b5xKxLsNtE/R3anX9TlfFmeom +k0JxrTsNqpRxBw5GylM96Bd9BQARAQABiQRsBBgBCAAgFiEESlbHc4uz+BWVqAXS +qDJ2mQjxPtEFAlyI47kCGwICQAkQqDJ2mQjxPtHBdCAEGQEIAB0WIQSnzWQmxSYW +E+lH68yjyrYexHux0AUCXIjjuQAKCRCjyrYexHux0J+TD/40x0L8vzP+k29NEreT +N+k6889rCWMKAwmKWpgUN39nv9hZbSOFWDQs5Ttp+Rc+v7L5Pj4avJPzGnQieTMw +7wKOu8ZUisBVzYfYsxlXlKsOLZrVlQpFJhWNFOBq0axYlP6vrslXkMPk+IPz8/FV +USVByUHNNlIPmJU0WOIoLt+0YkqN1c1UCui/H6Z6IFpFIG8WLpgAtyKvqu8kdnEw +JEqpp4dO/ainnF8fL8VuV1+cdbxRO0IsOJBqQ+M8LFI2ANJscW+l6sg9RX2ZSExQ +Bm6dtPnsfP483SwH62PbaMP4lQ+Zpjl6ngoxv+S0RIDoW5Zl3zGe721NiLmz6Llz +0Ghe3Jgnf1JHOlR893Hi8UkvvTbBLkR4fbvmbgHvhcNWCL2hGCsxDV002hI4OlYp +px1gJ/HoU7lrrKQCzwTTxfQ1JiTMa+eiwY8xQGEfqUY83pWkx3wGUBa+W3GNlxD9 ++pZIzmxtD4uylA9lwnw/GXV4RauDuHMwWuqAGtDEr9Y8nYHuxl5/KdYOCf98sOzv +XU8btnxuGHrWb5OgRD21NeHa4zwYXIuYOQtYai6IboKdH70l8b3VX+xtu9Fwf/V4 +5EsipWedfA2S3CtKjP6Pv5C6NAVoAnXinqr1VAXMJT5PvXmx1mLP7Xms8o7O7xda +rsERKxVtt+JjArArk/gpFXImPLArD/42ZChEpJgbjabTrd6saI4BOsKSARX36Cxe +cjJuWNvddpsb9WgYXbXCSK5hOybFYLlbRmdFmz1VzVy5au+Bsbmy+jKqzgAM1sui +wE6WyVIOdN3hTZ9W20Fb4pa6MWd8dpWBwi7g40oRvaoPpspcimpa6OCNktij9zrZ +/hN49JYbLjA7V+rE+zWWz2m3Ecwn2A5LZdKbrI06uKFltTRUhMZ3HhwhKrNui/iN +YpwDn662jJaTxJ8x/WQJP6ILKVi3wk0eGFBSapEUv+D51y1v0dRh/QOO98RnLQ/p +T/4y7BPxEWLLNr53rPHOjd2ClhDNZ8+dFzYrOCs+1f+mpWf7yF9wHBs2hOhSZAMx +34HshZVLGBtdfD/cb1MA5MnBdfJHHFjL6EiJOP30YKJsTEGgqpAyMtyZt5/MKEfj +r0OV/La7s38fpcPlZplF2/eqgxt7WiQu5I6BUXJcSlGTe97Rq5Ba/tSHzUnK1FNr +v/hfBSgtxmX2qT5ojMu+UiKtvJDeUAGFLAJcaaEv92frhLWHcXXpayUuk/wdU9Qg +y8F+yFtYGY2lj0h9WCeKbYUAm1p7skW5v2nsMw6I3QOPFzQBzm1rFQ1vFJMaqFFC +qvHJALsI0SmaI7ruXYrm7CNv6qJKo3URYGq27Tm9lhut8iWKsa1/NWW5LZxzt2mO +egKcOCfAMg== +=Lt8H +-----END PGP PUBLIC KEY BLOCK----- +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFyVPosBEADMyAbmmfo6x7e/dly+yQk1BuWUDbvMHFX9coCSItNxVvvIVzYJ +Bw9pp4kJsTeqTI1cNQcCCjaeWbWevINCD5yFGN25xNPz2s1lgHCpJQzs/1qMKuE8 +vrHkEpJZTrdPSl8J8VdjQDCIh0NrLss1VzoPpFbm7lIkuN/6tl87hsyUyedd/0bY +KkNVZfOW4UAjJFWQakofhTVifHqozb6wu+SYtPFnP/yBJdsYwrKlyyhR8hIIjyAK +sxpKz6Dym6tHoDNLIcPy1Q3uNUaYdN2pXXRqzLTIKq7M7RhgY1W7QO7VFtkaG74j +tmjbJYsX6nBGSlGE5cmlSz8N+D+uR/0NPMvgKOwI38joIT5/Sii/jgZUuX4Mz9Zk +7Rh/C+P906dJpjbCbEPYSxVnZC4fHJZ0ezSEgrFKZ4QGoViNJEcc6jwNkgTM7jdz +0e/xMKu4Ed30jLO7TvnwmcGWF/m2DZIgIMu9rhNHUaKeGRKo+Daf/x1naRbht/a/ +uSyukGJA5koipy8XxmJorx7MdIa9ekYPJaHM3eOcE1fxn7IcOwoqn0piB4lllq2r +akLif195eFIcdI/cRfCX2fgQxBbgAAolUCqguJfus7cQCkn9Fr0cDgq7c0s0dQEw +0kofqGUq6/fpdqxadRdPlYXS3BIvrWA99zOVCzxaccvKaMZxaKauc/rKbQARAQAB +tDNKYWZhciBBbC1HaGFyYWliZWggKGxhdW5jaHBhZCkgPHRvLmphZmFyQGdtYWls +LmNvbT6JAk4EEwEKADgWIQSpD8NtlClAl5jpwth03u1DqxlNvwUCXJU+iwIbAwUL +CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRB03u1DqxlNv7w7D/90u8bETWZmzRZ7 +sRBfyN30YhXgvEJQCSKFgH1H5BS9H2W+VYQ8nmXpoVpsU5wLVosJ55WpO9oF7+6R ++wO2r3lo+vkqFzscYLfNd/KTo6r7kl7VTTN9bBCMgl7eFtmqSO90o5UCrNBHk2Ax +fTBJX9TLGa4SLPhiTlz51XdyYls3yG/3yQ6de6K+jPjrQsP2VbBSHP9OA+MKRTr6 +ONDTyJJ7TEAuoX7W1NLMQYS0kdA1jjGvTKeyHkb8QqFDTM7RyRdczKBEwOXSi7DW +8v3Ink+7CgKGGLnOo3lXlm7q516Z9JMJ5BJtFkTx6Bo9iSAK4jSBiC5Zlta6CtP7 +lLwq/eOut/y9VoDXsdVkQl3UdTrB5b/4MZHL4Z6ykN1dv56XzjdnIl/nM7azwrTW +N9tBDhjrYvKvscSI2l8TGdAseb4ftovHwIBFjNIlI6TYVEUR93ZEGPZCmuYeVSJe +MWStJuLDa+bbbr2/OLvHsjyWB0/5LJNsHHfMCKLZTP8jeNVcHAJi7P+iNPPLp57C +N2NEmyuJwgxFN/5cHlZzkg0QQgSaTm/tsft0jFJh4s0kO6L5NZ9ACP2JC9wfaBmt 
+QFwdl+Tc5/Jf6pTnnMzeHuSiVWvG4jB3EVr47dWD7p5ekP+O0cG0Rnm2rf66M+fo +z88Ga0gduOhxfdRn1qT3smK3hgyv2bkCDQRclT6LARAA8xTr8yu7ab6f4NAeMnTL +4mjjYoYVXBMd4qT/cdtkSFoCdOl+MwcEZmDrq1HzT5CVXo9hPEcI7iuyXiejoMhB +GFfvdY0Pcg5yMoUL57kE3XBoz9C8TEal1loSfTJ4IRou2VpY2sruaKgxO7PvmyQM +D8mk4Sgyewn6VkcQx4dGwQrN2VU8mwFqp4GnEm7DgVJKqFRD43hCFoncNYaSOc52 +vf/EEU6VYxPWi01nZoRiNp8tXt+dYk5yb6fEhDsH9YYk51bgiiiGNoQw/zC1w2ek +zPqJH/Y0BzoODbJ59vqc2jCuzGII4tFkijYbBTcCk1b7/yvQgwLdBpOTrrHcNolh +plr7zHcB8TOc3aYrJ8TkwgP80uK85vlAIzB9AkZ/9Gn2K08b5eVC9cvMm0idwFkg +0fHY+v2aDesA4lv1UtTsQVmrqnx1zaCjwH9tu73GTGXX40guYpbatPu3HDog/QkV +fykVI5B2+vMixFCzudMKg80K+H7QI3uc1efqEmMRKjQU2rKXTNo/lASWjQMNfbWt +JvEsLuLPc909OFhBfoX6GR7pmbKn3MCrTpLVeUkmp0EjcqYaDZXHnzKZQjjNjOKm +6J69G3Ro6Abs7tRpnqOLTLZ5DKWBYidc3/fp/BF+CpeHdZlstLUazQ56ti9GshPf +W4+6TRg2gt1leeXRU18jQz0AEQEAAYkCNgQYAQoAIBYhBKkPw22UKUCXmOnC2HTe +7UOrGU2/BQJclT6LAhsMAAoJEHTe7UOrGU2/kToP/il4dvWMMJS4pgXuDPcwTaYc +e8T9a8Uf0B2BOOKJgLZk2kvI21bwHnGxXc3zuUHCzZ81Y89/IpX+s37J+frvLbqd +xOfE39+5plK9BDn7G9UsTzg7mXuGWpMQA6Mvki4LslY/qCfUqzVeFPNZquH4Emxr +z1u0SldhaqctrkKwW1teTTmqbCtGrRpb0v8x42TBw+WvBJopelpgtdy3TnRbKk86 +NkiiPFVRnfC0RjyMlLxa095t5s8irrqjnAAKMvwKiuzt97CQ/U68WbsOYLyv42PT +ClfbbFJw6ghTZ7SRxiGwUVz7EwQ31MsiffmyJKRca81yqSQfrPS4MkEXChZBt8wF +C1IhG9I9zbHKt9saXWPYCbL8Zs2x3c1md62dl4mrH/VwLV9T+7PaJCM8qrFtkWlu +cYntgBhLW1KY9dWkCtZ7ML70n8FyIyHMD35mZb3lw+c1dBusuwGwZLSksH9ucOQz +Zh3+rlZ8PkYXXosKTkp9qBDVoSgNU5AH8F+K/Uw1uVm+JqqFP/ieQQydS8wAVYAv +ax/ZP+wTHhLvmHUoI70K51osk+sLcFh+h9L6sK+kq9i2mrJs/d2Sk6jau96RJCFe +pHI+29yZoK5ZpOkvFAFvbNXMFd2sn5O60y9LAvr9u2QRNlTvmQ7B1o2/US62SoD4 +HGxIKIAggOUujclydhvu +=+/L/ +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/ipv6_edge_router/handlers/main.yml b/roles/ipv6_edge_router/handlers/main.yml new file mode 100644 index 0000000..604d632 --- /dev/null +++ b/roles/ipv6_edge_router/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart frr + service: + name: frr + state: restarted diff --git a/roles/ipv6_edge_router/tasks/main.yml b/roles/ipv6_edge_router/tasks/main.yml new file mode 100644 index 0000000..8ec1353 --- /dev/null +++ b/roles/ipv6_edge_router/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: install GPG + apt: + name: gnupg + +- name: Add FRR repo key + apt_key: + data: "{{ lookup('file', 'frr-apt-key.asc') }}" + state: present + +- name: Add FRR apt repository + apt_repository: + repo: deb https://deb.frrouting.org/frr buster frr-stable + state: present + notify: restart frr + +- name: Install frr + apt: + name: frr + +- name: setup frr daemons + template: + src: daemons.j2 + dest: /etc/frr/daemons + mode: 0644 + notify: restart frr + +- name: setup frr.conf + template: + src: frr.conf.j2 + dest: /etc/frr/frr.conf + mode: 0644 + notify: restart frr + +- name: enable+start frr + service: + name: frr + state: started + enabled: true diff --git a/roles/ipv6_edge_router/templates/daemons.j2 b/roles/ipv6_edge_router/templates/daemons.j2 new file mode 100644 index 0000000..9cef233 --- /dev/null +++ b/roles/ipv6_edge_router/templates/daemons.j2 @@ -0,0 +1,67 @@ +# This file tells the frr package which daemons to start. +# +# Sample configurations for these daemons can be found in +# /usr/share/doc/frr/examples/. +# +# ATTENTION: +# +# When activation a daemon at the first time, a config file, even if it is +# empty, has to be present *and* be owned by the user and group "frr", else +# the daemon will not be started by /etc/init.d/frr. The permissions should +# be u=rw,g=r,o=. +# When using "vtysh" such a config file is also needed. 
It should be owned by +# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. +# +# The watchfrr and zebra daemons are always started. +# +{% if 'backup' in inventory_hostname %} +bgpd=no +{% else %} +bgpd=yes +{% endif %} +ospfd=no +ospf6d=no +ripd=no +ripngd=no +isisd=no +pimd=no +ldpd=no +nhrpd=no +eigrpd=no +babeld=no +sharpd=no +pbrd=no +bfdd=no + +# +# If this option is set the /etc/init.d/frr script automatically loads +# the config via "vtysh -b" when the servers are started. +# Check /etc/pam.d/frr if you intend to use "vtysh"! +# +vtysh_enable=yes +zebra_options=" -A 127.0.0.1 -s 90000000" +bgpd_options=" -A 127.0.0.1" +ospfd_options=" -A 127.0.0.1" +ospf6d_options=" -A ::1" +ripd_options=" -A 127.0.0.1" +ripngd_options=" -A ::1" +isisd_options=" -A 127.0.0.1" +pimd_options=" -A 127.0.0.1" +ldpd_options=" -A 127.0.0.1" +nhrpd_options=" -A 127.0.0.1" +eigrpd_options=" -A 127.0.0.1" +babeld_options=" -A 127.0.0.1" +sharpd_options=" -A 127.0.0.1" +pbrd_options=" -A 127.0.0.1" +staticd_options="-A 127.0.0.1" +bfdd_options=" -A 127.0.0.1" + +# The list of daemons to watch is automatically generated by the init script. +#watchfrr_options="" + +# for debugging purposes, you can specify a "wrap" command to start instead +# of starting the daemon directly, e.g. to use valgrind on ospfd: +# ospfd_wrap="/usr/bin/valgrind" +# or you can use "all_wrap" for all daemons, e.g. to use perf record: +# all_wrap="/usr/bin/perf record --call-graph -" +# the normal daemon command is added to this at the end. diff --git a/roles/ipv6_edge_router/templates/frr.conf.j2 b/roles/ipv6_edge_router/templates/frr.conf.j2 new file mode 100644 index 0000000..1f06f2f --- /dev/null +++ b/roles/ipv6_edge_router/templates/frr.conf.j2 @@ -0,0 +1,24 @@ +log syslog informational +log stdout + +hostname routeur-aurore +password Tux +enable password Tux + +interface lo +line vty + + +# Aurore AS. +router bgp 43619 + #no synchronization + bgp router-id 45.66.111.254 + + # Remote-AS: Zayo. + neighbor 2001:1b48:2:103::d7:1 remote-as 8218 + address-family ipv6 + network 2a09:6840::/29 + neighbor 2001:1b48:2:103::d7:1 activate + exit-address-family +! 
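frr.conf.j2 pins the whole edge configuration in the template itself: local AS 43619, router-id 45.66.111.254, the announced prefix 2a09:6840::/29 and the Zayo neighbour 2001:1b48:2:103::d7:1 (AS 8218). If a second upstream or a test router ever needs different values, these could be promoted to role variables; a sketch of possible defaults (the variable names are assumptions, the role does not define them today):

```yaml
# Hypothetical roles/ipv6_edge_router/defaults/main.yml.
# Values copied from the current template; variable names are invented.
local_asn: 43619
bgp_router_id: 45.66.111.254
announced_prefix_v6: "2a09:6840::/29"
upstream:
  name: zayo
  address: "2001:1b48:2:103::d7:1"
  asn: 8218
```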
+ diff --git a/roles/isc_dhcp_server/handlers/main.yml b/roles/isc_dhcp_server/handlers/main.yml new file mode 100644 index 0000000..05b48c6 --- /dev/null +++ b/roles/isc_dhcp_server/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: force run dhcp re2o-service + shell: /var/local/re2o-services/dhcp/main.py --force + become_user: re2o-services + +- name: restart dhcpd + systemd: + state: restarted + name: isc-dhcp-server + +- name: restart rsyslog + systemd: + name: rsyslog + state: restarted diff --git a/roles/isc_dhcp_server/tasks/main.yml b/roles/isc_dhcp_server/tasks/main.yml new file mode 100644 index 0000000..e198163 --- /dev/null +++ b/roles/isc_dhcp_server/tasks/main.yml @@ -0,0 +1,110 @@ +--- +- name: Install dhcp (re2o-service) + import_role: + name: re2o-service + vars: + service_repo: https://gitlab.federez.net/re2o/dhcp.git + service_name: dhcp + service_version: master + service_config: + hostname: re2o.auro.re + username: service-user + password: "{{ vault_serviceuser_passwd }}" + +- name: Ensure appropriate permissions on dhcp re2o service + file: + path: /var/local/re2o-services/dhcp/ + state: directory + owner: re2o-services + group: nogroup + recurse: yes + +- name: Install isc-dhcp-server + apt: + update_cache: true + name: isc-dhcp-server + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded + notify: restart dhcpd + +- name: Ensure dhcp log directory exists + file: + path: /var/log/dhcp + owner: root + group: root + mode: u=rwx,g=rx,a=rx + state: directory + +- name: Ensure rsyslog knows where to send dhcp logs + lineinfile: + path: /etc/rsyslog.conf + line: "local7.* /var/log/dhcp/dhcpd.log" + notify: restart rsyslog + +- name: Configure dhcp log rotation + template: + src: logrotate.d/dhcp.j2 + dest: /etc/logrotate.d/dhcp + mode: 0644 + +- name: set up cron to reload dhcp re2o service + cron: + # Do not change this name or idempotence *might* be lost. 
+ name: dhcp-re2o-service + cron_file: re2o-services + minute: "*/2" + hour: "*" + day: "*" + weekday: "*" + month: "*" + user: root + job: "/usr/bin/python3 /var/local/re2o-services/dhcp/main.py" + +- name: Configure /etc/default/isc-dhcp-server + template: + src: default/isc-dhcp-server.j2 + dest: /etc/default/isc-dhcp-server + mode: 0644 + notify: restart dhcpd + +- name: Configure dhcp-failover.conf + template: + src: dhcp/dhcp-failover.conf.j2 + dest: /etc/dhcp/dhcp-failover.conf + mode: 0600 + when: dhcp_failover_enabled + notify: restart dhcpd + +- name: Configure dhcpd.conf + template: + src: dhcp/dhcpd.conf.j2 + dest: /etc/dhcp/dhcpd.conf + mode: 0600 + notify: restart dhcpd + +- name: Configure subnets.conf (regular service) + template: + src: dhcp/regular-subnets.conf.j2 + dest: /etc/dhcp/subnets.conf + mode: 0600 + notify: restart dhcpd + when: not is_aurore_host + +- name: Configure subnets.conf (aurore service) + template: + src: dhcp/aurore-subnets.conf.j2 + dest: /etc/dhcp/subnets.conf + mode: 0600 + notify: restart dhcpd + when: is_aurore_host + +- name: force run dhcp re2o-service + shell: /var/local/re2o-services/dhcp/main.py --force + +- name: Ensure dhcpd is running + service: + name: isc-dhcp-server + state: started + enabled: true diff --git a/roles/isc_dhcp_server/templates/default/isc-dhcp-server.j2 b/roles/isc_dhcp_server/templates/default/isc-dhcp-server.j2 new file mode 100644 index 0000000..6a7c7f7 --- /dev/null +++ b/roles/isc_dhcp_server/templates/default/isc-dhcp-server.j2 @@ -0,0 +1,24 @@ +# Defaults for isc-dhcp-server (sourced by /etc/init.d/isc-dhcp-server) + +# Path to dhcpd's config file (default: /etc/dhcp/dhcpd.conf). +#DHCPDv4_CONF=/etc/dhcp/dhcpd.conf +#DHCPDv6_CONF=/etc/dhcp/dhcpd6.conf + +# Path to dhcpd's PID file (default: /var/run/dhcpd.pid). +#DHCPDv4_PID=/var/run/dhcpd.pid +#DHCPDv6_PID=/var/run/dhcpd6.pid + +# Additional options to start dhcpd with. +# Don't use options -cf or -pf here; use DHCPD_CONF/ DHCPD_PID instead +#OPTIONS="" + +# On what interfaces should the DHCP server (dhcpd) serve DHCP requests? +# Separate multiple interfaces with spaces, e.g. "eth0 eth1". + +{% if is_aurore_host %} +INTERFACESv4="ens19" +INTERFACESv6="" +{% else %} +INTERFACESv4="ens19 ens20 ens21 ens22 ens23" +INTERFACESv6="" +{% endif %} diff --git a/roles/isc_dhcp_server/templates/dhcp/aurore-subnets.conf.j2 b/roles/isc_dhcp_server/templates/dhcp/aurore-subnets.conf.j2 new file mode 100644 index 0000000..1a0e09e --- /dev/null +++ b/roles/isc_dhcp_server/templates/dhcp/aurore-subnets.conf.j2 @@ -0,0 +1,12 @@ +subnet 45.66.110.0 netmask 255.255.255.0 { + interface "ens19"; + option subnet-mask 255.255.255.0; + option broadcast-address 45.66.110.255; + option routers 45.66.110.{{ router_ip_suffix }}; + option domain-name-servers 45.66.110.{{ dns_host_suffix_main }}, {{ backup_dns_servers|join(', ') }}; + option domain-name "adh.auro.re"; + option domain-search "adh.auro.re"; + include "/var/local/re2o-services/dhcp/generated/dhcp.adh.auro.re.list"; + + deny unknown-clients; +} diff --git a/roles/isc_dhcp_server/templates/dhcp/dhcp-failover.conf.j2 b/roles/isc_dhcp_server/templates/dhcp/dhcp-failover.conf.j2 new file mode 100644 index 0000000..6252343 --- /dev/null +++ b/roles/isc_dhcp_server/templates/dhcp/dhcp-failover.conf.j2 @@ -0,0 +1,31 @@ +failover peer "dhcp-failover" { +{% if inventory_hostname == dhcp_failover.primary_host %} + primary; + + # MCLT = Maximum Client Lead Time. + # Must be specified on the primary, forbidden on the secondary. 
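As an aside on the failover setup templated here: whether this file is rendered at all is gated by `dhcp_failover_enabled`, and the primary/secondary hostnames are derived from `apartment_block` in the role's vars. A sketch of the per-site variables involved (the concrete values are examples, not taken from the inventory):

```yaml
# Hypothetical host_vars for a dhcp-<site> machine; key names follow the
# variables referenced by roles/isc_dhcp_server, values are illustrative.
apartment_block: pacaterie          # expands to dhcp-pacaterie / dhcp-pacaterie-backup
apartment_block_dhcp: pacaterie     # used in the domain-name options of subnets.conf
dhcp_failover_enabled: true
is_aurore_host: false               # picks regular-subnets.conf.j2 over aurore-subnets.conf.j2
```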
+ mclt 3600; + + # Address or DNS name on which this node listens for connections + # from its failover peer. + address {{ dhcp_failover.primary_host }}; + peer address {{ dhcp_failover.secondary_host }}; + + # Load balancing. + split 128; +{% endif %} +{% if inventory_hostname == dhcp_failover.secondary_host %} + secondary; + # Address and peer address are reversed on the secondary node. + address {{ dhcp_failover.secondary_host }}; + peer address {{ dhcp_failover.primary_host }}; +{% endif %} + + # The following options can be shared between primary and + # secondary failover peers. + port 647; + peer port 647; + max-response-delay 30; + max-unacked-updates 10; + load balance max seconds 3; +} diff --git a/roles/isc_dhcp_server/templates/dhcp/dhcpd.conf.j2 b/roles/isc_dhcp_server/templates/dhcp/dhcpd.conf.j2 new file mode 100644 index 0000000..84c86fd --- /dev/null +++ b/roles/isc_dhcp_server/templates/dhcp/dhcpd.conf.j2 @@ -0,0 +1,25 @@ +default-lease-time 86400; +max-lease-time 86400; + +# Option definitions common to all supported networks. + +option interface-mtu {{ mtu }}; +option root-path "/"; + +# The ddns-updates-style parameter controls whether or not the server will +# attempt to do a DNS update when a lease is confirmed. We default to the +# behavior of the version 2 packages ('none', since DHCP v2 didn't +# have support for DDNS.) +ddns-update-style none; + +# If this DHCP server is the official DHCP server for the local +# network, the authoritative directive should be uncommented. +authoritative; + +log-facility local7; + +{% if dhcp_failover_enabled %} +include "/etc/dhcp/dhcp-failover.conf"; +{% endif %} + +include "/etc/dhcp/subnets.conf"; diff --git a/roles/isc_dhcp_server/templates/dhcp/regular-subnets.conf.j2 b/roles/isc_dhcp_server/templates/dhcp/regular-subnets.conf.j2 new file mode 100644 index 0000000..43f49fa --- /dev/null +++ b/roles/isc_dhcp_server/templates/dhcp/regular-subnets.conf.j2 @@ -0,0 +1,94 @@ +# Bornes WiFi +subnet 10.{{ subnet_ids.ap }}.0.0 netmask 255.255.0.0 { + interface "ens19"; + option subnet-mask 255.255.0.0; + option broadcast-address 10.{{ subnet_ids.ap }}.255.255; + option routers 10.{{ subnet_ids.ap }}.0.250; + option domain-name "borne.auro.re"; + option domain-search "borne.auro.re"; + + option domain-name-servers 10.{{ subnet_ids.ap }}.0.{{ dns_host_suffix_main }}, 10.{{ subnet_ids.ap }}.0.{{ dns_host_suffix_backup }}, {{ backup_dns_servers|join(', ') }}; + include "/var/local/re2o-services/dhcp/generated/dhcp.borne.auro.re.list"; + + deny unknown-clients; +} + +# Users filaire +subnet 10.{{ subnet_ids.users_wired }}.0.0 netmask 255.255.0.0 { + interface "ens20"; + option subnet-mask 255.255.0.0; + option broadcast-address 10.{{ subnet_ids.users_wired }}.255.255; + option routers 10.{{ subnet_ids.users_wired }}.0.{{ router_ip_suffix }}; + option domain-name "fil.{{ apartment_block_dhcp }}.auro.re"; + option domain-search "auro.re"; + + option domain-name-servers 10.{{ subnet_ids.users_wired }}.0.{{ dns_host_suffix_main }}, 10.{{ subnet_ids.users_wired }}.0.{{ dns_host_suffix_backup }}, {{ backup_dns_servers|join(', ') }}; + + include "/var/local/re2o-services/dhcp/generated/dhcp.fil.{{ apartment_block_dhcp }}.auro.re.list"; + + deny unknown-clients; +} + + +# Users WiFi +subnet 10.{{ subnet_ids.users_wifi }}.0.0 netmask 255.255.0.0 { + interface "ens21"; + option subnet-mask 255.255.0.0; + option broadcast-address 10.{{ subnet_ids.users_wifi }}.255.255; + option routers 10.{{ subnet_ids.users_wifi }}.0.{{ router_ip_suffix }}; + option 
domain-name "wifi.{{ apartment_block_dhcp }}.auro.re"; + option domain-search "auro.re"; + + option domain-name-servers 10.{{ subnet_ids.users_wifi }}.0.{{ dns_host_suffix_main }}, 10.{{ subnet_ids.users_wifi }}.0.{{ dns_host_suffix_backup }}, {{ backup_dns_servers|join(', ') }}; + + include "/var/local/re2o-services/dhcp/generated/dhcp.wifi.{{ apartment_block_dhcp }}.auro.re.list"; + + pool { + range 10.{{ subnet_ids.users_wifi }}.8.0 10.{{ subnet_ids.users_wifi }}.10.255; + +{% if dhcp_failover is defined %} + failover peer "dhcp-failover"; +{% endif %} + } +} + +# Banni +subnet 10.{{ subnet_ids.users_banni }}.0.0 netmask 255.255.0.0 { + interface "ens22"; + option subnet-mask 255.255.0.0; + option broadcast-address 10.{{ subnet_ids.users_banni }}.255.255; + option routers 10.{{ subnet_ids.users_banni }}.0.{{ router_ip_suffix }}; + option domain-name "banni.{{ apartment_block_dhcp }}.auro.re"; + option domain-search "auro.re"; + + option domain-name-servers 10.{{ subnet_ids.users_banni }}.0.{{ dns_host_suffix_main }}, 10.{{ subnet_ids.users_banni }}.0.{{ dns_host_suffix_backup }}; + + pool { + range 10.{{ subnet_ids.users_banni }}.1.0 10.{{ subnet_ids.users_banni }}.2.255; + +{% if dhcp_failover is defined %} + failover peer "dhcp-failover"; +{% endif %} + } +} + + +# Accueil +subnet 10.{{ subnet_ids.users_accueil }}.0.0 netmask 255.255.0.0 { + interface "ens23"; + option subnet-mask 255.255.0.0; + option broadcast-address 10.{{ subnet_ids.users_accueil }}.255.255; + option routers 10.{{ subnet_ids.users_accueil }}.0.{{ router_ip_suffix }}; + option domain-name "accueil.{{ apartment_block_dhcp }}.auro.re"; + option domain-search "auro.re"; + + option domain-name-servers 10.{{ subnet_ids.users_accueil }}.0.{{ dns_host_suffix_main }}, 10.{{ subnet_ids.users_accueil }}.0.{{ dns_host_suffix_backup }}; + + pool { + range 10.{{ subnet_ids.users_accueil }}.1.0 10.{{ subnet_ids.users_accueil }}.2.255; + +{% if dhcp_failover is defined %} + failover peer "dhcp-failover"; +{% endif %} + } +} diff --git a/roles/isc_dhcp_server/templates/logrotate.d/dhcp.j2 b/roles/isc_dhcp_server/templates/logrotate.d/dhcp.j2 new file mode 100644 index 0000000..9823aed --- /dev/null +++ b/roles/isc_dhcp_server/templates/logrotate.d/dhcp.j2 @@ -0,0 +1,11 @@ +/var/log/dhcp/dhcpd.log { + # common options + daily + rotate 365 + missingok + compress + delaycompress + notifempty + + copytruncate +} diff --git a/roles/isc_dhcp_server/vars/main.yml b/roles/isc_dhcp_server/vars/main.yml new file mode 100644 index 0000000..77933c1 --- /dev/null +++ b/roles/isc_dhcp_server/vars/main.yml @@ -0,0 +1,4 @@ +--- +dhcp_failover: + primary_host: dhcp-{{ apartment_block }}.adm.auro.re + secondary_host: dhcp-{{ apartment_block }}-backup.adm.auro.re diff --git a/roles/ldap-client/tasks/0_install_ldap.yml b/roles/ldap-client/tasks/0_install_ldap.yml deleted file mode 100644 index 79ceb18..0000000 --- a/roles/ldap-client/tasks/0_install_ldap.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -# Install LDAP client packages -- name: Install LDAP client packages - apt: - name: "{{ item }}" - state: present - update_cache: true - with_items: - - nslcd - - libnss-ldapd - - libpam-ldapd - -# Reduce LDAP load -# For the moment it is broken on Stretch when using PHP7.3 -# - name: Install LDAP cache package -# apt: -# name: nscd -# state: present -# update_cache: true - -# Configure /etc/nslcd.conf -- name: Configure nslcd LDAP credentials - template: - src: nslcd.conf.j2 - dest: /etc/nslcd.conf - mode: 0600 - notify: Restart nslcd service - -# Configure 
/etc/nsswitch.conf -- name: Configure NSS to use LDAP - lineinfile: - dest: /etc/nsswitch.conf - regexp: "^{{ item.key }}:" - line: "{{ item.value }}" - with_dict: - passwd: 'passwd: files ldap' - group: 'group: files ldap' - shadow: 'shadow: files ldap' - sudoers: 'sudoers: files ldap' - notify: Restart nslcd service diff --git a/roles/ldap-client/tasks/1_group_security.yml b/roles/ldap-client/tasks/1_group_security.yml deleted file mode 100644 index 40435d6..0000000 --- a/roles/ldap-client/tasks/1_group_security.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# Filter SSH on groups -- name: Filter SSH on groups - lineinfile: - dest: /etc/ssh/sshd_config - regexp: '^AllowGroups' - line: "AllowGroups root sudoldap aurore ssh" - -# To gain root access with ldap rights -- name: Install SUDO package - package: - name: sudo - state: present - -# Set sudo group -- name: Configure sudoers - lineinfile: - dest: /etc/sudoers - regexp: "^%{{ sudo_group }}" - line: "%{{ sudo_group }} ALL=(ALL:ALL) ALL" diff --git a/roles/ldap-client/templates/nslcd.conf.j2 b/roles/ldap-client/templates/nslcd.conf.j2 deleted file mode 100644 index db05bdc..0000000 --- a/roles/ldap-client/templates/nslcd.conf.j2 +++ /dev/null @@ -1,38 +0,0 @@ -# {{ ansible_managed }} - -# The user and group nslcd should run as. -uid nslcd -gid nslcd - -# The location at which the LDAP server(s) should be reachable. -{% if ldap_local_replica_uri is defined %} -{% for uri in ldap_local_replica_uri %} -uri {{ uri }} -{% endfor %} -{% endif %} -uri {{ ldap_master_uri }} - -# The search base that will be used for all queries. -base {{ ldap_base }} -base passwd cn=Utilisateurs,{{ ldap_base }} -base shadow cn=Utilisateurs,{{ ldap_base }} -base group ou=posix,ou=groups,{{ ldap_base }} - -# The LDAP protocol version to use. -ldap_version 3 - -# The DN to bind with for normal lookups. -binddn {{ ldap_nslcd_bind_dn }} -bindpw {{ ldap_nslcd_passwd }} - -# The DN used for password modifications by root. -#rootpwmoddn cn=admin,dc=example,dc=com - -# SSL options -#ssl off -#tls_reqcert never -tls_cacertfile /etc/ssl/certs/ca-certificates.crt - -# The search scope. 
-#scope sub - diff --git a/roles/ldap-client/handlers/main.yml b/roles/ldap_client/handlers/main.yml similarity index 84% rename from roles/ldap-client/handlers/main.yml rename to roles/ldap_client/handlers/main.yml index b233281..f0f3111 100644 --- a/roles/ldap-client/handlers/main.yml +++ b/roles/ldap_client/handlers/main.yml @@ -1,6 +1,6 @@ --- - name: Reconfigure libnss-ldapd package - command: 'dpkg-reconfigure libnss-ldapd -f noninteractive' + command: dpkg-reconfigure libnss-ldapd -f noninteractive - name: Restart nslcd service service: diff --git a/roles/ldap_client/tasks/1_group_security.yml b/roles/ldap_client/tasks/1_group_security.yml new file mode 100644 index 0000000..06664e6 --- /dev/null +++ b/roles/ldap_client/tasks/1_group_security.yml @@ -0,0 +1,37 @@ +--- +# Filter SSH on groups +- name: Filter SSH on groups + when: ansible_facts['hostname'] != "camelot" # Camelot is accessible for everyone + lineinfile: + dest: /etc/ssh/sshd_config + regexp: ^AllowGroups + line: AllowGroups root sudoldap aurore ssh + state: present + +# To gain root access with ldap rights +- name: Install SUDO package + package: + name: sudo + state: present + register: package_result + retries: 3 + until: package_result is succeeded + +# Set sudo group +- name: Configure sudoers sudo group + lineinfile: + dest: /etc/sudoers + regexp: ^%{{ sudo_group }} + line: "%{{ sudo_group }} ALL=(ALL:ALL) ALL" + state: present + validate: /usr/sbin/visudo -cf %s + +# Set sudo location group +- name: Configure sudoers sudo location group + lineinfile: + dest: /etc/sudoers + regexp: ^%{{ sudo_group_location }} + line: "%{{ sudo_group_location }} ALL=(ALL:ALL) ALL" + state: present + validate: /usr/sbin/visudo -cf %s + when: sudo_group_location is defined diff --git a/roles/ldap-client/tasks/2_userland_scripts.yml b/roles/ldap_client/tasks/2_userland_scripts.yml similarity index 85% rename from roles/ldap-client/tasks/2_userland_scripts.yml rename to roles/ldap_client/tasks/2_userland_scripts.yml index 34d0098..fa41780 100644 --- a/roles/ldap-client/tasks/2_userland_scripts.yml +++ b/roles/ldap_client/tasks/2_userland_scripts.yml @@ -3,9 +3,9 @@ - name: Copy passwd and chsh scripts template: src: "{{ item }}.j2" - dest: "/usr/local/bin/{{ item }}" + dest: /usr/local/bin/{{ item }} mode: 0755 - with_items: + loop: - chsh - passwd diff --git a/roles/ldap_client/tasks/install_ldap.yml b/roles/ldap_client/tasks/install_ldap.yml new file mode 100644 index 0000000..3afc484 --- /dev/null +++ b/roles/ldap_client/tasks/install_ldap.yml @@ -0,0 +1,34 @@ +--- +# Install LDAP client packages +- name: Install LDAP client packages + apt: + update_cache: true + name: + - nslcd + - libnss-ldapd + - libpam-ldapd + - nscd # local cache + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded + +# Configure /etc/nslcd.conf +- name: Configure nslcd LDAP credentials + template: + src: nslcd.conf.j2 + dest: /etc/nslcd.conf + mode: 0600 + notify: Restart nslcd service + +# Configure /etc/nsswitch.conf +- name: Configure NSS to use LDAP + lineinfile: + dest: /etc/nsswitch.conf + regexp: "^{{ item }}:" + line: "{{ item }}: files ldap systemd" + loop: + - passwd + - group + - shadow + notify: Restart nslcd service diff --git a/roles/ldap-client/tasks/main.yml b/roles/ldap_client/tasks/main.yml similarity index 94% rename from roles/ldap-client/tasks/main.yml rename to roles/ldap_client/tasks/main.yml index c367dd6..8599950 100644 --- a/roles/ldap-client/tasks/main.yml +++ b/roles/ldap_client/tasks/main.yml @@ 
-1,6 +1,6 @@ --- # Install and configure main LDAP tools -- include_tasks: 0_install_ldap.yml +- include_tasks: install_ldap.yml # Filter who can access server and sudo on groups - include_tasks: 1_group_security.yml diff --git a/roles/ldap-client/templates/chsh.j2 b/roles/ldap_client/templates/chsh.j2 similarity index 100% rename from roles/ldap-client/templates/chsh.j2 rename to roles/ldap_client/templates/chsh.j2 diff --git a/roles/ldap_client/templates/nslcd.conf.j2 b/roles/ldap_client/templates/nslcd.conf.j2 new file mode 100644 index 0000000..e5b8841 --- /dev/null +++ b/roles/ldap_client/templates/nslcd.conf.j2 @@ -0,0 +1,62 @@ +# {{ ansible_managed }} + +# The user and group nslcd should run as. +uid nslcd +gid nslcd + +# The location at which the LDAP server(s) should be reachable. +{% if 'fleming_vm' in group_names or 'fleming_pve' in group_names %} +{% for uri in groups['ldap_replica_fleming'] %} +uri ldap://{{ uri }} +{% endfor %} +{% endif %} +{% if 'rives_vm' in group_names or 'rives_pve' in group_names %} +{% for uri in groups['ldap_replica_rives'] %} +uri ldap://{{ uri }} +{% endfor %} +{% endif %} +{% if 'pacaterie_vm' in group_names or 'pacaterie_pve' in group_names %} +{% for uri in groups['ldap_replica_pacaterie'] %} +uri ldap://{{ uri }} +{% endfor %} +{% endif %} +{% if 'edc_vm' in group_names or 'edc_pve' in group_names or 'edc_server' in group_names %} +{% for uri in groups['ldap_replica_edc'] %} +uri ldap://{{ uri }} +{% endfor %} +{% endif %} +{% if 'gs_vm' in group_names or 'gs_pve' in group_names %} +{% for uri in groups['ldap_replica_gs'] %} +uri ldap://{{ uri }} +{% endfor %} +{% endif %} +{% if 'ovh_vm' in group_names or 'ovh_container' in group_names or 'ovh_pve' in group_names %} +{% for uri in groups['ldap_replica_ovh'] %} +uri ldap://{{ uri }} +{% endfor %} +{% endif %} +uri {{ ldap_master_uri }} + +# The search base that will be used for all queries. +base {{ ldap_base }} +base passwd cn=Utilisateurs,{{ ldap_base }} +base shadow cn=Utilisateurs,{{ ldap_base }} +base group ou=posix,ou=groups,{{ ldap_base }} + +# The LDAP protocol version to use. +ldap_version 3 + +# The DN to bind with for normal lookups. +binddn {{ ldap_nslcd_bind_dn }} +bindpw {{ ldap_nslcd_passwd }} + +# The DN used for password modifications by root. +#rootpwmoddn cn=admin,dc=example,dc=com + +# SSL options +#ssl off +#tls_reqcert never +tls_cacertfile /etc/ssl/certs/ca-certificates.crt + +# The search scope. +#scope sub diff --git a/roles/ldap-client/templates/passwd.j2 b/roles/ldap_client/templates/passwd.j2 similarity index 100% rename from roles/ldap-client/templates/passwd.j2 rename to roles/ldap_client/templates/passwd.j2 diff --git a/roles/ldap-replica/tasks/main.yml b/roles/ldap_replica/tasks/main.yml similarity index 71% rename from roles/ldap-replica/tasks/main.yml rename to roles/ldap_replica/tasks/main.yml index 5face6d..cb79bd4 100644 --- a/roles/ldap-replica/tasks/main.yml +++ b/roles/ldap_replica/tasks/main.yml @@ -5,6 +5,9 @@ name: slapd state: present update_cache: true + register: apt_result + retries: 3 + until: apt_result is succeeded # What is written after is really not a nice way to install a schema # because the LDAP is being flushed away always... 
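
> The new `nslcd.conf.j2` template above selects its replica URIs from the `ldap_replica_*` inventory groups (depending on membership in `fleming_vm`, `rives_vm`, etc.) and falls back to `ldap_master_uri`; the bind settings come from `ldap_base`, `ldap_nslcd_bind_dn` and `ldap_nslcd_passwd`. As a hedged illustration only — the variable names are the ones the template uses, every value below is a placeholder, the real ones live in the existing (vaulted) vars — the expected shape is roughly:

```yaml
# Hypothetical sketch of the vars consumed by nslcd.conf.j2 -- placeholder values only.
ldap_master_uri: ldap://ldap.example.org          # placeholder, not the real master URI
ldap_base: dc=example,dc=org                      # placeholder base DN
ldap_nslcd_bind_dn: cn=nslcd,dc=example,dc=org    # placeholder bind DN
ldap_nslcd_passwd: "{{ vault_ldap_nslcd_passwd }}"  # assumed to be stored in the vault
```

> Which replica block gets rendered depends only on inventory group membership, so no extra variable is needed for that part.
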
@@ -25,22 +28,27 @@ # Cry a bit - name: Remove old data - file: path={{ item }} state=absent - with_items: + file: + path: "{{ item }}" + state: absent + loop: - /etc/ldap/slapd.d - /var/lib/ldap # Cry a lot - name: Recreate structure - file: path={{ item }} state=directory - with_items: + file: + path: "{{ item }}" + state: directory + mode: 0755 + loop: - /etc/ldap/slapd.d - /var/lib/ldap # Install schema as root # We can't do a `become_user` here - name: Install LDAP schema - command: 'slapadd -n 0 -l /etc/ldap/schema.ldiff -F /etc/ldap/slapd.d' + command: slapadd -n 0 -l /etc/ldap/schema.ldiff -F /etc/ldap/slapd.d # then fix permissions - name: Fix permissions @@ -48,11 +56,13 @@ path: "{{ item }}" owner: openldap group: openldap - recurse: yes - with_items: - - '/var/lib/ldap' - - '/etc/ldap/slapd.d' + recurse: true + loop: + - /var/lib/ldap + - /etc/ldap/slapd.d # Save the day - name: Start LDAP server - service: name=slapd state=started + service: + name: slapd + state: started diff --git a/roles/ldap-replica/templates/schema.ldiff.j2 b/roles/ldap_replica/templates/schema.ldiff.j2 similarity index 100% rename from roles/ldap-replica/templates/schema.ldiff.j2 rename to roles/ldap_replica/templates/schema.ldiff.j2 diff --git a/roles/matrix-mxisd/handlers/main.yml b/roles/matrix-mxisd/handlers/main.yml deleted file mode 100644 index 73933ed..0000000 --- a/roles/matrix-mxisd/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# Restart mxisd when configuration changes -- name: Restart mxisd service - service: - name: mxisd - state: restarted diff --git a/roles/matrix-mxisd/tasks/main.yml b/roles/matrix-mxisd/tasks/main.yml deleted file mode 100644 index d58dc22..0000000 --- a/roles/matrix-mxisd/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Install mxisd - apt: - deb: https://github.com/kamax-matrix/mxisd/releases/download/v{{ mxisd_version }}/mxisd_{{ mxisd_version }}_all.deb - -- name: Configure mxisd - template: - src: mxisd/mxisd.yaml.j2 - dest: /etc/mxisd/mxisd.yaml - mode: 0600 - owner: mxisd - notify: Restart mxisd service diff --git a/roles/matrix-mxisd/templates/mxisd/mxisd.yaml.j2 b/roles/matrix-mxisd/templates/mxisd/mxisd.yaml.j2 deleted file mode 100644 index be427c1..0000000 --- a/roles/matrix-mxisd/templates/mxisd/mxisd.yaml.j2 +++ /dev/null @@ -1,89 +0,0 @@ -# {{ ansible_managed }} - -####################### -# Matrix config items # -####################### -# Matrix domain, same as 'server_name' in synapse configuration. -matrix: - domain: 'auro.re' - - -################ -# Signing keys # -################ -# Absolute path for the Identity Server signing keys database. -# /!\ THIS MUST **NOT** BE YOUR HOMESERVER KEYS FILE /!\ -# If this path does not exist, it will be auto-generated. -key: - path: '/var/lib/mxisd/keys' - - -# Path to the SQLite DB file for mxisd internal storage -# /!\ THIS MUST **NOT** BE YOUR HOMESERVER DATABASE /!\ -storage: - provider: - sqlite: - database: '/var/lib/mxisd/store.db' - - -################### -# Identity Stores # -################### -ldap: - enabled: true - connection: - host: '{{ ldap_master_ipv4 }}' - port: 389 - bindDn: '{{ ldap_matrix_bind_dn }}' - bindPassword: '{{ ldap_matrix_password }}' - baseDNs: - - '{{ ldap_user_tree }}' - attribute: - uid: - type: 'uid' - value: 'uid' - name: 'uid' - - -################################################# -# Notifications for invites/addition to profile # -################################################# -# This is mandatory to deal with anything e-mail related. 
-# -# For an introduction to sessions, invites and 3PIDs in general, -# see https://github.com/kamax-matrix/mxisd/blob/master/docs/threepids/session/session.md#3pid-sessions -# -# If you would like to change the content of the notifications, -# see https://github.com/kamax-matrix/mxisd/blob/master/docs/threepids/notification/template-generator.md -# -#### E-mail connector -threepid: - medium: - email: - identity: - # The e-mail to send as. - from: "matrix@auro.re" - - connectors: - smtp: - # SMTP host - host: "smtp.crans.org" - - # SMTP port - port: 587 - - # STARTLS mode for the connection. - # SSL/TLS is currently not supported. See https://github.com/kamax-matrix/mxisd/issues/125 - # - # Possible values: - # 0 Disable any kind of TLS entirely - # 1 Enable STARTLS if supported by server (default) - # 2 Force STARTLS and fail if not available - # - tls: 1 - - # Login for SMTP - login: "matrix@auro.re" - - # Password for the account - password: "" diff --git a/roles/matrix-riot/handlers/main.yml b/roles/matrix-riot/handlers/main.yml deleted file mode 100644 index 4307d64..0000000 --- a/roles/matrix-riot/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# Reload the NGINX service -- name: Reload NGINX service - service: - name: nginx - state: reloaded diff --git a/roles/matrix-riot/tasks/main.yml b/roles/matrix-riot/tasks/main.yml deleted file mode 100644 index 007a449..0000000 --- a/roles/matrix-riot/tasks/main.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# Install HTTPS support for APT -- name: Install apt-transport-https - apt: - name: apt-transport-https - update_cache: true - -# Add the repository into source list -- name: Configure riot repository - apt_repository: - repo: "deb https://riot.im/packages/debian/ {{ ansible_distribution_release }} main" - -# Add the key -- name: Configure the apt key - apt_key: - url: https://riot.im/packages/debian/repo-key.asc - id: E019645248E8F4A1 - -# Install riot -- name: Install riot-web - apt: - name: riot-web - update_cache: true - -# Install nginx -- name: Install nginx - apt: - name: nginx - -# Configure nginx -- name: Configure nginx - template: - src: nginx-riot.j2 - dest: /etc/nginx/sites-available/riot - mode: 0644 - notify: Reload NGINX service - -# Desactive useless nginx sites -- name: Deactivate the default NGINX site - file: - path: /etc/nginx/sites-enabled/default - state: absent - notify: Reload NGINX service - -# Activate sites -- name: Activate sites - file: - src: /etc/nginx/sites-available/riot - dest: /etc/nginx/sites-enabled/riot - state: link - notify: Reload NGINX service diff --git a/roles/matrix-riot/templates/nginx-riot.j2 b/roles/matrix-riot/templates/nginx-riot.j2 deleted file mode 100644 index c5d128c..0000000 --- a/roles/matrix-riot/templates/nginx-riot.j2 +++ /dev/null @@ -1,20 +0,0 @@ -# {{ ansible_managed }} - -server { - listen 80; - listen [::]:80; - - root /opt/Riot/resources/webapp/; - index index.html; - - access_log /var/log/nginx/riot-access.log; - error_log /var/log/nginx/riot-errors.log; - - add_header X-Content-Type-Options nosniff; - add_header X-XSS-Protection "1; mode=block"; - add_header X-Frame-Options "SAMEORIGIN" always; - - location / { - try_files $uri $uri/ =404; - } -} diff --git a/roles/matrix-synapse/templates/matrix-synapse/conf.d/password_providers.yaml.j2 b/roles/matrix-synapse/templates/matrix-synapse/conf.d/password_providers.yaml.j2 deleted file mode 100644 index 8281be3..0000000 --- a/roles/matrix-synapse/templates/matrix-synapse/conf.d/password_providers.yaml.j2 +++ /dev/null @@ -1,5 
+0,0 @@ -# {{ ansible_managed }} -password_providers: - - module: "rest_auth_provider.RestAuthProvider" - config: - endpoint: "http://127.0.0.1:8090" diff --git a/roles/matrix_appservice_irc/defaults/main.yml b/roles/matrix_appservice_irc/defaults/main.yml new file mode 100644 index 0000000..845dbab --- /dev/null +++ b/roles/matrix_appservice_irc/defaults/main.yml @@ -0,0 +1,15 @@ +--- +# service_name is the name of the project on GitHub +service_name: matrix-appservice-irc + +# URL to clone +service_repo: https://github.com/matrix-org/matrix-appservice-irc.git + +# name of the service user +# It means that you will have to `sudo -u THISUSER zsh` to debug +service_user: "{{ service_name }}" +service_homedir: "/var/local/{{ service_name }}" + +# service_path is where the project is cloned +# It can't be the home directory because of user hidden files. +service_path: "{{ service_homedir }}/{{ service_name }}" diff --git a/roles/matrix_appservice_irc/tasks/main.yml b/roles/matrix_appservice_irc/tasks/main.yml new file mode 100644 index 0000000..36d931c --- /dev/null +++ b/roles/matrix_appservice_irc/tasks/main.yml @@ -0,0 +1,60 @@ +--- +# Create service user +- include_tasks: service_user.yml + +- name: "Clone {{ service_name }} project" + git: + repo: "{{ service_repo }}" + dest: "{{ service_path }}" + version: 0.11.2 + become: true + become_user: "{{ service_user }}" + +# Setup dependencies +- name: "Install {{ service_name }} dependencies" + npm: + path: "{{ service_path }}" + production: true + become: true + become_user: "{{ service_user }}" + register: npm_result + retries: 3 + until: npm_result is succeeded + +- name: "Configure {{ service_name }}" + template: + src: config.yaml.j2 + dest: "{{ service_path }}/config.yaml" + owner: "{{ service_user }}" + group: nogroup + mode: 0600 + +# Service file +- name: "Install {{ service_name }} systemd unit" + template: + src: systemd/appservice.service.j2 + dest: "/etc/systemd/system/{{ service_name }}.service" + owner: root + group: root + mode: 0644 + +# TODO generate registration +# node app.js -r -f irc-registration.yaml \ +# -u "http://localhost:9999" -c config.yaml -l ircbot + +- name: Copy appservice registration file + copy: + src: "{{ service_path }}/irc-registration.yaml" + dest: "/etc/matrix-synapse/{{ service_name }}-registration.yaml" + owner: matrix-synapse + group: nogroup + mode: 0600 + remote_src: true + +# Run +- name: "Ensure that {{ service_name }} is started" + systemd: + name: "{{ service_name }}" + state: started + enabled: true + daemon_reload: true diff --git a/roles/matrix_appservice_irc/tasks/service_user.yml b/roles/matrix_appservice_irc/tasks/service_user.yml new file mode 100644 index 0000000..0818676 --- /dev/null +++ b/roles/matrix_appservice_irc/tasks/service_user.yml @@ -0,0 +1,19 @@ +--- +# Having a custom group is useless so use nogroup +- name: "Create {{ service_user }} user" + user: + name: "{{ service_user }}" + group: nogroup + home: "{{ service_homedir }}" + system: true + shell: /bin/false + state: present + +# Only service user should be able to go there +- name: "Secure {{ service_user }} home directory" + file: + path: "{{ service_homedir }}" + state: directory + owner: "{{ service_user }}" + group: nogroup + mode: 0700 diff --git a/roles/matrix_appservice_irc/templates/config.yaml.j2 b/roles/matrix_appservice_irc/templates/config.yaml.j2 new file mode 100644 index 0000000..5112d96 --- /dev/null +++ b/roles/matrix_appservice_irc/templates/config.yaml.j2 @@ -0,0 +1,431 @@ +# {{ ansible_managed }} + 
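
> A note on the TODO in the tasks file above: registration generation is still manual, and the command is only quoted in a comment. A hedged sketch of how that step could be wrapped as an idempotent task, reusing exactly the command from that comment — the `creates:` guard and the use of `/usr/bin/nodejs` (mirroring the systemd unit) are assumptions, not something this role currently does:

```yaml
# Sketch only: wraps the registration command quoted in the TODO above.
- name: "Generate {{ service_name }} registration file"
  command: >
    /usr/bin/nodejs app.js -r -f irc-registration.yaml
    -u "http://localhost:9999" -c config.yaml -l ircbot
  args:
    chdir: "{{ service_path }}"
    creates: "{{ service_path }}/irc-registration.yaml"  # skip if already generated
  become: true
  become_user: "{{ service_user }}"
```
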
+homeserver: + # The URL to the home server for client-server API calls, also used to form the + # media URLs as displayed in bridged IRC channels: + url: "http://auro.re" + # + # The URL of the homeserver hosting media files. This is only used to transform + # mxc URIs to http URIs when bridging m.room.[file|image] events. Optional. By + # default, this is the homeserver URL, specified above. + # + media_url: "https://auro.re" + + # Drop Matrix messages which are older than this number of seconds, according to + # the event's origin_server_ts. + # If the bridge is down for a while, the homeserver will attempt to send all missed + # events on reconnection. These events may be hours old, which can be confusing to + # IRC users if they are then bridged. This option allows these old messages to be + # dropped. + # CAUTION: This is a very coarse heuristic. Federated homeservers may have different + # clock times and hence produce different origin_server_ts values, which may be old + # enough to cause *all* events from the homeserver to be dropped. + # Default: 0 (don't ever drop) + dropMatrixMessagesAfterSecs: 300 # 5 minutes + + # The 'domain' part for user IDs on this home server. Usually (but not always) + # is the "domain name" part of the HS URL. + domain: "auro.re" + + # Should presence be enabled for matrix clients on this bridge. If disabled on the + # homeserver then it should also be disabled here to avoid excess traffic. + # Default: true + enablePresence: true + +# Configuration specific to the IRC service +ircService: + servers: + # The address of the server to connect to. + irc.crans.org: + # A human-readable short name. This is used to label IRC status rooms + # where matrix users control their connections. + # E.g. 'ExampleNet IRC Bridge status'. + # It is also used in the Third Party Lookup API as the instance `desc` + # property, where each server is an instance. + name: "Crans IRC Bridge status" + + additionalAddresses: [ ] + # + # [DEPRECATED] Use `name`, above, instead. + # A human-readable description string + # description: "Example.com IRC network" + + # An ID for uniquely identifying this server amongst other servers being bridged. + networkId: "crans" + + # URL to an icon used as the network icon whenever this network appear in + # a network list. (Like in the riot room directory, for instance.) + # icon: https://example.com/images/hash.png + + # The port to connect to. Optional. + port: 6697 + # Whether to use SSL or not. Default: false. + ssl: true + # Whether or not IRC server is using a self-signed cert or not providing CA Chain + sslselfsign: true + # Should the connection attempt to identify via SASL (if a server or user password is given) + # If false, this will use PASS instead. If SASL fails, we do not fallback to PASS. + sasl: false + # Whether to allow expired certs when connecting to the IRC server. + # Usually this should be off. Default: false. + allowExpiredCerts: false + # A specific CA to trust instead of the default CAs. Optional. + #ca: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + + # + # The connection password to send for all clients as a PASS (or SASL, if enabled above) command. Optional. + # password: 'pa$$w0rd' + # + # Whether or not to send connection/error notices to real Matrix users. Default: true. + sendConnectionMessages: true + + quitDebounce: + # Whether parts due to net-splits are debounced for delayMs, to allow + # time for the netsplit to resolve itself. 
A netsplit is detected as being + # a QUIT rate higher than quitsPerSecond. Default: false. + enabled: false + # The maximum number of quits per second acceptable above which a netsplit is + # considered ongoing. Default: 5. + quitsPerSecond: 5 + # The time window in which to wait before bridging a QUIT to Matrix that occurred during + # a netsplit. Debouncing is jittered randomly between delayMinMs and delayMaxMs so that the HS + # is not sent many requests to leave rooms all at once if a netsplit occurs and many + # people do not rejoin. + # If the user with the same IRC nick as the one who sent the quit rejoins a channel + # they are considered back online and the quit is not bridged, so long as the rejoin + # occurs before the randomly-jittered timeout is reached. + # Default: 3600000, = 1h + delayMinMs: 3600000 # 1h + # Default: 7200000, = 2h + delayMaxMs: 7200000 # 2h + + # A map for conversion of IRC user modes to Matrix power levels. This enables bridging + # of IRC ops to Matrix power levels only, it does not enable the reverse. If a user has + # been given multiple modes, the one that maps to the highest power level will be used. + modePowerMap: + o: 50 + + botConfig: + # Enable the presence of the bot in IRC channels. The bot serves as the entity + # which maps from IRC -> Matrix. You can disable the bot entirely which + # means IRC -> Matrix chat will be shared by active "M-Nick" connections + # in the room. If there are no users in the room (or if there are users + # but their connections are not on IRC) then nothing will be bridged to + # Matrix. If you're concerned about the bot being treated as a "logger" + # entity, then you may want to disable the bot. If you want IRC->Matrix + # but don't want to have TCP connections to IRC unless a Matrix user speaks + # (because your client connection limit is low), then you may want to keep + # the bot enabled. Default: true. + # NB: If the bot is disabled, you SHOULD have matrix-to-IRC syncing turned + # on, else there will be no users and no bot in a channel (meaning no + # messages to Matrix!) until a Matrix user speaks which makes a client + # join the target IRC channel. + # NBB: The bridge bot IRC client will still join the target IRC network so + # it can service bridge-specific queries from the IRC-side e.g. so + # real IRC clients have a way to change their Matrix display name. + # See https://github.com/matrix-org/matrix-appservice-irc/issues/55 + enabled: false + # The nickname to give the AS bot. + nick: "AuroreBot" + # The password to give to NickServ or IRC Server for this nick. Optional. + # password: "helloworld" + # + # Join channels even if there are no Matrix users on the other side of + # the bridge. Set to false to prevent the bot from joining channels which have no + # real matrix users in them, even if there is a mapping for the channel. + # Default: true + joinChannelsIfNoUsers: true + + # Configuration for PMs / private 1:1 communications between users. + privateMessages: + # Enable the ability for PMs to be sent to/from IRC/Matrix. + # Default: true. + enabled: true + # Prevent Matrix users from sending PMs to the following IRC nicks. + # Optional. Default: []. + # exclude: ["Alice", "Bob"] # NOT YET IMPLEMENTED + + # Should created Matrix PM rooms be federated? If false, only users on the + # HS attached to this AS will be able to interact with this room. + # Optional. Default: true. + federate: true + + # Configuration for mappings not explicitly listed in the 'mappings' + # section. 
+ dynamicChannels: + # Enable the ability for Matrix users to join *any* channel on this IRC + # network. + # Default: false. + enabled: true + # Should the AS create a room alias for the new Matrix room? The form of + # the alias can be modified via 'aliasTemplate'. Default: true. + createAlias: true + # Should the AS publish the new Matrix room to the public room list so + # anyone can see it? Default: true. + published: true + # What should the join_rule be for the new Matrix room? If 'public', + # anyone can join the room. If 'invite', only users with an invite can + # join the room. Note that if an IRC channel has +k or +i set on it, + # join_rules will be set to 'invite' until these modes are removed. + # Default: "public". + joinRule: public + # This will set the m.room.related_groups state event in newly created rooms + # with the given groupId. This means flares will show up on IRC users in those rooms. + # This should be set to the same thing as namespaces.users.group_id in irc_registration. + # This does not alter existing rooms. + # Leaving this option empty will not set the event. + groupId: +myircnetwork:localhost + # Should created Matrix rooms be federated? If false, only users on the + # HS attached to this AS will be able to interact with this room. + # Default: true. + federate: true + # The room alias template to apply when creating new aliases. This only + # applies if createAlias is 'true'. The following variables are exposed: + # $SERVER => The IRC server address (e.g. "irc.example.com") + # $CHANNEL => The IRC channel (e.g. "#python") + # This MUST have $CHANNEL somewhere in it. + # Default: '#irc_$SERVER_$CHANNEL' + aliasTemplate: "#irc_crans_$CHANNEL" + # A list of user IDs which the AS bot will send invites to in response + # to a !join. Only applies if joinRule is 'invite'. Default: [] + # whitelist: + # - "@foo:example.com" + # - "@bar:example.com" + # + # Prevent the given list of channels from being mapped under any + # circumstances. + # exclude: ["#foo", "#bar"] + + # Configuration for controlling how Matrix and IRC membership lists are + # synced. + membershipLists: + # Enable the syncing of membership lists between IRC and Matrix. This + # can have a significant effect on performance on startup as the lists are + # synced. This must be enabled for anything else in this section to take + # effect. Default: false. + enabled: true + + # Syncing membership lists at startup can result in hundreds of members to + # process all at once. This timer drip feeds membership entries at the + # specified rate. Default: 10000. (10s) + floodDelayMs: 10000 + + global: + ircToMatrix: + # Get a snapshot of all real IRC users on a channel (via NAMES) and + # join their virtual matrix clients to the room. + initial: true + # Make virtual matrix clients join and leave rooms as their real IRC + # counterparts join/part channels. Default: false. + incremental: true + + matrixToIrc: + # Get a snapshot of all real Matrix users in the room and join all of + # them to the mapped IRC channel on startup. Default: false. + initial: true + # Make virtual IRC clients join and leave channels as their real Matrix + # counterparts join/leave rooms. Make sure your 'maxClients' value is + # high enough! Default: false. + incremental: true + + # Configuration for virtual matrix users. The following variables are + # exposed: + # $NICK => The IRC nick + # $SERVER => The IRC server address (e.g. 
"irc.example.com") + matrixClients: + # The user ID template to use when creating virtual matrix users. This + # MUST have $NICK somewhere in it. + # Optional. Default: "@$SERVER_$NICK". + # Example: "@irc.example.com_Alice:example.com" + userTemplate: "@irc_$NICK" + # The display name to use for created matrix clients. This should have + # $NICK somewhere in it if it is specified. Can also use $SERVER to + # insert the IRC domain. + # Optional. Default: "$NICK (IRC)". Example: "Alice (IRC)" + displayName: "$NICK (IRC)" + # Number of tries a client can attempt to join a room before the request + # is discarded. You can also use -1 to never retry or 0 to never give up. + # Optional. Default: -1 + joinAttempts: -1 + + # Configuration for virtual IRC users. The following variables are exposed: + # $LOCALPART => The user ID localpart ("alice" in @alice:localhost) + # $USERID => The user ID + # $DISPLAY => The display name of this user, with excluded characters + # (e.g. space) removed. If the user has no display name, this + # falls back to $LOCALPART. + ircClients: + # The template to apply to every IRC client nick. This MUST have either + # $DISPLAY or $USERID or $LOCALPART somewhere in it. + # Optional. Default: "M-$DISPLAY". Example: "M-Alice". + nickTemplate: "$DISPLAY[m]" + # True to allow virtual IRC clients to change their nick on this server + # by issuing !nick commands to the IRC AS bot. + # This is completely freeform: it will NOT follow the nickTemplate. + allowNickChanges: true + # The max number of IRC clients that will connect. If the limit is + # reached, the client that spoke the longest time ago will be + # disconnected and replaced. + # Optional. Default: 30. + maxClients: 30 + # IPv6 configuration. + ipv6: + # Optional. Set to true to force IPv6 for outgoing connections. + only: false + # Optional. The IPv6 prefix to use for generating unique addresses for each + # connected user. If not specified, all users will connect from the same + # (default) address. This may require additional OS-specific work to allow + # for the node process to bind to multiple different source addresses + # e.g IP_FREEBIND on Linux, which requires an LD_PRELOAD with the library + # https://github.com/matrix-org/freebindfree as Node does not expose setsockopt. + # prefix: "2001:0db8:85a3::" # modify appropriately + # + # The maximum amount of time in seconds that the client can exist + # without sending another message before being disconnected. Use 0 to + # not apply an idle timeout. This value is ignored if this IRC server is + # mirroring matrix membership lists to IRC. Default: 172800 (48 hours) + idleTimeout: 10800 + # The number of millseconds to wait between consecutive reconnections if a + # client gets disconnected. Setting to 0 will cause the scheduling to be + # disabled, i.e. it will be scheduled immediately (with jitter. + # Otherwise, the scheduling interval will be used such that one client + # reconnect for this server will be handled every reconnectIntervalMs ms using + # a FIFO queue. + # Default: 5000 (5 seconds) + reconnectIntervalMs: 5000 + # The number of concurrent reconnects if a user has been disconnected unexpectedly + # (e.g. a netsplit). You should set this to a reasonably high number so that + # bridges are not waiting an eternity to reconnect all its clients if + # we see a massive number of disconnect. This is unrelated to the reconnectIntervalMs + # setting above which is for connecting on restart of the bridge. 
Set to 0 to + # immediately try to reconnect all users. + # Default: 50 + concurrentReconnectLimit: 50 + # The number of lines to allow being sent by the IRC client that has received + # a large block of text to send from matrix. If the number of lines that would + # be sent is > lineLimit, the text will instead be uploaded to matrix and the + # resulting URI is treated as a file. As such, a link will be sent to the IRC + # side instead of potentially spamming IRC and getting the IRC client kicked. + # Default: 3. + lineLimit: 3 + # A list of user modes to set on every IRC client. For example, "RiG" would set + # +R, +i and +G on every IRC connection when they have successfully connected. + # User modes vary wildly depending on the IRC network you're connecting to, + # so check before setting this value. Some modes may not work as intended + # through the bridge e.g. caller ID as there is no way to /ACCEPT. + # Default: "" (no user modes) + # userModes: "R" + + # Configuration for an ident server. If you are running a public bridge it is + # advised you setup an ident server so IRC mods can ban specific matrix users + # rather than the application service itself. + ident: + # True to listen for Ident requests and respond with the + # matrix user's user_id (converted to ASCII, respecting RFC 1413). + # Default: false. + enabled: false + # The port to listen on for incoming ident requests. + # Ports below 1024 require root to listen on, and you may not want this to + # run as root. Instead, you can get something like an Apache to yank up + # incoming requests to 113 to a high numbered port. Set the port to listen + # on instead of 113 here. + # Default: 113. + port: 1113 + # The address to listen on for incoming ident requests. + # Default: 0.0.0.0 + address: "::" + + # Configuration for logging. Optional. Default: console debug level logging + # only. + logging: + # Level to log on console/logfile. One of error|warn|info|debug + level: "info" + # The file location to log to. This is relative to the project directory. + logfile: "debug.log" + # The file location to log errors to. This is relative to the project + # directory. + errfile: "errors.log" + # Whether to log to the console or not. + toConsole: true + # The max number of files to keep. Files will be overwritten eventually due + # to rotations. + maxFiles: 5 + + # Optional. Enable Prometheus metrics. If this is enabled, you MUST install `prom-client`: + # $ npm install prom-client@6.3.0 + # Metrics will then be available via GET /metrics on the bridge listening port (-p). + metrics: + # Whether to actually enable the metric endpoint. Default: false + enabled: false + # When collecting remote user active times, which "buckets" should be used. Defaults are given below. + # The bucket name is formed of a duration and a period. (h=hours,d=days,w=weeks). + remoteUserAgeBuckets: + - "1h" + - "1d" + - "1w" + + # The nedb database URI to connect to. This is the name of the directory to + # dump .db files to. This is relative to the project directory. + # Required. + databaseUri: "nedb://data" + + # Configuration options for the debug HTTP API. To access this API, you must + # append ?access_token=$APPSERVICE_TOKEN (from the registration file) to the requests. + # + # The debug API exposes the following endpoints: + # + # GET /irc/$domain/user/$user_id => Return internal state for the IRC client for this user ID. + # + # POST /irc/$domain/user/$user_id => Issue a raw IRC command down this connection. 
+ # Format: new line delimited commands as per IRC protocol. + # + debugApi: + # True to enable the HTTP API endpoint. Default: false. + enabled: false + # The port to host the HTTP API. + port: 11100 + + # Configuration for the provisioning API. + # + # GET /_matrix/provision/link + # GET /_matrix/provision/unlink + # GET /_matrix/provision/listlinks + # + provisioning: + # True to enable the provisioning HTTP endpoint. Default: false. + enabled: false + # The number of seconds to wait before giving up on getting a response from + # an IRC channel operator. If the channel operator does not respond within the + # allotted time period, the provisioning request will fail. + # Default: 300 seconds (5 mins) + requestTimeoutSeconds: 300 + + # WARNING: The bridge needs to send plaintext passwords to the IRC server, it cannot + # send a password hash. As a result, passwords (NOT hashes) are stored encrypted in + # the database. + # + # To generate a .pem file: + # $ openssl genpkey -out passkey.pem -outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:2048 + # + # The path to the RSA PEM-formatted private key to use when encrypting IRC passwords + # for storage in the database. Passwords are stored by using the admin room command + # `!storepass server.name passw0rd. When a connection is made to IRC on behalf of + # the Matrix user, this password will be sent as the server password (PASS command). + passwordEncryptionKeyPath: "passkey.pem" + + # Config for Matrix -> IRC bridging + matrixHandler: + # Cache this many matrix events in memory to be used for m.relates_to messages (usually replies). + eventCacheSize: 4096 + +# Options here are generally only applicable to large-scale bridges and may have +# consequences greater than other options in this configuration file. +advanced: + # The maximum number of HTTP(S) sockets to maintain. Usually this is unlimited + # however for large bridges it is important to rate limit the bridge to avoid + # accidentally overloading the homeserver. Defaults to 1000, which should be + # enough for the vast majority of use cases. 
+ maxHttpSockets: 1000 diff --git a/roles/codimd/templates/systemd/codimd.service.j2 b/roles/matrix_appservice_irc/templates/systemd/appservice.service.j2 similarity index 51% rename from roles/codimd/templates/systemd/codimd.service.j2 rename to roles/matrix_appservice_irc/templates/systemd/appservice.service.j2 index 8468dfd..c686551 100644 --- a/roles/codimd/templates/systemd/codimd.service.j2 +++ b/roles/matrix_appservice_irc/templates/systemd/appservice.service.j2 @@ -1,18 +1,17 @@ # {{ ansible_managed }} [Unit] -Description=CodiMD +Description=A bridge between Matrix and IRC After=syslog.target network-online.target mysql.service postgresql.service Conflicts=shutdown.target [Service] Type=simple -User=codimd -Group=codimd -WorkingDirectory=/var/local/codimd/codimd -Environment="NODE_ENV=production" -ExecStart=/usr/bin/nodejs /var/local/codimd/codimd/app.js +User={{ service_user }} +WorkingDirectory={{ service_path }} +ExecStart=/usr/bin/nodejs ./app.js -c config.yaml -f irc-registration.yaml -p 9999 Restart=always +RestartSec=3 [Install] WantedBy=multi-user.target diff --git a/roles/matrix_appservice_webhooks/defaults/main.yml b/roles/matrix_appservice_webhooks/defaults/main.yml new file mode 100644 index 0000000..e4425c8 --- /dev/null +++ b/roles/matrix_appservice_webhooks/defaults/main.yml @@ -0,0 +1,15 @@ +--- +# service_name is the name of the project on GitHub +service_name: matrix-appservice-webhooks + +# URL to clone +service_repo: https://github.com/turt2live/matrix-appservice-webhooks.git + +# name of the service user +# It means that you will have to `sudo -u THISUSER zsh` to debug +service_user: "{{ service_name }}" +service_homedir: "/var/local/{{ service_name }}" + +# service_path is where the project is cloned +# It can't be the home directory because of user hidden files. 
+service_path: "{{ service_homedir }}/{{ service_name }}" diff --git a/roles/matrix_appservice_webhooks/tasks/main.yml b/roles/matrix_appservice_webhooks/tasks/main.yml new file mode 100644 index 0000000..0afa419 --- /dev/null +++ b/roles/matrix_appservice_webhooks/tasks/main.yml @@ -0,0 +1,58 @@ +--- +# Create service user +- include_tasks: service_user.yml + +- name: "Clone {{ service_name }} project" + git: + repo: "{{ service_repo }}" + dest: "{{ service_path }}" + version: master + become: true + become_user: "{{ service_user }}" + +# Setup dependencies +- name: "Install {{ service_name }} dependencies" + npm: + path: "{{ service_path }}" + production: true + become: true + become_user: "{{ service_user }}" + register: npm_result + retries: 3 + until: npm_result is succeeded + +- name: "Configure {{ service_name }}" + template: + src: config.yaml.j2 + dest: "{{ service_path }}/config/config.yaml" + owner: "{{ service_user }}" + group: nogroup + mode: 0600 + +# Service file +- name: "Install {{ service_name }} systemd unit" + template: + src: systemd/appservice.service.j2 + dest: "/etc/systemd/system/{{ service_name }}.service" + owner: root + group: root + mode: 0644 + +# TODO generate registration + +- name: Copy appservice registration file + copy: + src: "{{ service_path }}/appservice-registration-webhooks.yaml" + dest: "/etc/matrix-synapse/{{ service_name }}-registration.yaml" + owner: matrix-synapse + group: nogroup + mode: 0600 + remote_src: true + +# Run +- name: "Ensure that {{ service_name }} is started" + systemd: + name: "{{ service_name }}" + state: started + enabled: true + daemon_reload: true diff --git a/roles/matrix_appservice_webhooks/tasks/service_user.yml b/roles/matrix_appservice_webhooks/tasks/service_user.yml new file mode 100644 index 0000000..0818676 --- /dev/null +++ b/roles/matrix_appservice_webhooks/tasks/service_user.yml @@ -0,0 +1,19 @@ +--- +# Having a custom group is useless so use nogroup +- name: "Create {{ service_user }} user" + user: + name: "{{ service_user }}" + group: nogroup + home: "{{ service_homedir }}" + system: true + shell: /bin/false + state: present + +# Only service user should be able to go there +- name: "Secure {{ service_user }} home directory" + file: + path: "{{ service_homedir }}" + state: directory + owner: "{{ service_user }}" + group: nogroup + mode: 0700 diff --git a/roles/matrix_appservice_webhooks/templates/config.yaml.j2 b/roles/matrix_appservice_webhooks/templates/config.yaml.j2 new file mode 100644 index 0000000..9355aff --- /dev/null +++ b/roles/matrix_appservice_webhooks/templates/config.yaml.j2 @@ -0,0 +1,39 @@ +# {{ ansible_managed }} + +# Configuration specific to the application service. All fields (unless otherwise marked) are required. +homeserver: + # The domain for the client-server API calls. + url: "http://localhost:8008" + + # The domain part for user IDs on this home server. Usually, but not always, this is the same as the + # home server's URL. + domain: "auro.re" + +# Configuration specific to the bridge. All fields (unless otherwise marked) are required. +webhookBot: + # The localpart to use for the bot. May require re-registering the application service. + localpart: "_webhook" + + # Appearance options for the Matrix bot + appearance: + displayName: "Webhook Bridge" + avatarUrl: "http://i.imgur.com/IDOBtEJ.png" # webhook icon + +# Provisioning API options +provisioning: + # Your secret for the API. Required for all provisioning API requests. 
+ secret: '{{ matrix_webhooks_secret }}' + +# Configuration related to the web portion of the bridge. Handles the inbound webhooks +web: + hookUrlBase: 'https://auro.re:9442/' + +logging: + file: logs/webhook.log + console: true + consoleLevel: info + fileLevel: verbose + writeFiles: true + rotate: + size: 52428800 # bytes, default is 50mb + count: 5 diff --git a/roles/matrix_appservice_webhooks/templates/systemd/appservice.service.j2 b/roles/matrix_appservice_webhooks/templates/systemd/appservice.service.j2 new file mode 100644 index 0000000..48239a8 --- /dev/null +++ b/roles/matrix_appservice_webhooks/templates/systemd/appservice.service.j2 @@ -0,0 +1,17 @@ +# {{ ansible_managed }} + +[Unit] +Description=A bridge between Matrix and WebHooks +After=syslog.target network-online.target mysql.service postgresql.service +Conflicts=shutdown.target + +[Service] +Type=simple +User={{ service_user }} +WorkingDirectory={{ service_path }} +ExecStart=/usr/bin/nodejs index.js -p 9000 -c config/config.yaml -f appservice-registration-webhooks.yaml +Restart=always +RestartSec=3 + +[Install] +WantedBy=multi-user.target diff --git a/roles/matrix_synapse/files/rest_auth_provider.py b/roles/matrix_synapse/files/rest_auth_provider.py new file mode 100644 index 0000000..1d582d9 --- /dev/null +++ b/roles/matrix_synapse/files/rest_auth_provider.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- +# +# REST endpoint Authentication module for Matrix synapse +# Copyright (C) 2017 Maxime Dor +# +# https://max.kamax.io/ +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. 
+# + +import logging +from twisted.internet import defer +import requests +import json + +logger = logging.getLogger(__name__) + +class RestAuthProvider(object): + + def __init__(self, config, account_handler): + self.account_handler = account_handler + + if not config.endpoint: + raise RuntimeError('Missing endpoint config') + + self.endpoint = config.endpoint + self.regLower = config.regLower + self.config = config + + logger.info('Endpoint: %s', self.endpoint) + logger.info('Enforce lowercase username during registration: %s', self.regLower) + + @defer.inlineCallbacks + def check_password(self, user_id, password): + logger.info("Got password check for " + user_id) + data = {'user':{'id':user_id, 'password':password}} + r = requests.post(self.endpoint + '/_matrix-internal/identity/v1/check_credentials', json = data) + r.raise_for_status() + r = r.json() + if not r["auth"]: + reason = "Invalid JSON data returned from REST endpoint" + logger.warning(reason) + raise RuntimeError(reason) + + auth = r["auth"] + if not auth["success"]: + logger.info("User not authenticated") + defer.returnValue(False) + + localpart = user_id.split(":", 1)[0][1:] + logger.info("User %s authenticated", user_id) + + registration = False + if not (yield self.account_handler.check_user_exists(user_id)): + logger.info("User %s does not exist yet, creating...", user_id) + + if localpart != localpart.lower() and self.regLower: + logger.info('User %s was cannot be created due to username lowercase policy', localpart) + defer.returnValue(False) + + user_id, access_token = (yield self.account_handler.register(localpart=localpart)) + registration = True + logger.info("Registration based on REST data was successful for %s", user_id) + else: + logger.info("User %s already exists, registration skipped", user_id) + + if auth["profile"]: + logger.info("Handling profile data") + profile = auth["profile"] + + store = yield self.account_handler.hs.get_profile_handler().store + if "display_name" in profile and ((registration and self.config.setNameOnRegister) or (self.config.setNameOnLogin)): + display_name = profile["display_name"] + logger.info("Setting display name to '%s' based on profile data", display_name) + yield store.set_profile_displayname(localpart, display_name) + else: + logger.info("Display name was not set because it was not given or policy restricted it") + + if (self.config.updateThreepid): + if "three_pids" in profile: + logger.info("Handling 3PIDs") + for threepid in profile["three_pids"]: + medium = threepid["medium"].lower() + address = threepid["address"].lower() + logger.info("Looking for 3PID %s:%s in user profile", medium, address) + + validated_at = self.account_handler.hs.get_clock().time_msec() + if not (yield store.get_user_id_by_threepid(medium, address)): + logger.info("3PID is not present, adding") + yield store.user_add_threepid( + user_id, + medium, + address, + validated_at, + validated_at + ) + else: + logger.info("3PID is present, skipping") + else: + logger.info("3PIDs were not updated due to policy") + else: + logger.info("No profile data") + + defer.returnValue(True) + + @staticmethod + def parse_config(config): + # verify config sanity + _require_keys(config, ["endpoint"]) + + class _RestConfig(object): + endpoint = '' + regLower = True + setNameOnRegister = True + setNameOnLogin = False + updateThreepid = True + + rest_config = _RestConfig() + rest_config.endpoint = config["endpoint"] + + try: + rest_config.regLower = config['policy']['registration']['username']['enforceLowercase'] + 
except TypeError: + # we don't care + pass + except KeyError: + # we don't care + pass + + try: + rest_config.setNameOnRegister = config['policy']['registration']['profile']['name'] + except TypeError: + # we don't care + pass + except KeyError: + # we don't care + pass + + try: + rest_config.setNameOnLogin = config['policy']['login']['profile']['name'] + except TypeError: + # we don't care + pass + except KeyError: + # we don't care + pass + + try: + rest_config.updateThreepid = config['policy']['all']['threepid']['update'] + except TypeError: + # we don't care + pass + except KeyError: + # we don't care + pass + + return rest_config + +def _require_keys(config, required): + missing = [key for key in required if key not in config] + if missing: + raise Exception( + "REST Auth enabled but missing required config values: {}".format( + ", ".join(missing) + ) + ) + diff --git a/roles/matrix-synapse/handlers/main.yml b/roles/matrix_synapse/handlers/main.yml similarity index 100% rename from roles/matrix-synapse/handlers/main.yml rename to roles/matrix_synapse/handlers/main.yml diff --git a/roles/matrix-synapse/tasks/main.yml b/roles/matrix_synapse/tasks/main.yml similarity index 68% rename from roles/matrix-synapse/tasks/main.yml rename to roles/matrix_synapse/tasks/main.yml index 06e44bc..f5339b9 100644 --- a/roles/matrix-synapse/tasks/main.yml +++ b/roles/matrix_synapse/tasks/main.yml @@ -1,9 +1,15 @@ --- - name: Install matrix-synapse apt: - name: matrix-synapse update_cache: true + name: + - matrix-synapse-py3 + - matrix-synapse-ldap3 + state: present default_release: stretch-backports + register: apt_result + retries: 3 + until: apt_result is succeeded - name: Configure matrix-synapse template: @@ -20,10 +26,3 @@ - server_name.yaml - trusted_third_party_id_servers.yaml notify: Restart matrix-synapse service - -- name: Install rest auth provider - get_url: - url: "{{ synapse_rest_auth_url }}" - dest: /usr/local/lib/python3.5/dist-packages/rest_auth_provider.py - mode: 0755 - notify: Restart matrix-synapse service diff --git a/roles/matrix-synapse/templates/matrix-synapse/conf.d/database.yaml.j2 b/roles/matrix_synapse/templates/matrix-synapse/conf.d/database.yaml.j2 similarity index 100% rename from roles/matrix-synapse/templates/matrix-synapse/conf.d/database.yaml.j2 rename to roles/matrix_synapse/templates/matrix-synapse/conf.d/database.yaml.j2 diff --git a/roles/matrix-synapse/templates/matrix-synapse/conf.d/enable_group_creation.yaml.j2 b/roles/matrix_synapse/templates/matrix-synapse/conf.d/enable_group_creation.yaml.j2 similarity index 100% rename from roles/matrix-synapse/templates/matrix-synapse/conf.d/enable_group_creation.yaml.j2 rename to roles/matrix_synapse/templates/matrix-synapse/conf.d/enable_group_creation.yaml.j2 diff --git a/roles/matrix-synapse/templates/matrix-synapse/conf.d/listeners.yaml.j2 b/roles/matrix_synapse/templates/matrix-synapse/conf.d/listeners.yaml.j2 similarity index 58% rename from roles/matrix-synapse/templates/matrix-synapse/conf.d/listeners.yaml.j2 rename to roles/matrix_synapse/templates/matrix-synapse/conf.d/listeners.yaml.j2 index a5523a0..0c3693e 100644 --- a/roles/matrix-synapse/templates/matrix-synapse/conf.d/listeners.yaml.j2 +++ b/roles/matrix_synapse/templates/matrix-synapse/conf.d/listeners.yaml.j2 @@ -3,15 +3,10 @@ listeners: - port: 8008 tls: false - bind_addresses: - - '::' - - '0.0.0.0' + bind_addresses: ['::', '0.0.0.0'] type: http - x_forwarded: true resources: - - names: [client] - compress: true - - names: [federation] + - names: 
[client, federation] compress: false diff --git a/roles/matrix-synapse/templates/matrix-synapse/conf.d/no_tls.yaml.j2 b/roles/matrix_synapse/templates/matrix-synapse/conf.d/no_tls.yaml.j2 similarity index 100% rename from roles/matrix-synapse/templates/matrix-synapse/conf.d/no_tls.yaml.j2 rename to roles/matrix_synapse/templates/matrix-synapse/conf.d/no_tls.yaml.j2 diff --git a/roles/matrix_synapse/templates/matrix-synapse/conf.d/password_providers.yaml.j2 b/roles/matrix_synapse/templates/matrix-synapse/conf.d/password_providers.yaml.j2 new file mode 100644 index 0000000..4f84014 --- /dev/null +++ b/roles/matrix_synapse/templates/matrix-synapse/conf.d/password_providers.yaml.j2 @@ -0,0 +1,13 @@ +# {{ ansible_managed }} +password_providers: + - module: "ldap_auth_provider.LdapAuthProvider" + config: + enabled: true + uri: "ldap://{{ ldap_master_ipv4 }}:389" + base: "{{ ldap_user_tree }}" + attributes: + uid: "uid" + mail: "mail" + name: "uid" + bind_dn: "{{ ldap_matrix_bind_dn }}" + bind_password: "{{ ldap_matrix_password }}" diff --git a/roles/matrix-synapse/templates/matrix-synapse/conf.d/server_name.yaml.j2 b/roles/matrix_synapse/templates/matrix-synapse/conf.d/server_name.yaml.j2 similarity index 100% rename from roles/matrix-synapse/templates/matrix-synapse/conf.d/server_name.yaml.j2 rename to roles/matrix_synapse/templates/matrix-synapse/conf.d/server_name.yaml.j2 diff --git a/roles/matrix-synapse/templates/matrix-synapse/conf.d/trusted_third_party_id_servers.yaml.j2 b/roles/matrix_synapse/templates/matrix-synapse/conf.d/trusted_third_party_id_servers.yaml.j2 similarity index 100% rename from roles/matrix-synapse/templates/matrix-synapse/conf.d/trusted_third_party_id_servers.yaml.j2 rename to roles/matrix_synapse/templates/matrix-synapse/conf.d/trusted_third_party_id_servers.yaml.j2 diff --git a/roles/nginx-reverse-proxy/handlers/main.yml b/roles/nginx-reverse-proxy/handlers/main.yml deleted file mode 100644 index aa28cf0..0000000 --- a/roles/nginx-reverse-proxy/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# Reload NGINX when a site changes -- name: Reload NGINX service - service: - name: nginx - state: reloaded diff --git a/roles/nginx-reverse-proxy/tasks/main.yml b/roles/nginx-reverse-proxy/tasks/main.yml deleted file mode 100644 index 339ce0b..0000000 --- a/roles/nginx-reverse-proxy/tasks/main.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -# nginx is the proxy server -- name: Install NGINX server - apt: - name: nginx - update_cache: true - -# Install sites -- name: Configure NGINX sites - template: - src: nginx-sites-available.j2 - dest: /etc/nginx/sites-available/{{ item.name }} - mode: 0644 - loop: "{{ reversed_proxy_subdomains }}" - notify: Reload NGINX service - -# Desactive useless nginx sites -- name: Deactivate the default NGINX site - file: - path: /etc/nginx/sites-enabled/default - state: absent - notify: Reload NGINX service - -# Activate sites -- name: Activate sites - file: - src: /etc/nginx/sites-available/{{ item.name }} - dest: /etc/nginx/sites-enabled/{{ item.name }} - state: link - loop: "{{ reversed_proxy_subdomains }}" - notify: Reload NGINX service - -# Install main site -- name: Configure NGINX main site - template: - src: nginx-sites-available-main.j2 - dest: /etc/nginx/sites-available/main - mode: 0644 - notify: Reload NGINX service - -# Activate main site -- name: Activate main site - file: - src: /etc/nginx/sites-available/main - dest: /etc/nginx/sites-enabled/main - state: link - notify: Reload NGINX service diff --git 
a/roles/nginx-reverse-proxy/templates/nginx-sites-available-main.j2 b/roles/nginx-reverse-proxy/templates/nginx-sites-available-main.j2 deleted file mode 100644 index 43f678f..0000000 --- a/roles/nginx-reverse-proxy/templates/nginx-sites-available-main.j2 +++ /dev/null @@ -1,69 +0,0 @@ -# {{ ansible_managed }} - -server { - server_name auro.re; - include "snippets/proxy-common.conf"; - - location / { - return 302 https://$host$request_uri; - } -} - -server { - include "snippets/proxy-common-ssl.conf"; - server_name auro.re; - - # Separate log files - access_log /var/log/nginx/main.access.log; - error_log /var/log/nginx/main.error.log; - - # Use LetsEncrypt SSL - ssl_certificate /etc/letsencrypt/live/auro.re/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/auro.re/privkey.pem; - ssl_trusted_certificate /etc/letsencrypt/live/auro.re/chain.pem; - - location / { - proxy_redirect off; - proxy_pass http://www.adm.auro.re; - proxy_set_header Host auro.re; - proxy_set_header P-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $remote_addr; - - # "A man is not dead while his name is still spoken." -- Going Postal - add_header X-Clacks-Overhead "GNU Terry Pratchett"; - } - - # For Matrix identity server - location /_matrix/identity { - proxy_pass http://synapse.adm.auro.re:8090/_matrix/identity; - proxy_set_header Host $host; - proxy_set_header X-Forwarded-For $remote_addr; - } - - # For Matrix Synapse - location /_matrix { - proxy_pass http://synapse.adm.auro.re:8008; - proxy_set_header X-Forwarded-For $remote_addr; - } -} - -server { - listen 8448 ssl default_server; - listen [::]:8448 ssl default_server; - server_name auro.re; - - # Separate log files - access_log /var/log/nginx/main.access.log; - error_log /var/log/nginx/main.error.log; - - # Use LetsEncrypt SSL - ssl_certificate /etc/letsencrypt/live/auro.re/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/auro.re/privkey.pem; - ssl_trusted_certificate /etc/letsencrypt/live/auro.re/chain.pem; - - # For Matrix Synapse federation - location / { - proxy_pass http://synapse.adm.auro.re:8008; - proxy_set_header X-Forwarded-For $remote_addr; - } -} diff --git a/roles/nginx-reverse-proxy/templates/nginx-sites-available.j2 b/roles/nginx-reverse-proxy/templates/nginx-sites-available.j2 deleted file mode 100644 index 0ddd2df..0000000 --- a/roles/nginx-reverse-proxy/templates/nginx-sites-available.j2 +++ /dev/null @@ -1,35 +0,0 @@ -# {{ ansible_managed }} - -server { - server_name {{ item.from }}; - include "snippets/proxy-common.conf"; - - location / { - return 302 https://$host$request_uri; - } -} - -server { - include "snippets/proxy-common-ssl.conf"; - server_name {{ item.from }}; - - # Separate log files - access_log /var/log/nginx/{{ item.name }}.access.log; - error_log /var/log/nginx/{{ item.name }}.error.log; - - # Use LetsEncrypt SSL - ssl_certificate /etc/letsencrypt/live/auro.re/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/auro.re/privkey.pem; - ssl_trusted_certificate /etc/letsencrypt/live/auro.re/chain.pem; - - location / { - proxy_redirect off; - proxy_pass http://{{ item.to }}; - proxy_set_header Host {{ item.from }}; - proxy_set_header P-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $remote_addr; - - # "A man is not dead while his name is still spoken." 
-- Going Postal - add_header X-Clacks-Overhead "GNU Terry Pratchett"; - } -} diff --git a/roles/nginx_reverseproxy/handlers/main.yml b/roles/nginx_reverseproxy/handlers/main.yml new file mode 100644 index 0000000..6dfcdd7 --- /dev/null +++ b/roles/nginx_reverseproxy/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Reload nginx + systemd: + name: nginx + state: reloaded diff --git a/roles/nginx_reverseproxy/tasks/main.yml b/roles/nginx_reverseproxy/tasks/main.yml new file mode 100644 index 0000000..497048d --- /dev/null +++ b/roles/nginx_reverseproxy/tasks/main.yml @@ -0,0 +1,73 @@ +--- +- name: Install NGINX + apt: + update_cache: true + name: nginx + register: apt_result + retries: 3 + until: apt_result is succeeded + +- name: Copy snippets + template: + src: "nginx/snippets/{{ item }}.j2" + dest: "/etc/nginx/snippets/{{ item }}" + mode: 0644 + loop: + - options-ssl.conf + - options-proxypass.conf + +- name: Copy dhparam + template: + src: letsencrypt/dhparam.j2 + dest: /etc/letsencrypt/dhparam + mode: 0644 + +- name: Copy reverse proxy sites + template: + src: "nginx/sites-available/{{ item }}.j2" + dest: "/etc/nginx/sites-available/{{ item }}" + mode: 0644 + loop: + - reverseproxy + - reverseproxy_redirect_dname + - redirect + notify: Reload nginx + +- name: Activate sites + file: + src: "/etc/nginx/sites-available/{{ item }}" + dest: "/etc/nginx/sites-enabled/{{ item }}" + state: link + mode: 0644 + loop: + - reverseproxy + - reverseproxy_redirect_dname + - redirect + notify: Reload nginx + +- name: Copy forward modules + template: + src: "nginx/modules-available/60-forward.conf.j2" + dest: "/etc/nginx/modules-available/60-forward.conf" + mode: 0644 + notify: Reload nginx + +- name: Activate modules + file: + src: "/etc/nginx/modules-available/60-forward.conf" + dest: "/etc/nginx/modules-enabled/60-forward.conf" + state: link + mode: 0644 + notify: Reload nginx + +- name: Copy 50x error page + template: + src: www/html/50x.html.j2 + dest: /var/www/html/50x.html + mode: 0644 + +- name: Indicate role in motd + template: + src: update-motd.d/05-service.j2 + dest: /etc/update-motd.d/05-nginx + mode: 0755 diff --git a/roles/nginx_reverseproxy/templates/letsencrypt/dhparam.j2 b/roles/nginx_reverseproxy/templates/letsencrypt/dhparam.j2 new file mode 100644 index 0000000..9b182b7 --- /dev/null +++ b/roles/nginx_reverseproxy/templates/letsencrypt/dhparam.j2 @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz ++8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a +87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7 +YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi +7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD +ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg== +-----END DH PARAMETERS----- diff --git a/roles/nginx_reverseproxy/templates/nginx/modules-available/60-forward.conf.j2 b/roles/nginx_reverseproxy/templates/nginx/modules-available/60-forward.conf.j2 new file mode 100644 index 0000000..9a86a5d --- /dev/null +++ b/roles/nginx_reverseproxy/templates/nginx/modules-available/60-forward.conf.j2 @@ -0,0 +1,14 @@ +# {{ ansible_managed }} + +{% for site in nginx.redirect_tcp %} +# Forward port {{ site.port }} to {{ site.name }} +stream { + server { + listen {{ site.port }}; + listen [::]:{{ site.port }}; + + proxy_pass {{ site.destination }}; + } +} + +{% endfor %} diff --git a/roles/nginx_reverseproxy/templates/nginx/sites-available/redirect.j2 
b/roles/nginx_reverseproxy/templates/nginx/sites-available/redirect.j2 new file mode 100644 index 0000000..28e9b7d --- /dev/null +++ b/roles/nginx_reverseproxy/templates/nginx/sites-available/redirect.j2 @@ -0,0 +1,67 @@ +# {{ ansible_managed }} + +{% for site in nginx.redirect_sites %} +# Redirect http://{{ site.from }} to http://{{ site.to }} +server { + listen 80; + listen [::]:80; + + server_name {{ site.from }}; + + location / { + return 302 http://{{ site.to }}$request_uri; + } +} + +# Redirect https://{{ site.from }} to https://{{ site.to }} +server { + listen 443 ssl http2; + listen [::]:443 ssl http2; + + server_name {{ site.from }}; + + # SSL common conf + include "/etc/nginx/snippets/options-ssl.conf"; + + location / { + return 302 https://{{ site.to }}$request_uri; + } +} + +{% endfor %} + +{# Also redirect for DNAMEs #} +{% for dname in nginx.redirect_dnames %} +{% for site in nginx.redirect_sites %} +{% set from = site.from | regex_replace('crans.org', dname) %} +{% if from != site.from %} +# Redirect http://{{ from }} to http://{{ site.to }} +server { + listen 80; + listen [::]:80; + + server_name {{ from }}; + + location / { + return 302 http://{{ site.to }}$request_uri; + } +} + +# Redirect https://{{ from }} to https://{{ site.to }} +server { + listen 443 ssl http2; + listen [::]:443 ssl http2; + + server_name {{ from }}; + + # SSL common conf + include "/etc/nginx/snippets/options-ssl.conf"; + + location / { + return 302 https://{{ site.to }}$request_uri; + } +} + +{% endif %} +{% endfor %} +{% endfor %} diff --git a/roles/nginx_reverseproxy/templates/nginx/sites-available/reverseproxy.j2 b/roles/nginx_reverseproxy/templates/nginx/sites-available/reverseproxy.j2 new file mode 100644 index 0000000..d29d13c --- /dev/null +++ b/roles/nginx_reverseproxy/templates/nginx/sites-available/reverseproxy.j2 @@ -0,0 +1,56 @@ +# {{ ansible_managed }} + +# Automatic Connection header for WebSocket support +# See http://nginx.org/en/docs/http/websocket.html +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +{% for site in nginx.reverseproxy_sites %} +# Redirect http://{{ site.from }} to https://{{ site.from }} +server { + listen 80; + listen [::]:80; + + server_name {{ site.from }}; + + location / { + return 302 https://$host$request_uri; + } +} + +# Reverse proxify https://{{ site.from }} to http://{{ site.to }} +server { + listen 443 ssl http2; + listen [::]:443 ssl http2; + + server_name {{ site.from }}; + + # SSL common conf + include "/etc/nginx/snippets/options-ssl.conf"; + + # Log into separate log files + access_log /var/log/nginx/{{ site.from }}.log; + error_log /var/log/nginx/{{ site.from }}_error.log; + + # Keep the TCP connection open a bit for faster browsing + keepalive_timeout 70; + + # Custom error page + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /var/www/html; + } + + set_real_ip_from 10.231.136.0/24; + set_real_ip_from 2a0c:700:0:2::/64; + real_ip_header P-Real-Ip; + + location / { + proxy_pass http://{{ site.to }}; + include "/etc/nginx/snippets/options-proxypass.conf"; + } +} + +{% endfor %} diff --git a/roles/nginx_reverseproxy/templates/nginx/sites-available/reverseproxy_redirect_dname.j2 b/roles/nginx_reverseproxy/templates/nginx/sites-available/reverseproxy_redirect_dname.j2 new file mode 100644 index 0000000..bac615d --- /dev/null +++ b/roles/nginx_reverseproxy/templates/nginx/sites-available/reverseproxy_redirect_dname.j2 @@ -0,0 +1,37 @@ +# {{ ansible_managed }} + +{% for dname in nginx.redirect_dnames %} 
+{% for site in nginx.reverseproxy_sites %} +{% set from = site.from | regex_replace('auro.re', dname) %} +{% set to = site.from %} +{% if from != site.from %} +# Redirect http://{{ from }} to http://{{ to }} +server { + listen 80; + listen [::]:80; + + server_name {{ from }}; + + location / { + return 302 http://{{ to }}$request_uri; + } +} + +# Redirect https://{{ from }} to https://{{ to }} +server { + listen 443 ssl http2; + listen [::]:443 ssl http2; + + server_name {{ from }}; + + # SSL common conf + include "/etc/nginx/snippets/options-ssl.conf"; + + location / { + return 302 https://{{ to }}$request_uri; + } +} + +{% endif %} +{% endfor %} +{% endfor %} diff --git a/roles/nginx_reverseproxy/templates/nginx/snippets/options-proxypass.conf.j2 b/roles/nginx_reverseproxy/templates/nginx/snippets/options-proxypass.conf.j2 new file mode 100644 index 0000000..9515d81 --- /dev/null +++ b/roles/nginx_reverseproxy/templates/nginx/snippets/options-proxypass.conf.j2 @@ -0,0 +1,19 @@ +# {{ ansible_managed }} + +proxy_redirect off; +proxy_set_header Host $host; + +# Pass the real client IP +proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + +# Tell proxified server that we are HTTPS, fix Wordpress +proxy_set_header X-Forwarded-Proto https; + +# WebSocket support +proxy_http_version 1.1; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection $connection_upgrade; + +# For Owncloud WebDav +client_max_body_size 10G; diff --git a/roles/nginx_reverseproxy/templates/nginx/snippets/options-ssl.conf.j2 b/roles/nginx_reverseproxy/templates/nginx/snippets/options-ssl.conf.j2 new file mode 100644 index 0000000..fee51c6 --- /dev/null +++ b/roles/nginx_reverseproxy/templates/nginx/snippets/options-ssl.conf.j2 @@ -0,0 +1,17 @@ +# {{ ansible_managed }} + +ssl_certificate {{ nginx.ssl.cert }}; +ssl_certificate_key {{ nginx.ssl.cert_key }}; +ssl_session_timeout 1d; +ssl_session_cache shared:MozSSL:10m; +ssl_session_tickets off; +ssl_dhparam /etc/letsencrypt/dhparam; +ssl_protocols TLSv1.2 TLSv1.3; +ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384; +ssl_prefer_server_ciphers off; + +# Enable OCSP Stapling, point to certificate chain +ssl_stapling on; +ssl_stapling_verify on; +ssl_trusted_certificate {{ nginx.ssl.trusted_cert }}; + diff --git a/roles/nginx_reverseproxy/templates/update-motd.d/05-service.j2 b/roles/nginx_reverseproxy/templates/update-motd.d/05-service.j2 new file mode 100755 index 0000000..fdff0b8 --- /dev/null +++ b/roles/nginx_reverseproxy/templates/update-motd.d/05-service.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/tail +14 +# {{ ansible_managed }} +> NGINX a été déployé sur cette machine. Voir /etc/nginx/. diff --git a/roles/nginx_reverseproxy/templates/www/html/50x.html.j2 b/roles/nginx_reverseproxy/templates/www/html/50x.html.j2 new file mode 100644 index 0000000..e5c8733 --- /dev/null +++ b/roles/nginx_reverseproxy/templates/www/html/50x.html.j2 @@ -0,0 +1,63 @@ + + + + + 502 + + + + +

+502
+Whoops, le service prend trop de temps à répondre…
+Essayez de rafraîchir la page. Si le problème persiste, pensez
+à contacter l'équipe technique d'Aurore.
+ + + diff --git a/roles/etherpad/tasks/0_apt_dependencies.yml b/roles/nodejs/tasks/main.yml similarity index 65% rename from roles/etherpad/tasks/0_apt_dependencies.yml rename to roles/nodejs/tasks/main.yml index 4c8d341..0f17fa8 100644 --- a/roles/etherpad/tasks/0_apt_dependencies.yml +++ b/roles/nodejs/tasks/main.yml @@ -1,5 +1,4 @@ --- -# For NodeJS package - name: Configure NodeJS pin when: - ansible_distribution == 'Debian' @@ -9,15 +8,14 @@ dest: /etc/apt/preferences.d/nodejs mode: 0644 -# Install EtherPad dependencies - name: Install required packages apt: - name: "{{ item }}" - state: present update_cache: true - with_items: - - build-essential - - curl - - git - - nodejs - - npm + name: + - nodejs + - npm + - build-essential # To build npm packages + state: present + register: apt_result + retries: 3 + until: apt_result is succeeded diff --git a/roles/codimd/templates/apt/nodejs.j2 b/roles/nodejs/templates/apt/nodejs.j2 similarity index 100% rename from roles/codimd/templates/apt/nodejs.j2 rename to roles/nodejs/templates/apt/nodejs.j2 diff --git a/roles/passbolt/defaults/main.yml b/roles/passbolt/defaults/main.yml new file mode 100644 index 0000000..d499d24 --- /dev/null +++ b/roles/passbolt/defaults/main.yml @@ -0,0 +1,10 @@ +--- +# URL to clone +passbolt_repo: https://github.com/passbolt/passbolt_api.git +passbolt_version: v2.10.0 + +# Install target +passbolt_path: /var/www/passbolt + +# User used to run passbolt +passbolt_user: www-data diff --git a/roles/passbolt/tasks/main.yml b/roles/passbolt/tasks/main.yml new file mode 100644 index 0000000..0c10a53 --- /dev/null +++ b/roles/passbolt/tasks/main.yml @@ -0,0 +1,39 @@ +--- +# See https://help.passbolt.com/hosting/install/ce/from-source.html + +- name: Clone passbolt project + git: + repo: "{{ passbolt_repo }}" + dest: "{{ passbolt_path }}" + version: "{{ passbolt_version }}" + become: true + become_user: "{{ passbolt_user }}" + +- name: Install passbolt dependencies + apt: + name: + - composer + - php-fpm + - php-intl + - php-gnupg + - php-gd + - php-mysql + - nginx + - mariadb-server + state: present + update_cache: true + register: apt_result + retries: 3 + until: apt_result is succeeded + +# Setup dependencies +- name: Install passbolt PHP dependencies + composer: + command: install + working_dir: "{{ passbolt_path }}" + no_dev: true + become: true + become_user: "{{ passbolt_user }}" + register: composer_result + retries: 3 + until: composer_result is succeeded diff --git a/roles/prometheus/handlers/main.yml b/roles/prometheus/handlers/main.yml new file mode 100644 index 0000000..670847b --- /dev/null +++ b/roles/prometheus/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Restart Prometheus + service: + name: prometheus + state: restarted + +- name: Restart prometheus-snmp-exporter + service: + name: prometheus-snmp-exporter + state: restarted diff --git a/roles/prometheus/tasks/main.yml b/roles/prometheus/tasks/main.yml new file mode 100644 index 0000000..211aee3 --- /dev/null +++ b/roles/prometheus/tasks/main.yml @@ -0,0 +1,69 @@ +--- +- name: Install Prometheus + apt: + update_cache: true + name: + - prometheus + - prometheus-snmp-exporter + register: apt_result + retries: 3 + until: apt_result is succeeded + +- name: Configure Prometheus + template: + src: prometheus/prometheus.yml.j2 + dest: /etc/prometheus/prometheus.yml + mode: 0644 + notify: Restart Prometheus + +- name: Configure Prometheus alert rules + template: + src: "prometheus/{{ item }}.j2" + dest: "/etc/prometheus/{{ item }}" + mode: 0644 + notify: 
Restart Prometheus + loop: + - alert.rules.yml + - django.rules.yml + +- name: Make Prometheus snmp-exporter listen on localhost only + lineinfile: + path: /etc/default/prometheus-snmp-exporter + regexp: '^ARGS=' + line: "ARGS=\"--web.listen-address=127.0.0.1:9116\"" + notify: Restart prometheus-snmp-exporter + +# This file store SNMP OIDs +- name: Configure Prometheus snmp-exporter + template: + src: "prometheus/snmp.yml.j2" + dest: "/etc/prometheus/snmp.yml" + mode: 0600 + owner: prometheus + notify: Restart prometheus-snmp-exporter + +# We don't need to restart Prometheus when updating nodes +- name: Configure Prometheus nodes + copy: + content: "{{ prometheus_targets | to_nice_json }}" + dest: /etc/prometheus/targets.json + mode: 0644 + +# We don't need to restart Prometheus when updating nodes +- name: Configure Prometheus Ubiquity Unifi SNMP devices + copy: + content: "{{ prometheus_unifi_snmp_targets | to_nice_json }}" + dest: /etc/prometheus/targets_unifi_snmp.json + mode: 0644 + +- name: Activate prometheus service + systemd: + name: prometheus + enabled: true + state: started + +- name: Indicate role in motd + template: + src: update-motd.d/05-service.j2 + dest: /etc/update-motd.d/05-prometheus + mode: 0755 diff --git a/roles/prometheus/templates/prometheus/alert.rules.yml.j2 b/roles/prometheus/templates/prometheus/alert.rules.yml.j2 new file mode 100644 index 0000000..2a10358 --- /dev/null +++ b/roles/prometheus/templates/prometheus/alert.rules.yml.j2 @@ -0,0 +1,62 @@ +# {{ ansible_managed }} +{# As this is also Jinja2 it will conflict without a raw block #} +{# Depending of Prometheus Node exporter version, rules can change depending of version #} +{% raw %} +groups: +- name: alert.rules + rules: + + # Alert for any instance that is unreachable for >3 minutes. + - alert: InstanceDown + expr: up == 0 + for: 3m + labels: + severity: critical + annotations: + summary: "{{ $labels.instance }} est invisible depuis plus de 3 minutes !" + + # Alert for out of memory + - alert: OutOfMemory + expr: (node_memory_MemFree_bytes + node_memory_Cached_bytes + node_memory_Buffers_bytes) / node_memory_MemTotal_bytes * 100 < 10 + for: 5m + labels: + severity: warning + annotations: + summary: "Mémoire libre de {{ $labels.instance }} à {{ $value }}%." + + # Alert for out of disk space + - alert: OutOfDiskSpace + expr: node_filesystem_free_bytes{fstype="ext4"} / node_filesystem_size_bytes{fstype="ext4"} * 100 < 10 + for: 5m + labels: + severity: warning + annotations: + summary: "Espace libre de {{ $labels.mountpoint }} sur {{ $labels.instance }} à {{ $value }}%." + + # Alert for out of inode space on disk + - alert: OutOfInodes + expr: node_filesystem_files_free{fstype="ext4"} / node_filesystem_files{fstype="ext4"} * 100 < 10 + for: 5m + labels: + severity: warning + annotations: + summary: "Presque plus d'inodes disponibles ({{ $value }}% restant) dans {{ $labels.mountpoint }} sur {{ $labels.instance }}." + + # Alert for high CPU usage + - alert: CpuUsage + expr: (100 - avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 75 + for: 10m + labels: + severity: warning + annotations: + summary: "CPU sur {{ $labels.instance }} à {{ $value }}%." 
+ + # Check systemd unit (> buster) + - alert: SystemdServiceFailed + expr: node_systemd_unit_state{state="failed"} == 1 + for: 10m + labels: + severity: warning + annotations: + summary: "{{ $labels.name }} a échoué sur {{ $labels.instance }}" +{% endraw %} diff --git a/roles/prometheus/templates/prometheus/django.rules.yml.j2 b/roles/prometheus/templates/prometheus/django.rules.yml.j2 new file mode 100644 index 0000000..fddd398 --- /dev/null +++ b/roles/prometheus/templates/prometheus/django.rules.yml.j2 @@ -0,0 +1,106 @@ +# {{ ansible_managed }} +{# As this is also Jinja2 it will conflict without a raw block #} +{% raw %} +groups: +- name: django.rules + rules: + - record: job:django_http_requests_before_middlewares_total:sum_rate30s + expr: sum(rate(django_http_requests_before_middlewares_total[30s])) BY (job) + - record: job:django_http_requests_unknown_latency_total:sum_rate30s + expr: sum(rate(django_http_requests_unknown_latency_total[30s])) BY (job) + - record: job:django_http_ajax_requests_total:sum_rate30s + expr: sum(rate(django_http_ajax_requests_total[30s])) BY (job) + - record: job:django_http_responses_before_middlewares_total:sum_rate30s + expr: sum(rate(django_http_responses_before_middlewares_total[30s])) BY (job) + - record: job:django_http_requests_unknown_latency_including_middlewares_total:sum_rate30s + expr: sum(rate(django_http_requests_unknown_latency_including_middlewares_total[30s])) + BY (job) + - record: job:django_http_requests_body_total_bytes:sum_rate30s + expr: sum(rate(django_http_requests_body_total_bytes[30s])) BY (job) + - record: job:django_http_responses_streaming_total:sum_rate30s + expr: sum(rate(django_http_responses_streaming_total[30s])) BY (job) + - record: job:django_http_responses_body_total_bytes:sum_rate30s + expr: sum(rate(django_http_responses_body_total_bytes[30s])) BY (job) + - record: job:django_http_requests_total:sum_rate30s + expr: sum(rate(django_http_requests_total_by_method[30s])) BY (job) + - record: job:django_http_requests_total_by_method:sum_rate30s + expr: sum(rate(django_http_requests_total_by_method[30s])) BY (job, method) + - record: job:django_http_requests_total_by_transport:sum_rate30s + expr: sum(rate(django_http_requests_total_by_transport[30s])) BY (job, transport) + - record: job:django_http_requests_total_by_view:sum_rate30s + expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) BY (job, + view) + - record: job:django_http_requests_total_by_view_transport_method:sum_rate30s + expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) BY (job, + view, transport, method) + - record: job:django_http_responses_total_by_templatename:sum_rate30s + expr: sum(rate(django_http_responses_total_by_templatename[30s])) BY (job, templatename) + - record: job:django_http_responses_total_by_status:sum_rate30s + expr: sum(rate(django_http_responses_total_by_status[30s])) BY (job, status) + - record: job:django_http_responses_total_by_charset:sum_rate30s + expr: sum(rate(django_http_responses_total_by_charset[30s])) BY (job, charset) + - record: job:django_http_exceptions_total_by_type:sum_rate30s + expr: sum(rate(django_http_exceptions_total_by_type[30s])) BY (job, type) + - record: job:django_http_exceptions_total_by_view:sum_rate30s + expr: sum(rate(django_http_exceptions_total_by_view[30s])) BY (job, view) + - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s + expr: histogram_quantile(0.5, 
sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "50" + - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s + expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "95" + - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s + expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "99" + - record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s + expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "99.9" + - record: job:django_http_requests_latency_seconds:quantile_rate30s + expr: histogram_quantile(0.5, sum(rate(django_http_requests_latency_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "50" + - record: job:django_http_requests_latency_seconds:quantile_rate30s + expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "95" + - record: job:django_http_requests_latency_seconds:quantile_rate30s + expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "99" + - record: job:django_http_requests_latency_seconds:quantile_rate30s + expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_seconds_bucket[30s])) + BY (job, le)) + labels: + quantile: "99.9" + - record: job:django_model_inserts_total:sum_rate1m + expr: sum(rate(django_model_inserts_total[1m])) BY (job, model) + - record: job:django_model_updates_total:sum_rate1m + expr: sum(rate(django_model_updates_total[1m])) BY (job, model) + - record: job:django_model_deletes_total:sum_rate1m + expr: sum(rate(django_model_deletes_total[1m])) BY (job, model) + - record: job:django_db_new_connections_total:sum_rate30s + expr: sum(rate(django_db_new_connections_total[30s])) BY (alias, vendor) + - record: job:django_db_new_connection_errors_total:sum_rate30s + expr: sum(rate(django_db_new_connection_errors_total[30s])) BY (alias, vendor) + - record: job:django_db_execute_total:sum_rate30s + expr: sum(rate(django_db_execute_total[30s])) BY (alias, vendor) + - record: job:django_db_execute_many_total:sum_rate30s + expr: sum(rate(django_db_execute_many_total[30s])) BY (alias, vendor) + - record: job:django_db_errors_total:sum_rate30s + expr: sum(rate(django_db_errors_total[30s])) BY (alias, vendor, type) + - record: job:django_migrations_applied_total:max + expr: max(django_migrations_applied_total) BY (job, connection) + - record: job:django_migrations_unapplied_total:max + expr: max(django_migrations_unapplied_total) BY (job, connection) +{% endraw %} diff --git a/roles/prometheus/templates/prometheus/prometheus.yml.j2 b/roles/prometheus/templates/prometheus/prometheus.yml.j2 new file mode 100644 index 0000000..31df6bd --- /dev/null +++ b/roles/prometheus/templates/prometheus/prometheus.yml.j2 @@ -0,0 +1,67 @@ +# {{ ansible_managed }} + +global: + # scrape_interval is set to the global default (60s) + # evaluation_interval is set to the global default (60s) + # scrape_timeout is set to the global default (10s). 
+ + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'example' + +# Alertmanager configuration +# Use prometheus alertmanager installed on the same machine +alerting: + alertmanagers: + - static_configs: + - targets: ['{{ prometheus_alertmanager }}'] + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + - "alert.rules.yml" # Monitoring alerts, this is the file you may be searching! + - "django.rules.yml" # Custom rules specific for Django project monitoring + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The .json in file_sd_configs is dynamically reloaded + + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + - job_name: servers + file_sd_configs: + - files: + - '/etc/prometheus/targets.json' + relabel_configs: + # Do not put :9100 in instance name, rather here + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - source_labels: [__param_target] + target_label: __address__ + replacement: '$1:9100' + + - job_name: unifi_snmp + file_sd_configs: + - files: + - '/etc/prometheus/targets_unifi_snmp.json' + metrics_path: /snmp + params: + module: [ubiquiti_unifi] + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: 127.0.0.1:9116 + + - job_name: django + scheme: https + static_configs: + - targets: [] diff --git a/roles/prometheus/templates/prometheus/snmp.yml.j2 b/roles/prometheus/templates/prometheus/snmp.yml.j2 new file mode 100644 index 0000000..84dcb65 --- /dev/null +++ b/roles/prometheus/templates/prometheus/snmp.yml.j2 @@ -0,0 +1,297 @@ +# {{ ansible_managed }} +# TODOlist : +# - Faire fonctionner le monitoring des switchs défini ici +# * Configurer tous les switchs avec un compte SNMPv3 +# * Mettre l'inventaire des switchs dans Ansible +# - Optimiser les règles pour les bornes Unifi, +# on pourrait indexer avec les SSID + +procurve_switch: + walk: + - 1.3.6.1.2.1.31.1.1.1.10 + - 1.3.6.1.2.1.31.1.1.1.6 + get: + - 1.3.6.1.2.1.1.3.0 + - 1.3.6.1.2.1.1.5.0 + - 1.3.6.1.2.1.1.6.0 + metrics: + - name: sysUpTime + oid: 1.3.6.1.2.1.1.3 + type: gauge + help: The time (in hundredths of a second) since the network management portion + of the system was last re-initialized. 
- 1.3.6.1.2.1.1.3 + - name: sysName + oid: 1.3.6.1.2.1.1.5 + type: DisplayString + help: An administratively-assigned name for this managed node - 1.3.6.1.2.1.1.5 + - name: sysLocation + oid: 1.3.6.1.2.1.1.6 + type: DisplayString + help: The physical location of this node (e.g., 'telephone closet, 3rd floor') + - 1.3.6.1.2.1.1.6 + - name: ifHCOutOctets + oid: 1.3.6.1.2.1.31.1.1.1.10 + type: counter + help: The total number of octets transmitted out of the interface, including framing + characters - 1.3.6.1.2.1.31.1.1.1.10 + indexes: + - labelname: ifIndex + type: gauge + - name: ifHCInOctets + oid: 1.3.6.1.2.1.31.1.1.1.6 + type: counter + help: The total number of octets received on the interface, including framing + characters - 1.3.6.1.2.1.31.1.1.1.6 + indexes: + - labelname: ifIndex + type: gauge + version: 3 + auth: + username: prometheus + +ubiquiti_unifi: + walk: + - 1.3.6.1.4.1.41112.1.6 + get: + - 1.3.6.1.2.1.1.5.0 + - 1.3.6.1.2.1.1.6.0 + metrics: +# Pour faire une WifiMap un jour, on peut entrer la location dans la conf des bornes +# - name: sysLocation +# oid: 1.3.6.1.2.1.1.6 +# type: DisplayString +# help: The physical location of this node (e.g., 'telephone closet, 3rd floor') +# - 1.3.6.1.2.1.1.6 + - name: unifiVapIndex + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.1 + type: gauge + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.1' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapChannel + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.4 + type: gauge + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.4' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapEssId + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.6 + type: DisplayString + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.6' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapName + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.7 + type: DisplayString + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.7' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapNumStations + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.8 + type: gauge + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.8' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapRadio + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.9 + type: DisplayString + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.9' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapRxBytes + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.10 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.10' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapRxCrypts + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.11 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.11' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapRxDropped + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.12 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.12' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapRxErrors + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.13 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.13' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapRxFrags + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.14 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.14' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapRxPackets + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.15 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.15' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapTxBytes + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.16 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.16' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: 
unifiVapTxDropped + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.17 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.17' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapTxErrors + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.18 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.18' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapTxPackets + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.19 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.19' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapTxRetries + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.20 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.20' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapTxPower + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.21 + type: gauge + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.21' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapUp + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.22 + type: gauge + help: ' - 1.3.6.1.4.1.41112.1.6.1.2.1.22' + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiVapUsage + oid: 1.3.6.1.4.1.41112.1.6.1.2.1.23 + type: DisplayString + help: guest or regular user - 1.3.6.1.4.1.41112.1.6.1.2.1.23 + indexes: + - labelname: unifiVapIndex + type: gauge + - name: unifiIfIndex + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.1 + type: gauge + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.1' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfName + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.5 + type: DisplayString + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.5' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfRxBytes + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.6 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.6' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfRxDropped + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.7 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.7' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfRxError + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.8 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.8' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfRxMulticast + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.9 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.9' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfRxPackets + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.10 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.10' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfTxBytes + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.12 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.12' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfTxDropped + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.13 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.13' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfTxError + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.14 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.14' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiIfTxPackets + oid: 1.3.6.1.4.1.41112.1.6.2.1.1.15 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.2.1.1.15' + indexes: + - labelname: unifiIfIndex + type: gauge + - name: unifiApSystemModel + oid: 1.3.6.1.4.1.41112.1.6.3.3 + type: DisplayString + help: ' - 1.3.6.1.4.1.41112.1.6.3.3' + - name: unifiApSystemUptime + oid: 1.3.6.1.4.1.41112.1.6.3.5 + type: counter + help: ' - 1.3.6.1.4.1.41112.1.6.3.5' + version: 3 + auth: + security_level: authPriv + username: snmp_prometheus + password: {{ snmp_unifi_password }} + 
auth_protocol: SHA + priv_protocol: AES + priv_password: {{ snmp_unifi_password }} diff --git a/roles/prometheus/templates/update-motd.d/05-service.j2 b/roles/prometheus/templates/update-motd.d/05-service.j2 new file mode 100755 index 0000000..f027dc4 --- /dev/null +++ b/roles/prometheus/templates/update-motd.d/05-service.j2 @@ -0,0 +1,4 @@ +#!/bin/sh +# {{ ansible_managed }} +echo "> prometheus a été déployé sur cette machine." +echo " Voir /etc/prometheus/" diff --git a/roles/prometheus_node/handlers/main.yml b/roles/prometheus_node/handlers/main.yml new file mode 100644 index 0000000..b4b64a4 --- /dev/null +++ b/roles/prometheus_node/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart prometheus-node-exporter + service: + name: prometheus-node-exporter + state: restarted diff --git a/roles/prometheus_node/tasks/main.yml b/roles/prometheus_node/tasks/main.yml new file mode 100644 index 0000000..7ca6350 --- /dev/null +++ b/roles/prometheus_node/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Install Prometheus node-exporter + apt: + update_cache: true + name: prometheus-node-exporter + install_recommends: false # Do not install smartmontools + register: apt_result + retries: 3 + until: apt_result is succeeded + when: + - ansible_lsb.codename == 'buster' + +# Prometheus 2 node is in stretch-backports +- name: Install Prometheus node-exporter (stretch-backports) + apt: + update_cache: true + name: prometheus-node-exporter + install_recommends: false + default_release: stretch-backports + register: apt_result + retries: 3 + until: apt_result is succeeded + when: + - ansible_lsb.codename == 'stretch' + +- name: Activate prometheus-node-exporter service + systemd: + name: prometheus-node-exporter + enabled: true + state: started + +# Doesn't work on Debian Stretch with the old prometheus package +- name: Make Prometheus node-exporter listen on adm only + lineinfile: + path: /etc/default/prometheus-node-exporter + regexp: '^ARGS=' + line: | + ARGS="--web.listen-address={{ ansible_hostname }}.adm.auro.re:9100" + notify: Restart prometheus-node-exporter diff --git a/roles/radius/tasks/main.yml b/roles/radius/tasks/main.yml new file mode 100644 index 0000000..941f7c9 --- /dev/null +++ b/roles/radius/tasks/main.yml @@ -0,0 +1,263 @@ +--- +- name: Add backports repositories + apt_repository: + repo: "{{ item }} http://deb.debian.org/debian buster-backports main contrib non-free" + loop: + - "deb" + - "deb-src" + +- name: Ensure /var/www exists + file: + name: "/var/www" + state: directory + mode: 0755 + +- name: Clone re2o repo + git: + repo: "https://gitlab.federez.net/re2o/re2o.git" + dest: "/var/www/re2o" + version: "dev" + force: true + +- name: Template local re2o settings + template: + src: "{{ item }}.j2" + dest: "/var/www/re2o/re2o/{{ item }}" + mode: 0644 + loop: + - settings_local.py + - local_routers.py + +# What follows is a hideous abomination. +# Blame freeradius-python3 on backports. 
+ +- name: try to install freeradius-python3 (this will fail on post-install) + apt: + name: freeradius-python3 + default_release: buster-backports + update_cache: true + ignore_errors: true + +- name: fix freeradius-python3 postinstall script + template: + src: freeradius-python3.postinst.j2 + dest: /var/lib/dpkg/info/freeradius-python3.postinst + mode: 0644 + +- name: reinstall broken package (this might fail too, for different reasons) + apt: + name: freeradius-python3 + default_release: buster-backports + force: true + ignore_errors: true + +- name: Setup radius symlinks + file: + src: "/var/www/re2o/freeradius_utils/{{ item.local_prefix }}{{ item.filename }}" + dest: "/etc/freeradius/3.0/{{ item.filename }}" + state: link + force: true + loop: + - local_prefix: "" + filename: auth.py + - local_prefix: freeradius3/ + filename: radiusd.conf + - local_prefix: freeradius3/ + filename: mods-enabled/python + - local_prefix: freeradius3/ + filename: mods-enabled/eap + +- name: Configure freeradius + template: + src: "{{ item }}.j2" + dest: "/etc/freeradius/3.0/{{ item }}" + mode: 0640 + owner: freerad + loop: + - sites-enabled/default + - sites-enabled/inner-tunnel + +- name: Install Basic Clients/Proxy Files freeradius + template: + src: "{{ item }}.j2" + dest: "/etc/freeradius/3.0/{{ item }}" + mode: 0640 + owner: freerad + loop: + - clients.conf + - proxy.conf + when: "'aurore_vm' not in group_names" + +- name: Install Clients FedeRez Radius-Aurore + template: + src: proxy-federez.conf.j2 + dest: /etc/freeradius/3.0/proxy.conf + mode: 0640 + owner: freerad + when: "'aurore_vm' in group_names" + +- name: Install Proxy FedeRez Radius-Aurore + template: + src: clients-federez.conf.j2 + dest: /etc/freeradius/3.0/clients.conf + mode: 0640 + owner: freerad + when: "'aurore_vm' in group_names" + +- name: Install radius requirements (except freeradius-python3) + shell: + cmd: "{{ item }}" + chdir: /var/www/re2o/ + loop: + - "cat apt_requirements_radius.txt | grep -v freeradius-python3 | xargs apt-get -y install" + - "pip3 install -r pip_requirements.txt" + + +# End of hideousness (hopefully). 
+ +- name: Configure log rotation + template: + src: "freeradius-logrotate.j2" + dest: "/etc/logrotate.d/freeradius" + mode: 0644 + + +# Database setup + + +- name: Install postgresql + apt: + name: + - postgresql + - postgresql-client + +- name: Install postgresql ansible module requirement(s) + pip: + name: psycopg2 + +- name: Create read-only user + community.general.postgresql_user: + name: re2o_ro + password: "{{ radius_pg_re2o_ro_password }}" + become_user: postgres + +- name: Create replication user + community.general.postgresql_user: + name: replication + password: "{{ radius_pg_replication_password }}" + become_user: postgres + + +- name: Nuking - Stop freeradius + systemd: + name: freeradius + state: stopped + when: nuke_radius|default(false) + +- name: Nuking - Remove old subscription if it exists + community.general.postgresql_subscription: + name: "re2o_subscription_{{ inventory_hostname_short | replace('-','_') }}" + db: re2o + state: absent + become_user: postgres + when: nuke_radius|default(false) + ignore_errors: true + +- name: Nuking - Destroy old local DB if it exists + community.general.postgresql_db: + name: re2o + state: absent + become_user: postgres + when: nuke_radius|default(false) + +- name: Create local DB + community.general.postgresql_db: + name: re2o + owner: replication + state: present + encoding: "UTF8" + lc_collate: 'fr_FR.UTF-8' + lc_ctype: 'fr_FR.UTF-8' + become_user: postgres + +- name: Dump radius re2o PostgreSQL database schema from master + community.general.postgresql_db: + name: re2o + state: dump + target: /tmp/re2o-schema.sql + target_opts: '-s' + login_host: 10.128.0.22 + login_user: replication + login_password: "{{ radius_pg_replication_password }}" + + +- name: Restore DB + tags: + - restore + community.general.postgresql_db: + name: re2o + state: restore + target: /tmp/re2o-schema.sql + target_opts: "-s" + login_host: localhost + login_user: replication + login_password: "{{ radius_pg_replication_password }}" + + +- name: Grant select permissions on all tables to read-only user + tags: + - perms + community.general.postgresql_privs: + database: re2o + privs: SELECT + objs: ALL_IN_SCHEMA + schema: public + roles: re2o_ro + become_user: postgres + +- name: Grant usage permission on schema to read-only user + tags: + - perms + community.general.postgresql_privs: + database: re2o + privs: USAGE + objs: public + type: schema + roles: re2o_ro + become_user: postgres + +- name: Set default privileges in schema + tags: + - perms + community.general.postgresql_privs: + database: re2o + privs: SELECT + schema: public + objs: TABLES + type: default_privs + roles: re2o_ro + become_user: postgres + + +- name: Set up subscription to main database + tags: + - sub + community.general.postgresql_subscription: + name: "re2o_subscription_{{ inventory_hostname_short | replace('-','_') }}" + connparams: + host: re2o-db.adm.auro.re + user: replication + password: "{{ radius_pg_replication_password }}" + dbname: re2o + db: re2o + publications: + - re2o_pub + become_user: postgres + + +- name: Restart freeradius, ensure enabled + systemd: + name: freeradius + enabled: true + state: restarted + daemon_reload: true diff --git a/roles/radius/templates/clients-federez.conf.j2 b/roles/radius/templates/clients-federez.conf.j2 new file mode 100644 index 0000000..2a71e05 --- /dev/null +++ b/roles/radius/templates/clients-federez.conf.j2 @@ -0,0 +1,22 @@ +client radius-aurore { + ipaddr = 10.128.0.0 + netmask = 16 + secret = {{ radius_secret_aurore }} + 
require_message_authenticator = no + nastype = other + virtual_server = radius-wifi +} + +# Parangon (federez) +client parangon { + ipaddr = 185.230.78.47 + secret = {{ radius_secret_federez }} + virtual_server = radius-wifi +} + +# Dodecagon (federez) +client dodecagon { + ipaddr = 195.154.165.76 + secret = {{ radius_secret_federez }} + virtual_server = radius-wifi +} diff --git a/roles/radius/templates/clients.conf.j2 b/roles/radius/templates/clients.conf.j2 new file mode 100644 index 0000000..6909978 --- /dev/null +++ b/roles/radius/templates/clients.conf.j2 @@ -0,0 +1,18 @@ +client radius-filaire { + ipaddr = 10.130.{{ apartment_block_id }}.0 + netmask = 24 + secret = {{ radius_secret_wired }} + require_message_authenticator = no + nastype = other + virtual_server = radius-filaire +} + + +client aurore-wifi { + ipaddr = 10.{{ subnet_ids.ap }}.0.0 + netmask = 16 + secret = {{ radius_secret_wifi }} + require_message_authenticator = no + nastype = other + virtual_server = radius-wifi +} diff --git a/roles/radius/templates/freeradius-logrotate.j2 b/roles/radius/templates/freeradius-logrotate.j2 new file mode 100644 index 0000000..91d5df4 --- /dev/null +++ b/roles/radius/templates/freeradius-logrotate.j2 @@ -0,0 +1,50 @@ +# The main server log +/var/log/freeradius/radius.log { + # common options + daily + rotate 365 + missingok + compress + delaycompress + notifempty + + copytruncate +} + +# (in order) +# Session monitoring utilities +# Session database modules +# SQL log files +/var/log/freeradius/checkrad.log /var/log/freeradius/radwatch.log +/var/log/freeradius/radutmp /var/log/freeradius/radwtmp +/var/log/freeradius/sqllog.sql +{ + # common options + daily + rotate 365 + missingok + compress + delaycompress + notifempty + + nocreate +} + +# There are different detail-rotating strategies you can use. One is +# to write to a single detail file per IP and use the rotate config +# below. Another is to write to a daily detail file per IP with: +# detailfile = ${radacctdir}/%{Client-IP-Address}/%Y%m%d-detail +# (or similar) in radiusd.conf, without rotation. If you go with the +# second technique, you will need another cron job that removes old +# detail files. You do not need to comment out the below for method #2. +/var/log/freeradius/radacct/*/detail { + # common options + daily + rotate 365 + missingok + compress + delaycompress + notifempty + + nocreate +} diff --git a/roles/radius/templates/freeradius-python3.postinst.j2 b/roles/radius/templates/freeradius-python3.postinst.j2 new file mode 100644 index 0000000..058ec38 --- /dev/null +++ b/roles/radius/templates/freeradius-python3.postinst.j2 @@ -0,0 +1,14 @@ +#!/bin/sh +# vim:ts=2:sw=2:et + +set -e + +case "$1" in + configure) + invoke-rc.d freeradius restart + ;; +esac + + + +exit 0 diff --git a/roles/radius/templates/local_routers.py.j2 b/roles/radius/templates/local_routers.py.j2 new file mode 100644 index 0000000..0367f2c --- /dev/null +++ b/roles/radius/templates/local_routers.py.j2 @@ -0,0 +1,28 @@ +class DbRouter(object): + """ + A router to control all database operations on models in the + auth application. + """ + def db_for_read(self, model, **hints): + """ + Attempts to read remote models go to local database. + """ + return 'local' + + def db_for_write(self, model, **hints): + """ + Attempts to write remote models go to the remote database. 
+ """ + return 'default' + + def allow_relation(self, obj1, obj2, **hints): + """ + Allow relations involving the remote database + """ + return True + + def allow_migrate(self, db, app_label, model_name=None, **hints): + """ + Allow migrations on the remote database + """ + return True diff --git a/roles/radius/templates/proxy-federez.conf.j2 b/roles/radius/templates/proxy-federez.conf.j2 new file mode 100644 index 0000000..d3b9efe --- /dev/null +++ b/roles/radius/templates/proxy-federez.conf.j2 @@ -0,0 +1,87 @@ +# -*- mode: conf-unix; coding: utf-8 -*- +proxy server { + default_fallback = no +} + + +realm LOCAL { + +} + +realm NULL { + +} + +#Proxy FedeRez ##### + +realm AUROREFEDEREZ { + auth_pool = federez_radius_servers +# nostrip +} + +home_server parangon_v4 { + type = auth + ipaddr = 185.230.78.47 + port = 1812 + secret = {{ radius_secret_federez }} + require_message_authenticator =yes + response_window = 20 + zombie_period = 40 + revive_interval = 120 + status_check = status-server + check_interval = 30 + num_answers_to_alive = 3 +} + +home_server parangon_v6 { + type = auth + ipaddr = 2a0c:700:0:23:67:e5ff:fee9:5 + port = 1812 + secret = {{ radius_secret_federez }} + require_message_authenticator =yes + response_window = 20 + zombie_period = 40 + revive_interval = 120 + status_check = status-server + check_interval = 30 + num_answers_to_alive = 3 +} + +home_server dodecagon_v4 { + type = auth + ipaddr = 195.154.165.76 + port = 1812 + secret = {{ radius_secret_federez }} + require_message_authenticator =yes + response_window = 20 + zombie_period = 40 + revive_interval = 120 + status_check = status-server + check_interval = 30 + num_answers_to_alive = 3 +} + +home_server dodecagon_v6 { + type = auth + ipaddr = 2001:bc8:273e::1 + port = 1812 + secret = {{ radius_secret_federez }} + require_message_authenticator =yes + response_window = 20 + zombie_period = 40 + revive_interval = 120 + status_check = status-server + check_interval = 30 + num_answers_to_alive = 3 +} + +home_server_pool federez_radius_servers { + type = fail-over + home_server = parangon_v4 + home_server = dodecagon_v4 + home_server = dodecagon_v6 + home_server = parangon_v6 +} + + + diff --git a/roles/radius/templates/proxy.conf.j2 b/roles/radius/templates/proxy.conf.j2 new file mode 100644 index 0000000..737d4c2 --- /dev/null +++ b/roles/radius/templates/proxy.conf.j2 @@ -0,0 +1,54 @@ +# -*- mode: conf-unix; coding: utf-8 -*- +proxy server { + default_fallback = no +} + + +realm LOCAL { + +} + +realm NULL { + +} + +#Proxy FedeRez ##### + +realm AUROREFEDEREZ { + auth_pool = aurore_central_radius_servers +# nostrip +} + +home_server radius_aurore_v4 { + type = auth + ipaddr = 10.128.0.251 + port = 1812 + secret = {{ radius_secret_aurore }} + require_message_authenticator =yes + response_window = 20 + zombie_period = 40 + revive_interval = 120 + status_check = status-server + check_interval = 30 + num_answers_to_alive = 3 +} + +home_server radius_aurore_v6 { + type = auth + ipaddr = 2a09:6840:128::251 + port = 1812 + secret = {{ radius_secret_aurore }} + require_message_authenticator =yes + response_window = 20 + zombie_period = 40 + revive_interval = 120 + status_check = status-server + check_interval = 30 + num_answers_to_alive = 3 +} + +home_server_pool aurore_central_radius_servers { + type = fail-over + home_server = radius_aurore_v4 + home_server = radius_aurore_v6 +} diff --git a/roles/radius/templates/settings_local.py.j2 b/roles/radius/templates/settings_local.py.j2 new file mode 100644 index 0000000..fb1ac29 
--- /dev/null +++ b/roles/radius/templates/settings_local.py.j2 @@ -0,0 +1,129 @@ +# coding: utf-8 +# Re2o est un logiciel d'administration développé initiallement au rezometz. Il +# se veut agnostique au réseau considéré, de manière à être installable en +# quelques clics. +# +# Copyright © 2017 Gabriel Détraz +# Copyright © 2017 Goulven Kermarec +# Copyright © 2017 Augustin Lemesle +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +"""re2o.settings_locale +The file with all the available options for a locale configuration of re2o +""" + +from __future__ import unicode_literals + +# A secret key used by the server. +SECRET_KEY = "{{ re2o_secret_key }}" + +# The password to access the project database +DB_PASSWORD = "{{ re2o_db_password }}" + +# AES key for secret key encryption. +# The length must be a multiple of 16 +AES_KEY = "{{ re2o_aes_key }}" + +# Should the server run in debug mode ? +# SECURITY WARNING: don't run with debug turned on in production! +DEBUG = False + +# A list of admins of the services. Receive mails when an error occurs +ADMINS = [('AURORE', 'monitoring.aurore@lists.crans.org'), ('Gabriel Detraz', 'detraz@crans.org')] + +# The list of hostname the server will respond to. 
+ALLOWED_HOSTS = ['{{ inventory_hostname }}'] + +# The time zone the server is runned in +TIME_ZONE = 'Europe/Paris' + +# The storage systems parameters to use +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.postgresql_psycopg2', + 'NAME': 're2o', + 'USER': 're2o', + 'PASSWORD': DB_PASSWORD, + 'HOST': 're2o-db.adm.auro.re', + 'TEST': { + 'CHARSET': 'utf8', + 'COLLATION': 'utf8_general_ci' + } + }, + 'local': { + 'ENGINE': 'django.db.backends.postgresql_psycopg2', + 'NAME': 're2o', + 'USER': 're2o_ro', + 'PASSWORD': "{{ radius_pg_re2o_ro_password }}", + 'HOST': 'localhost', + 'TEST': { + 'CHARSET': 'utf8', + 'COLLATION': 'utf8_general_ci' + } + }, + 'ldap': { + 'ENGINE': 'ldapdb.backends.ldap', + 'NAME': 'ldap://10.128.0.21/', + 'USER': 'cn=admin,dc=auro,dc=re', + 'TLS': False, + 'PASSWORD': '{{ ldap_admin_password }}', + } +} + +# Security settings for secure https +# Activate once https is correctly configured +SECURE_CONTENT_TYPE_NOSNIFF = False +SECURE_BROWSER_XSS_FILTER = False +SESSION_COOKIE_SECURE = False +CSRF_COOKIE_SECURE = False +CSRF_COOKIE_HTTPONLY = False +X_FRAME_OPTIONS = 'DENY' +SESSION_COOKIE_AGE = 60 * 60 * 3 + +# The path where your organization logo is stored +LOGO_PATH = "static_files/logo.png" + +# The mail configuration for Re2o to send mails +SERVER_EMAIL = 'no-reply@auro.re' # The mail address to use +EMAIL_HOST = 'localhost' # The host to use +EMAIL_PORT = 25 # The port to use + +# Settings of the LDAP structure +LDAP = { + 'base_user_dn': 'cn=Utilisateurs,dc=auro,dc=re', + 'base_userservice_dn': 'ou=service-users,dc=auro,dc=re', + 'base_usergroup_dn': 'ou=posix,ou=groups,dc=auro,dc=re', + 'base_userservicegroup_dn': 'ou=services,ou=groups,dc=auro,dc=re', + 'user_gid': 100, + } + +# A range of UID to use. Used in linux environement +UID_RANGES = { + 'users': [21001, 30000], + 'service-users': [20000, 21000], +} + +# A range of GID to use. Used in linux environement +GID_RANGES = { + 'posix': [501, 600], +} + +# Some optionnal Re2o Apps +OPTIONNAL_APPS_RE2O = () + +# Some Django apps you want to add in you local project +OPTIONNAL_APPS = OPTIONNAL_APPS_RE2O + () + +LOCAL_ROUTERS = ["re2o.local_routers.DbRouter"] diff --git a/roles/radius/templates/sites-enabled/default.j2 b/roles/radius/templates/sites-enabled/default.j2 new file mode 100644 index 0000000..a406559 --- /dev/null +++ b/roles/radius/templates/sites-enabled/default.j2 @@ -0,0 +1,239 @@ +###################################################################### +# +# As of 2.0.0, FreeRADIUS supports virtual hosts using the +# "server" section, and configuration directives. +# +# Virtual hosts should be put into the "sites-available" +# directory. Soft links should be created in the "sites-enabled" +# directory to these files. This is done in a normal installation. +# +# If you are using 802.1X (EAP) authentication, please see also +# the "inner-tunnel" virtual server. You will likely have to edit +# that, too, for authentication to work. +# +# $Id: 083407596aa5074d665adac9606e7de655b634aa $ +# +###################################################################### +# +# Read "man radiusd" before editing this file. See the section +# titled DEBUGGING. It outlines a method where you can quickly +# obtain the configuration you want, without running into +# trouble. See also "man unlang", which documents the format +# of this file. +# +# This configuration is designed to work in the widest possible +# set of circumstances, with the widest possible number of +# authentication methods. 
This means that in general, you should +# need to make very few changes to this file. +# +# The best way to configure the server for your local system +# is to CAREFULLY edit this file. Most attempts to make large +# edits to this file will BREAK THE SERVER. Any edits should +# be small, and tested by running the server with "radiusd -X". +# Once the edits have been verified to work, save a copy of these +# configuration files somewhere. (e.g. as a "tar" file). Then, +# make more edits, and test, as above. +# +# There are many "commented out" references to modules such +# as ldap, sql, etc. These references serve as place-holders. +# If you need the functionality of that module, then configure +# it in radiusd.conf, and un-comment the references to it in +# this file. In most cases, those small changes will result +# in the server being able to connect to the DB, and to +# authenticate users. +# +###################################################################### + +server default { +listen { + type = auth + ipaddr = * + port = 0 + + limit { + max_connections = 16 + lifetime = 0 + idle_timeout = 30 + } +} + +listen { + ipaddr = * + port = 0 + type = acct + + limit { + } +} + +# IPv6 versions of the above - read their full config to understand options +listen { + type = auth + ipv6addr = :: # any. ::1 == localhost + port = 0 + limit { + max_connections = 16 + lifetime = 0 + idle_timeout = 30 + } +} + +listen { + ipv6addr = :: + port = 0 + type = acct + + limit { + } +} +} + +server radius-wifi { +authorize { + rewrite_calling_station_id + + if (User-Name =~ /^(.*)@(.*)/){ + if (User-Name !~ /^(.*)@(.*)auro(.*)/){ + update control{ + Proxy-To-Realm := 'AUROREFEDEREZ' + } + } + + if ("%{request:User-Name}" =~ /^(.*)@(.*)auro(.*)/){ + update request{ + Stripped-User-Name := "%{1}" + } + } + } + + filter_username + + preprocess + + suffix + + eap { + ok = return + } + + expiration + logintime + + pap + +} + +authenticate { + Auth-Type PAP { + pap + } + + Auth-Type CHAP { + chap + } + + Auth-Type MS-CHAP { + mschap + } + + mschap + + digest + + eap +} + + +preacct { + preprocess + + acct_unique + + suffix + files +} + +accounting { + + detail + + unix + exec + +} + +session { +} + +post-auth { + update { + &reply: += &session-state: + } + + exec + + + remove_reply_message_if_eap + + Post-Auth-Type REJECT { + -sql + attr_filter.access_reject + + eap + + remove_reply_message_if_eap + } +} + +pre-proxy { +} + +post-proxy { + eap +} +} + + + +server radius-filaire{ + authorize{ + + re2o + expiration + logintime + pap + } + authenticate{ + Auth-Type PAP{ + pap + } + Auth-Type CHAP{ + chap + } + Auth-Type MS-CHAP{ + mschap + } + digest + eap + + } + preacct{ + preprocess + acct_unique + suffix + files + } + accounting{ + } + session{ + } + post-auth{ + re2o + exec + } + pre-proxy{ + } + post-proxy{ + eap + } +} diff --git a/roles/radius/templates/sites-enabled/inner-tunnel.j2 b/roles/radius/templates/sites-enabled/inner-tunnel.j2 new file mode 100644 index 0000000..ee6e929 --- /dev/null +++ b/roles/radius/templates/sites-enabled/inner-tunnel.j2 @@ -0,0 +1,345 @@ +# -*- text -*- +###################################################################### +# +# This is a virtual server that handles *only* inner tunnel +# requests for EAP-TTLS and PEAP types. 
+# +# $Id: 2c6f9611bfc7b4b782aeb9764e47e832690739c4 $ +# +###################################################################### + +server inner-tunnel { + +# +# This next section is here to allow testing of the "inner-tunnel" +# authentication methods, independently from the "default" server. +# It is listening on "localhost", so that it can only be used from +# the same machine. +# +# $ radtest USER PASSWORD 127.0.0.1:18120 0 testing123 +# +# If it works, you have configured the inner tunnel correctly. To check +# if PEAP will work, use: +# +# $ radtest -t mschap USER PASSWORD 127.0.0.1:18120 0 testing123 +# +# If that works, PEAP should work. If that command doesn't work, then +# +# FIX THE INNER TUNNEL CONFIGURATION SO THAT IT WORKS. +# +# Do NOT do any PEAP tests. It won't help. Instead, concentrate +# on fixing the inner tunnel configuration. DO NOTHING ELSE. +# +listen { + ipaddr = 127.0.0.1 + port = 18120 + type = auth +} + + +# Authorization. First preprocess (hints and huntgroups files), +# then realms, and finally look in the "users" file. +# +# The order of the realm modules will determine the order that +# we try to find a matching realm. +# +# Make *sure* that 'preprocess' comes before any realm if you +# need to setup hints for the remote radius server +authorize { + if ("%{request:User-Name}" =~ /^(.*)@auro(.*)/){ + update request{ + Stripped-User-Name := "%{1}" + } + } + # + # Take a User-Name, and perform some checks on it, for spaces and other + # invalid characters. If the User-Name appears invalid, reject the + # request. + # + # See policy.d/filter for the definition of the filter_username policy. + # + filter_username + + re2o + + # + # Do checks on outer / inner User-Name, so that users + # can't spoof us by using incompatible identities + # +# filter_inner_identity + + # + # The chap module will set 'Auth-Type := CHAP' if we are + # handling a CHAP request and Auth-Type has not already been set + chap + + # + # If the users are logging in with an MS-CHAP-Challenge + # attribute for authentication, the mschap module will find + # the MS-CHAP-Challenge attribute, and add 'Auth-Type := MS-CHAP' + # to the request, which will cause the server to then use + # the mschap module for authentication. + mschap + + # + # Pull crypt'd passwords from /etc/passwd or /etc/shadow, + # using the system API's to get the password. If you want + # to read /etc/passwd or /etc/shadow directly, see the + # passwd module, above. + # +# unix + + # + # Look for IPASS style 'realm/', and if not found, look for + # '@realm', and decide whether or not to proxy, based on + # that. +# IPASS + + # + # If you are using multiple kinds of realms, you probably + # want to set "ignore_null = yes" for all of them. + # Otherwise, when the first style of realm doesn't match, + # the other styles won't be checked. + # + # Note that proxying the inner tunnel authentication means + # that the user MAY use one identity in the outer session + # (e.g. "anonymous", and a different one here + # (e.g. "user@example.com"). The inner session will then be + # proxied elsewhere for authentication. If you are not + # careful, this means that the user can cause you to forward + # the authentication to another RADIUS server, and have the + # accounting logs *not* sent to the other server. This makes + # it difficult to bill people for their network activity. + # + suffix +# ntdomain + + # + # The "suffix" module takes care of stripping the domain + # (e.g. 
"@example.com") from the User-Name attribute, and the + # next few lines ensure that the request is not proxied. + # + # If you want the inner tunnel request to be proxied, delete + # the next few lines. + # + update control { + &Proxy-To-Realm := LOCAL + } + + # + # This module takes care of EAP-MSCHAPv2 authentication. + # + # It also sets the EAP-Type attribute in the request + # attribute list to the EAP type from the packet. + # + # The example below uses module failover to avoid querying all + # of the following modules if the EAP module returns "ok". + # Therefore, your LDAP and/or SQL servers will not be queried + # for the many packets that go back and forth to set up TTLS + # or PEAP. The load on those servers will therefore be reduced. + # + eap { + ok = return + } + + # + # Read the 'users' file + #files + + # + # Look in an SQL database. The schema of the database + # is meant to mirror the "users" file. + # + # See "Authorization Queries" in sql.conf + #-sql + + # + # If you are using /etc/smbpasswd, and are also doing + # mschap authentication, the un-comment this line, and + # enable the "smbpasswd" module. +# smbpasswd + + # + # The ldap module reads passwords from the LDAP database. + #-ldap + + # + # Enforce daily limits on time spent logged in. +# daily + + expiration + logintime + + # + # If no other module has claimed responsibility for + # authentication, then try to use PAP. This allows the + # other modules listed above to add a "known good" password + # to the request, and to do nothing else. The PAP module + # will then see that password, and use it to do PAP + # authentication. + # + # This module should be listed last, so that the other modules + # get a chance to set Auth-Type for themselves. + # + pap +} + + +# Authentication. +# +# +# This section lists which modules are available for authentication. +# Note that it does NOT mean 'try each module in order'. It means +# that a module from the 'authorize' section adds a configuration +# attribute 'Auth-Type := FOO'. That authentication type is then +# used to pick the appropriate module from the list below. +# + +# In general, you SHOULD NOT set the Auth-Type attribute. The server +# will figure it out on its own, and will do the right thing. The +# most common side effect of erroneously setting the Auth-Type +# attribute is that one authentication method will work, but the +# others will not. +# +# The common reasons to set the Auth-Type attribute by hand +# is to either forcibly reject the user, or forcibly accept him. +# +authenticate { + # + # PAP authentication, when a back-end database listed + # in the 'authorize' section supplies a password. The + # password can be clear-text, or encrypted. + Auth-Type PAP { + pap + } + + # + # Most people want CHAP authentication + # A back-end database listed in the 'authorize' section + # MUST supply a CLEAR TEXT password. Encrypted passwords + # won't work. + Auth-Type CHAP { + chap + } + + # + # MSCHAP authentication. + Auth-Type MS-CHAP { + mschap + } + + # + # For old names, too. + # + mschap + + # + # Allow EAP authentication. + eap +} + +###################################################################### +# +# There are no accounting requests inside of EAP-TTLS or PEAP +# tunnels. +# +###################################################################### + + +# Session database, used for checking Simultaneous-Use. Either the radutmp +# or rlm_sql module can handle this. 
+# The rlm_sql module is *much* faster +session { + radutmp + + # + # See "Simultaneous Use Checking Queries" in sql.conf +# sql +} + + +# Post-Authentication +# Once we KNOW that the user has been authenticated, there are +# additional steps we can take. +# +# Note that the last packet of the inner-tunnel authentication +# MAY NOT BE the last packet of the outer session. So updating +# the outer reply MIGHT work, and sometimes MIGHT NOT. The +# exact functionality depends on both the inner and outer +# authentication methods. +# +# If you need to send a reply attribute in the outer session, +# the ONLY safe way is to set "use_tunneled_reply = yes", and +# then update the inner-tunnel reply. +post-auth { + re2o + + Post-Auth-Type REJECT { + # log failed authentications in SQL, too. + -sql + attr_filter.access_reject + + # + # Let the outer session know which module failed, and why. + # + update outer.session-state { + &Module-Failure-Message := &request:Module-Failure-Message + } + } +} + +# +# When the server decides to proxy a request to a home server, +# the proxied request is first passed through the pre-proxy +# stage. This stage can re-write the request, or decide to +# cancel the proxy. +# +# Only a few modules currently have this method. +# +pre-proxy { + # Uncomment the following line if you want to change attributes + # as defined in the preproxy_users file. +# files + + # Uncomment the following line if you want to filter requests + # sent to remote servers based on the rules defined in the + # 'attrs.pre-proxy' file. +# attr_filter.pre-proxy + + # If you want to have a log of packets proxied to a home + # server, un-comment the following line, and the + # 'detail pre_proxy_log' section, above. +# pre_proxy_log +} + +# +# When the server receives a reply to a request it proxied +# to a home server, the request may be massaged here, in the +# post-proxy stage. +# +post-proxy { + + # If you want to have a log of replies from a home server, + # un-comment the following line, and the 'detail post_proxy_log' + # section, above. +# post_proxy_log + + # Uncomment the following line if you want to filter replies from + # remote proxies based on the rules defined in the 'attrs' file. +# attr_filter.post-proxy + + # + # If you are proxying LEAP, you MUST configure the EAP + # module, and you MUST list it here, in the post-proxy + # stage. + # + # You MUST also use the 'nostrip' option in the 'realm' + # configuration. Otherwise, the User-Name attribute + # in the proxied request will not match the user name + # hidden inside of the EAP packet, and the end server will + # reject the EAP request. + # + eap +} + +} # inner-tunnel server block diff --git a/roles/radvd/handlers/main.yml b/roles/radvd/handlers/main.yml new file mode 100644 index 0000000..6ed4ca7 --- /dev/null +++ b/roles/radvd/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart radvd + systemd: + state: restarted + name: radvd + enabled: true diff --git a/roles/radvd/tasks/main.yml b/roles/radvd/tasks/main.yml new file mode 100644 index 0000000..47b1f9d --- /dev/null +++ b/roles/radvd/tasks/main.yml @@ -0,0 +1,19 @@ +--- +# Warning: radvd installation seems to fail if the configuration +# file doesn't already exist when the package is installed, +# so the order is important. 
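+#
+# A possible extra safeguard (sketch only, not part of this role): pre-create
+# an empty /etc/radvd.conf so the package installation can never hit a missing
+# file, even if the task order changes later.
+#
+# - name: Ensure /etc/radvd.conf exists before installing radvd
+#   copy:
+#     content: ""
+#     dest: /etc/radvd.conf
+#     force: false
+#     mode: 0644
+#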
+- name: Configure radvd + template: + src: radvd.conf.j2 + dest: /etc/radvd.conf + mode: 0644 + notify: restart radvd + tags: + - radconf + +- name: Install radvd + apt: + update_cache: true + name: radvd + state: present + notify: restart radvd diff --git a/roles/radvd/templates/radvd.conf.j2 b/roles/radvd/templates/radvd.conf.j2 new file mode 100644 index 0000000..300f50b --- /dev/null +++ b/roles/radvd/templates/radvd.conf.j2 @@ -0,0 +1,80 @@ +# -*- mode: conf-unix; coding: utf-8 -*- + +## +# Bornes Wi-Fi +## + +# # Need to add an interface for this VLAN on "routeur-*" hosts. +# +# interface ens19 { +# AdvSendAdvert on; +# AdvLinkMTU {{ mtu }}; +# AdvDefaultPreference high; +# MaxRtrAdvInterval 30; +# +# AdvRASrcAddress { +# {{ ipv6_base_prefix }}:{{ subnet_ids.ap }}::0:250; # Unifi controller +# }; +# +# prefix {{ ipv6_base_prefix }}:{{ subnet_ids.ap }}::/64 { +# AdvRouterAddr on; +# }; +# +# # La zone DNS +# DNSSL borne.auro.re {}; +# +# # Les DNS récursifs +# RDNSS {{ ipv6_base_prefix }}:{{ subnet_ids.ap }}::{{ dns_host_suffix_main }} {}; +# RDNSS {{ ipv6_base_prefix }}:{{ subnet_ids.ap }}::{{ dns_host_suffix_backup }} {}; +# }; + +## +# Utilisateurs filaire +## +interface ens20 { + AdvSendAdvert on; + AdvLinkMTU {{ mtu }}; + AdvDefaultPreference high; + MaxRtrAdvInterval 30; + + AdvRASrcAddress { + fe80::1; # link-local virtual IP used with keepalived + }; + + prefix {{ ipv6_base_prefix }}:{{ subnet_ids.users_wired }}::/64 { + AdvRouterAddr on; + }; + + DNSSL fil.{{ apartment_block_dhcp }}.auro.re {}; # TODO: fix this shitty workaround. + + RDNSS {{ ipv6_base_prefix }}:{{ subnet_ids.users_wired }}::{{ dns_host_suffix_main }} {}; + RDNSS {{ ipv6_base_prefix }}:{{ subnet_ids.users_wired }}::{{ dns_host_suffix_backup }} {}; +}; + + +## +# Utilisateurs wifi +## +interface ens21 { + AdvSendAdvert on; + AdvLinkMTU {{ mtu }}; + AdvDefaultPreference high; + MaxRtrAdvInterval 30; + + AdvRASrcAddress { + fe80::1; + }; + + prefix {{ ipv6_base_prefix }}:{{ subnet_ids.users_wifi }}::/64 { + AdvRouterAddr on; + }; + + DNSSL wifi.{{ apartment_block_dhcp }}.auro.re {}; # TODO: fix this shitty workaround. + + RDNSS {{ ipv6_base_prefix }}:{{ subnet_ids.users_wifi }}::{{ dns_host_suffix_main }} {}; + RDNSS {{ ipv6_base_prefix }}:{{ subnet_ids.users_wifi }}::{{ dns_host_suffix_backup }} {}; +}; + + + +# For public IPs: will use DHCPv6, deployed on routeur-aurore alone. 
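The radvd role above renders `/etc/radvd.conf` from this template and restarts the daemon through a handler, but nothing verifies the rendered file before radvd picks it up. A minimal syntax-check task is sketched below; it assumes the packaged `radvd` binary supports the `-c` (configtest) and `-C` (configuration path) flags, and it would have to run after the "Install radvd" task so that the binary is present.

```YAML
- name: Check radvd configuration syntax
  command: radvd -c -C /etc/radvd.conf
  changed_when: false
```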
diff --git a/roles/re2o-service/defaults/main.yml b/roles/re2o-service/defaults/main.yml new file mode 100644 index 0000000..343c392 --- /dev/null +++ b/roles/re2o-service/defaults/main.yml @@ -0,0 +1,3 @@ +--- +service_user: re2o-services +service_homedir: /var/local/re2o-services diff --git a/roles/re2o-service/tasks/main.yml b/roles/re2o-service/tasks/main.yml new file mode 100644 index 0000000..68e963c --- /dev/null +++ b/roles/re2o-service/tasks/main.yml @@ -0,0 +1,47 @@ +--- +# Create service user +- include_tasks: service_user.yml + +- name: Install Python dependencies + apt: + name: + - python3-iso8601 + - python3-jinja2 + update_cache: true + register: apt_result + retries: 3 + until: apt_result is succeeded + +- name: "Clone re2o {{ service_name }} project" + git: + repo: "{{ service_repo }}" + dest: "{{ service_homedir }}/{{ service_name }}" + version: "{{ service_version }}" + force: true + become: true + become_user: "{{ service_user }}" + +- name: "Configure re2o {{ service_name }} project" + community.general.ini_file: + path: "{{ service_homedir }}/config.ini" + section: Re2o + option: "{{ item.key }}" + value: "{{ item.value }}" + mode: 0600 + become: true + become_user: "{{ service_user }}" + loop: "{{ service_config|dict2items }}" + +- name: Link config file + file: + src: "{{ service_homedir }}/config.ini" + dest: "{{ service_homedir }}/{{ service_name }}/config.ini" + owner: "{{ service_user }}" + group: nogroup + state: link + +- name: Indicate in motd service location + template: + src: update-motd.d/05-service.j2 + dest: "/etc/update-motd.d/05-re2o-{{ service_name }}" + mode: 0755 diff --git a/roles/re2o-service/tasks/service_user.yml b/roles/re2o-service/tasks/service_user.yml new file mode 100644 index 0000000..389b72e --- /dev/null +++ b/roles/re2o-service/tasks/service_user.yml @@ -0,0 +1,19 @@ +--- +# Having a custom group is useless so use nogroup +- name: "Create {{ service_user }} user" + user: + name: "{{ service_user }}" + group: nogroup + home: "{{ service_homedir }}" + system: true + shell: /bin/false + state: present + +# Only service user should be able to go there +- name: "Secure {{ service_user }} home directory" + file: + path: "{{ service_homedir }}" + state: directory + owner: "{{ service_user }}" + group: nogroup + mode: 0755 diff --git a/roles/re2o-service/templates/update-motd.d/05-service.j2 b/roles/re2o-service/templates/update-motd.d/05-service.j2 new file mode 100755 index 0000000..4ed8a74 --- /dev/null +++ b/roles/re2o-service/templates/update-motd.d/05-service.j2 @@ -0,0 +1,3 @@ +#!/bin/sh +# {{ ansible_managed }} +echo "✨ Le service re2o {{ service_name }} est dans {{ service_homedir }}/{{ service_name }}" diff --git a/roles/router/handlers/main.yml b/roles/router/handlers/main.yml new file mode 100644 index 0000000..0583512 --- /dev/null +++ b/roles/router/handlers/main.yml @@ -0,0 +1,11 @@ +--- +- name: restart keepalived + systemd: + state: restarted + name: keepalived + enabled: true + +- name: run aurore-firewall + command: python3 main.py --force + args: + chdir: /var/local/re2o-services/aurore-firewall/ diff --git a/roles/router/tasks/main.yml b/roles/router/tasks/main.yml new file mode 100644 index 0000000..2014572 --- /dev/null +++ b/roles/router/tasks/main.yml @@ -0,0 +1,90 @@ +--- + +# XXX: YES, this is ugly as fuck. 
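+#
+# A less fragile alternative (sketch, not what this role currently does) would
+# be to set the suffix per machine in the inventory or in host_vars instead of
+# parsing the hostname, e.g. in a hypothetical
+# host_vars/routeur-example.adm.auro.re.yml:
+#
+#   router_hard_ip_suffix: 240
+#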
+- name: set IP suffix (main) + set_fact: + router_hard_ip_suffix: 240 + when: "'backup' not in ansible_hostname" + +- name: set IP suffix (backup) + set_fact: + router_hard_ip_suffix: 140 + when: "'backup' in ansible_hostname" + +- name: Enable IPv4 packet forwarding + ansible.posix.sysctl: + name: net.ipv4.ip_forward + value: '1' + sysctl_set: true + +- name: Enable IPv6 packet forwarding + ansible.posix.sysctl: + name: net.ipv6.conf.all.forwarding + value: '1' + sysctl_set: true + +- name: Configure /etc/network/interfaces for routeur-aurore* + template: + src: interfaces-aurore + dest: /etc/network/interfaces + mode: 0644 + when: "'routeur-aurore' in ansible_hostname" + +- name: Install aurore-firewall (re2o-service) + import_role: + name: re2o-service + vars: + service_repo: https://gitlab.federez.net/aurore/aurore-firewall.git + service_name: aurore-firewall + service_version: aurore + service_config: + hostname: re2o.auro.re + username: service-user + password: "{{ vault_serviceuser_passwd }}" + notify: run aurore-firewall + +- name: Configure aurore-firewall for local router + template: + src: firewall_config.py + dest: /var/local/re2o-services/aurore-firewall/firewall_config.py + mode: 0644 + notify: run aurore-firewall + when: "'routeur-aurore' not in ansible_hostname" + +- name: Configure aurore-firewall for routeur-aurore* + template: + src: firewall_config_aurore.py + dest: /var/local/re2o-services/aurore-firewall/firewall_config.py + mode: 0644 + notify: run aurore-firewall + when: "'routeur-aurore' in ansible_hostname" + +- name: Install keepalived + apt: + name: keepalived + update_cache: true + register: apt_result + retries: 3 + until: apt_result is succeeded + +- name: configure keepalived for local router + template: + src: keepalived.conf + dest: /etc/keepalived/keepalived.conf + mode: 0644 + notify: restart keepalived + when: "'routeur-aurore' not in ansible_hostname" + +- name: configure keepalived for routeur-aurore* + template: + src: keepalived-aurore.conf + dest: /etc/keepalived/keepalived.conf + mode: 0644 + notify: restart keepalived + when: "'routeur-aurore' in ansible_hostname" + +- name: Configure cron + template: + src: cron.d/re2o-services + dest: /etc/cron.d/re2o-services + mode: 0644 diff --git a/roles/router/templates/cron.d/re2o-services b/roles/router/templates/cron.d/re2o-services new file mode 100644 index 0000000..e732887 --- /dev/null +++ b/roles/router/templates/cron.d/re2o-services @@ -0,0 +1,3 @@ +# Régénération du firewall +*/5 * * * * root /usr/bin/python3 /var/local/re2o-services/aurore-firewall/main.py +@reboot root /usr/bin/python3 /var/local/re2o-services/aurore-firewall/main.py --force diff --git a/roles/router/templates/firewall_config.py b/roles/router/templates/firewall_config.py new file mode 100644 index 0000000..4f6b755 --- /dev/null +++ b/roles/router/templates/firewall_config.py @@ -0,0 +1,65 @@ +# -*- mode: python; coding: utf-8 -*- +# +# Re2o est un logiciel d'administration développé initiallement au rezometz. Il +# se veut agnostique au réseau considéré, de manière à être installable en +# quelques clics. +# +# Copyright © 2017 Gabriel Détraz +# Copyright © 2017 Goulven Kermarec +# Copyright © 2017 Augustin Lemesle +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +### Give me a role + +# previously: routeur4 = routeur IPv4 +role = ['routeur'] + + +### Specify each interface role + +interfaces_type = { + 'routable' : ['ens20', 'ens21'], + 'sortie' : ['ens19'], + 'admin' : ['ens18'] +} + + +### Specify nat settings: name, interfaces with range, and global range for nat +### WARNING : "interface_ip_to_nat' MUST contain /24 ranges, and ip_sources MUST +### contain /16 range + +nat = [ + { + 'name' : 'Wifi', + 'interfaces_ip_to_nat' : { + 'ens19' : '45.66.109.0/24', + }, + 'ip_sources' : '10.{{ subnet_ids.users_wifi }}.0.0/16', + 'extra_nat' : {} + }, + { + 'name' : 'Filaire', + 'interfaces_ip_to_nat' : { + 'ens19' : '45.66.108.0/24', + }, + 'ip_sources' : '10.{{ subnet_ids.users_wired }}.0.0/16', + 'extra_nat' : { + '10.129.{{ apartment_block_id }}.{{ '1' if "backup" in inventory_hostname else '2' }}40' : '45.66.108.25{{ + apartment_block_id }}', + '10.129.{{ apartment_block_id }}.254' : '45.66.108.25{{ apartment_block_id }}' + } + } +] diff --git a/roles/router/templates/firewall_config_aurore.py b/roles/router/templates/firewall_config_aurore.py new file mode 100644 index 0000000..c41fd92 --- /dev/null +++ b/roles/router/templates/firewall_config_aurore.py @@ -0,0 +1,49 @@ +# -*- mode: python; coding: utf-8 -*- +# Re2o est un logiciel d'administration développé initiallement au rezometz. Il +# se veut agnostique au réseau considéré, de manière à être installable en +# quelques clics. +# +# Copyright © 2017 Gabriel Détraz +# Copyright © 2017 Goulven Kermarec +# Copyright © 2017 Augustin Lemesle +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +### Give me a role + +role = ['routeur'] + +### Specify each interface role + +interfaces_type = { + 'routable' : ['ens21', 'ens22'], + 'sortie' : ['ens18', 'ens1'], + 'admin' : ['ens19', 'ens20', 'ens23'] +} + +### Specify nat settings: name, interfaces with range, and global range for nat +### WARNING : "interface_ip_to_nat' MUST contain /24 ranges, and ip_sources MUST +### contain /16 range + +nat = [ + { + 'name' : 'AdminVlans', + 'extra_nat' : { + '10.129.0.254/32' : '45.66.111.{{ router_hard_ip_suffix }}', + '10.128.0.0/16' : '45.66.111.{{ router_hard_ip_suffix }}', + '10.130.0.0/16' : '45.66.111.{{ router_hard_ip_suffix }}' + } + } +] diff --git a/roles/router/templates/interfaces-aurore b/roles/router/templates/interfaces-aurore new file mode 100644 index 0000000..401e5aa --- /dev/null +++ b/roles/router/templates/interfaces-aurore @@ -0,0 +1,86 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). + +source /etc/network/interfaces.d/* + +# The loopback network interface +auto lo +iface lo inet loopback + +# VLAN 129: routage +auto ens18 +iface ens18 inet static + address 10.129.0.{{ router_hard_ip_suffix }}/16 + gateway 10.129.0.1 + +iface ens18 inet6 static + address 2a09:6840:129::0:{{ router_hard_ip_suffix }}/64 + + # Now managed by keepalived. + # + # post-up ip route add 2a09:6840:10::/64 via 2a09:6840:129::1:254 dev ens18 + # post-up ip route add 2a09:6840:11::/64 via 2a09:6840:129::1:254 dev ens18 + + # post-up ip route add 2a09:6840:20::/64 via 2a09:6840:129::2:254 dev ens18 + # post-up ip route add 2a09:6840:21::/64 via 2a09:6840:129::2:254 dev ens18 + + # post-up ip route add 2a09:6840:40::/64 via 2a09:6840:129::4:254 dev ens18 + # post-up ip route add 2a09:6840:41::/64 via 2a09:6840:129::4:254 dev ens18 + + # post-up ip route add 2a09:6840:50::/64 via 2a09:6840:129::5:254 dev ens18 + # post-up ip route add 2a09:6840:51::/64 via 2a09:6840:129::5:254 dev ens18 + + +# The primary network interface +allow-hotplug ens19 +iface ens19 inet static + address 10.128.0.{{ router_hard_ip_suffix }}/16 + gateway 10.128.0.254 + dns-search adm.auro.re + +iface ens19 inet6 static + address 2a09:6840:128::0:{{ router_hard_ip_suffix }}/64 + + # Ensures internet connectivity when running as keepalived backup. + gateway 2a09:6840:128::0:254 + +# VlAN 130: switches +auto ens20 +iface ens20 inet static + address 10.130.0.{{ router_hard_ip_suffix }}/16 + +iface ens20 inet6 static + address 2a09:6840:130::0:{{ router_hard_ip_suffix }}/64 + +# VLAN 111: IPs publiques serveurs +auto ens21 +iface ens21 inet static + address 45.66.111.{{ router_hard_ip_suffix }}/24 + + # Nécessaire pour contacter re2o et bootstrap le firewall. + # Ces directives sont _aussi_ set par aurore-firewall ! 
+ up iptables -t nat -A POSTROUTING -s 10.129.0.{{ router_hard_ip_suffix }}/32 -j SNAT --to-source 45.66.111.{{ router_hard_ip_suffix }} + up iptables -t nat -A POSTROUTING -s 10.128.0.0/16 -j SNAT --to-source 45.66.111.{{ router_hard_ip_suffix }} + up iptables -t nat -A POSTROUTING -s 10.130.0.0/16 -j SNAT --to-source 45.66.111.{{ router_hard_ip_suffix }} + +iface ens21 inet6 static + address 2a09:6840:111::{{ router_hard_ip_suffix }}/48 + +# VLAN 110: IP publiques adhérents +auto ens22 +iface ens22 inet static + address 45.66.110.{{ router_hard_ip_suffix }}/24 + +iface ens22 inet6 static + address 2a09:6840:110::{{ router_hard_ip_suffix }}/48 + +# VLAN 131: onduleurs et PDU +auto ens23 +iface ens23 inet static + address 10.131.0.{{ router_hard_ip_suffix }}/16 + +iface ens23 inet6 static + address 2a09:6840:131::0:{{ router_hard_ip_suffix }}/64 + +auto ens1 +iface ens1 inet6 manual diff --git a/roles/router/templates/keepalived-aurore.conf b/roles/router/templates/keepalived-aurore.conf new file mode 100644 index 0000000..cd22a5b --- /dev/null +++ b/roles/router/templates/keepalived-aurore.conf @@ -0,0 +1,131 @@ +global_defs { + notification_email { + monitoring.aurore@lists.crans.org + } + notification_email_from routeur-aurore{% if 'backup' in inventory_hostname %}-backup{% endif %}@auro.re + smtp_server smtp.crans.org +} + + +vrrp_instance VI_ROUT_aurore_IPv4 { + {% if 'backup' in inventory_hostname %} + state BACKUP + priority 100 + {% else %} + state MASTER + priority 150 + {% endif %} + + + # Interface used for VRRP communication. + interface ens19 + + # Shared by MASTER and BACKUP + virtual_router_id 40 + + # Timeout in seconds before failover kicks in. + advert_int 2 + + # Used to authenticate VRRP communication between master and backup. + authentication { + auth_type PASS + auth_pass {{ keepalived_password }} + } + + smtp_alert + + virtual_ipaddress { + # Routing + 10.129.0.254/16 brd 10.129.255.255 dev ens18 scope global + + # Adm + 10.128.0.254/16 brd 10.129.255.255 dev ens19 scope global + + # Switches + 10.130.0.254/16 brd 10.130.255.255 dev ens20 scope global + + # IPs publiques serveurs + 45.66.111.254/24 brd 45.66.111.255 dev ens21 scope global + + # IPs publiques adhérents + 45.66.110.254/24 brd 45.66.110.255 dev ens22 scope global + + # VLAN 131: Onduleurs et PDUs + 10.131.0.254/16 brd 10.131.255.255 dev ens23 scope global + } + + + virtual_routes { + # IPv4 gateway: yggdrasil + src 10.129.0.254 to 0.0.0.0/0 via 10.129.0.1 dev ens18 + } +} + +vrrp_instance VI_ROUT_aurore_IPv6 { + {% if 'backup' in inventory_hostname %} + state BACKUP + priority 100 + {% else %} + state MASTER + priority 150 + {% endif %} + + + # Interface used for VRRP communication. + interface ens19 + + # Shared by MASTER and BACKUP + virtual_router_id 60 + + # Timeout in seconds before failover kicks in. + advert_int 2 + + # Used to authenticate VRRP communication between master and backup. 
+ authentication { + auth_type PASS + auth_pass {{ keepalived_password }} + } + + smtp_alert + + virtual_ipaddress { + # Hello zayo + 2001:1b48:2:103::d7:2/126 dev ens1 scope global + + # Routing + 2a09:6840:129::254/64 dev ens18 scope global + + # Adm + 2a09:6840:128::254/64 dev ens19 scope global + + # Switches + 2a09:6840:130::254/64 dev ens20 scope global + + # IPs publiques serveurs + 2a09:6840:111::254/64 dev ens21 scope global + + # IPs publiques adhérents + 2a09:6840:110::254/64 dev ens22 scope global + + # VLAN 131: Onduleurs et PDUs + 2a09:6840:131::254/64 dev ens23 scope global + } + + + virtual_routes { + # For IPv6, the master router is routeur-aurore, NOT yggdrasil, + # because yggdrasil doesn't support BGPv6 announcements. + src 2001:1b48:2:103::d7:2/126 to ::/0 via 2001:1b48:2:103::d7:1 dev ens1 + + # Routes return for ipv6 connectivity + 2a09:6840:10::/64 via 2a09:6840:129::1:254 dev ens18 + 2a09:6840:11::/64 via 2a09:6840:129::1:254 dev ens18 + 2a09:6840:20::/64 via 2a09:6840:129::2:254 dev ens18 + 2a09:6840:21::/64 via 2a09:6840:129::2:254 dev ens18 + 2a09:6840:40::/64 via 2a09:6840:129::4:254 dev ens18 + 2a09:6840:41::/64 via 2a09:6840:129::4:254 dev ens18 + 2a09:6840:50::/64 via 2a09:6840:129::5:254 dev ens18 + 2a09:6840:51::/64 via 2a09:6840:129::5:254 dev ens18 + } +} + diff --git a/roles/router/templates/keepalived.conf b/roles/router/templates/keepalived.conf new file mode 100644 index 0000000..cd217f3 --- /dev/null +++ b/roles/router/templates/keepalived.conf @@ -0,0 +1,109 @@ +global_defs { + notification_email { + monitoring.aurore@lists.crans.org + } + notification_email_from routeur-{{ apartment_block }}{% if 'backup' in inventory_hostname %}-backup{% endif %}@auro.re + smtp_server smtp.crans.org +} + + +vrrp_instance VI_ROUT_{{ apartment_block }}_IPv4 { + {% if 'backup' in inventory_hostname %} + state BACKUP + priority 100 + {% else %} + state MASTER + priority 150 + {% endif %} + + + # Interface used for VRRP communication. + interface ens18 + + # Shared by MASTER and BACKUP + virtual_router_id 4{{ apartment_block_id }} + + # Timeout in seconds before failover kicks in. + advert_int 2 + + # Used to authenticate VRRP communication between master and backup. + authentication { + auth_type PASS + auth_pass {{ keepalived_password }} + } + + smtp_alert + + virtual_ipaddress { + # Routing subnet + 10.129.{{ apartment_block_id }}.254/16 brd 10.129.255.255 dev ens19 scope global + + + # NATed subnet: wired + 45.66.108.25{{ apartment_block_id }}/24 brd 45.66.108.255 dev ens19 scope global + + # NATed subnet: wifi + 45.66.109.25{{ apartment_block_id }}/24 brd 45.66.109.255 dev ens19 scope global + + # Wired + 10.{{ subnet_ids.users_wired }}.0.254/16 brd 10.{{ subnet_ids.users_wired }}.255.255 dev ens20 scope global + + # Wifi + 10.{{ subnet_ids.users_wifi }}.0.254/16 brd 10.{{ subnet_ids.users_wifi }}.255.255 dev ens21 scope global + } + + + virtual_routes { + # 10.129.0.1 is Yggdrasil + src 10.129.{{ apartment_block_id }}.254 to 0.0.0.0/0 via 10.129.0.1 dev ens19 + } +} + +vrrp_instance VI_ROUT_{{ apartment_block }}_IPv6 { + {% if 'backup' in inventory_hostname %} + state BACKUP + priority 100 + {% else %} + state MASTER + priority 150 + {% endif %} + + + # Interface used for VRRP communication. + interface ens18 + + # Shared by MASTER and BACKUP + virtual_router_id 6{{ apartment_block_id }} + + # Timeout in seconds before failover kicks in. + advert_int 2 + + # Used to authenticate VRRP communication between master and backup. 
+ authentication { + auth_type PASS + auth_pass {{ keepalived_password }} + } + + smtp_alert + + virtual_ipaddress { + # Routing subnet + fe80::1/64 dev ens19 scope global + {{ ipv6_base_prefix }}:129::{{ apartment_block_id }}:254/64 dev ens19 scope global + + # Wired + fe80::1/64 dev ens20 scope global + + # Wifi + fe80::1/64 dev ens21 scope global + } + + + virtual_routes { + # For IPv6, the master router is routeur-aurore, NOT yggdrasil, + # because yggdrasil doesn't support BGPv6 announcements. + src {{ ipv6_base_prefix }}:129::{{ apartment_block_id }}:254 to ::/0 via {{ ipv6_base_prefix }}:129::0:254 dev ens19 + } +} + + diff --git a/roles/unbound/handlers/main.yml b/roles/unbound/handlers/main.yml new file mode 100644 index 0000000..c9d2d42 --- /dev/null +++ b/roles/unbound/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: restart unbound + systemd: + state: restarted + name: unbound + +- name: read unbound apparmor config + command: apparmor_parser -r /etc/apparmor.d/usr.sbin.unbound diff --git a/roles/unbound/tasks/main.yml b/roles/unbound/tasks/main.yml new file mode 100644 index 0000000..d20db0c --- /dev/null +++ b/roles/unbound/tasks/main.yml @@ -0,0 +1,64 @@ +--- + +# This is used to let unbound bind to the right IP addresses. +- name: set dns_host_suffix (main) + set_fact: + dns_host_suffix: "{{ dns_host_suffix_main }}" + when: "'backup' not in inventory_hostname" + +- name: set dns_host_suffix (backup) + set_fact: + dns_host_suffix: "{{ dns_host_suffix_backup }}" + when: "'backup' in inventory_hostname" + + +- name: install unbound + apt: + update_cache: true + name: unbound + state: present + register: unbound_install + +- name: ensure unbound log directory exists + file: + path: /var/log/unbound + state: directory + mode: '0755' + owner: unbound + group: unbound + notify: restart unbound + +- name: add unbound-control configuration + template: + src: unbound-control.conf.j2 + dest: /etc/unbound/unbound.conf.d/unbound-control.conf + mode: 0644 + notify: restart unbound + +- name: setup main unbound config file + template: + src: unbound.conf.j2 + dest: /etc/unbound/unbound.conf + mode: 0644 + notify: restart unbound + + +- name: ask apparmor to allow unbound to write to log file + template: + src: unbound-apparmor-config + dest: /etc/apparmor.d/local/usr.sbin.unbound + mode: '0644' + notify: read unbound apparmor config + +- name: setup unbound log rotation + template: + src: unbound-logrotate.j2 + dest: /etc/logrotate.d/unbound + mode: 0644 + +- name: setup recursive DNS server config + template: + src: recursive.conf.j2 + dest: /etc/unbound/unbound.conf.d/recursive.conf + mode: 0644 + notify: restart unbound diff --git a/roles/unbound/templates/recursive.conf.j2 b/roles/unbound/templates/recursive.conf.j2 new file mode 100644 index 0000000..efdebe1 --- /dev/null +++ b/roles/unbound/templates/recursive.conf.j2 @@ -0,0 +1,60 @@ +# {{ ansible_managed }} + +server: + # Timestamps use UTC ASCII instead of UNIX epoch. + log-time-ascii: yes + + # Only log errors. + verbosity: 0 + log-servfail: yes + + logfile: "/var/log/unbound/unbound.log" + + do-ip4: yes + do-ip6: yes + + # IP addresses on which to listen. + # + # Note: dns_host_suffix is dynamically set in this role's tasks, + # and changes depending on whether we're handling the main or backup + # recursive DNS node. 
+ + # IPv4 + interface: 10.{{ subnet_ids.ap }}.0.{{ dns_host_suffix }} + interface: 10.{{ subnet_ids.users_wired }}.0.{{ dns_host_suffix }} + interface: 10.{{ subnet_ids.users_wifi }}.0.{{ dns_host_suffix }} + + + # IPv6 + interface: {{ ipv6_base_prefix }}:{{ subnet_ids.ap }}::0:{{ dns_host_suffix }} + interface: {{ ipv6_base_prefix }}:{{ subnet_ids.users_wired }}::0:{{ dns_host_suffix }} + interface: {{ ipv6_base_prefix }}:{{ subnet_ids.users_wifi }}::0:{{ dns_host_suffix }} + + + # By default, anything other than localhost is refused. + # Whitelist some subnets: + access-control: 10.{{ subnet_ids.ap }}.0.0/16 allow + access-control: 10.{{ subnet_ids.users_wired }}.0.0/16 allow + access-control: 10.{{ subnet_ids.users_wifi }}.0.0/16 allow + access-control: {{ ipv6_base_prefix }}::/32 allow # Fuck it... :) + + num-threads: {{ ansible_processor_vcpus }} + + private-address: 10.0.0.0/8 + + # The host cache TTL affects blacklisting of supposedly bogus hosts. + # The default was 900 (15 minutes). + infra-host-ttl: 60 + + + # The following is vital, we were having issues + # with DNSSEC that turned out to be due to UDP responses that were too + # large. + + # EDNS reassembly buffer to advertise to UDP peers (the actual buffer + # is set with msg-buffer-size). 1472 can solve fragmentation (timeouts) + edns-buffer-size: {{ mtu }} + + # Maximum UDP response size (not applied to TCP response). + # Suggested values are 512 to 4096. Default is 4096. 65536 disables it. + max-udp-size: {{ mtu }} diff --git a/roles/unbound/templates/unbound-apparmor-config b/roles/unbound/templates/unbound-apparmor-config new file mode 100644 index 0000000..f40ee05 --- /dev/null +++ b/roles/unbound/templates/unbound-apparmor-config @@ -0,0 +1 @@ +/var/log/unbound/unbound.log rw, diff --git a/roles/unbound/templates/unbound-control.conf.j2 b/roles/unbound/templates/unbound-control.conf.j2 new file mode 100644 index 0000000..a3ba77a --- /dev/null +++ b/roles/unbound/templates/unbound-control.conf.j2 @@ -0,0 +1,15 @@ +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + control-interface: 127.0.0.1 + + # port number for remote control operations. + control-port: 8953 + + # Disable the use of certificates for unbound-control. + # It's only listening locally, there's no need for the added complexity. + control-use-cert: "no" diff --git a/roles/unbound/templates/unbound-logrotate.j2 b/roles/unbound/templates/unbound-logrotate.j2 new file mode 100644 index 0000000..35fe72f --- /dev/null +++ b/roles/unbound/templates/unbound-logrotate.j2 @@ -0,0 +1,13 @@ +/var/log/unbound/*.log { + size 1G + rotate 4 + missingok + notifempty + compress + delaycompress + sharedscripts + create 644 + postrotate + /usr/sbin/unbound-control log_reopen + endscript +} diff --git a/roles/unbound/templates/unbound.conf.j2 b/roles/unbound/templates/unbound.conf.j2 new file mode 100644 index 0000000..ee9a1cf --- /dev/null +++ b/roles/unbound/templates/unbound.conf.j2 @@ -0,0 +1,12 @@ +# {{ ansible_managed }} +# +# Unbound configuration file for Debian. +# +# See the unbound.conf(5) man page. +# +# See /usr/share/doc/unbound/examples/unbound.conf for a commented +# reference config file. +# +# The following line includes additional configuration files from the +# /etc/unbound/unbound.conf.d directory. 
+include: "/etc/unbound/unbound.conf.d/*.conf"
diff --git a/roles/unifi_controller/tasks/main.yml b/roles/unifi_controller/tasks/main.yml
new file mode 100644
index 0000000..7f886f2
--- /dev/null
+++ b/roles/unifi_controller/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+# Install HTTPS support for APT
+- name: Install apt-transport-https
+  apt:
+    update_cache: true
+    name:
+      - apt-transport-https
+      - gpg
+      - dirmngr
+    state: present
+  register: apt_result
+  retries: 3
+  until: apt_result is succeeded
+
+# Add the key
+- name: Configure the apt key
+  apt_key:
+    keyserver: keyserver.ubuntu.com
+    id: 06E85760C0A52C50
+    state: present
+  register: apt_key_result
+  retries: 3
+  until: apt_key_result is succeeded
+
+# Add the repository to the sources list
+- name: Configure unifi repository
+  apt_repository:
+    repo: "{{ item }}"
+    state: present
+  loop:
+    - deb http://www.ui.com/downloads/unifi/debian stable ubiquiti
+
+- name: Install unifi
+  apt:
+    update_cache: true
+    name: unifi
+    state: present
+  register: apt_result
+  retries: 3
+  until: apt_result is succeeded
+
+- name: Indicate role in motd
+  template:
+    src: update-motd.d/05-service.j2
+    dest: /etc/update-motd.d/05-unifi-controller
+    mode: 0755
diff --git a/roles/unifi_controller/templates/update-motd.d/05-service.j2 b/roles/unifi_controller/templates/update-motd.d/05-service.j2
new file mode 100755
index 0000000..b768773
--- /dev/null
+++ b/roles/unifi_controller/templates/update-motd.d/05-service.j2
@@ -0,0 +1,3 @@
+#!/bin/sh
+# {{ ansible_managed }}
+echo "> Le contrôleur Unifi a été déployé sur cette machine."
diff --git a/services_web.yml b/services_web.yml
new file mode 100755
index 0000000..6bc6a6d
--- /dev/null
+++ b/services_web.yml
@@ -0,0 +1,17 @@
+#!/usr/bin/env ansible-playbook
+---
+# Deploy Docker hosts
+- hosts: docker-ovh.adm.auro.re,gitea.adm.auro.re,drone.adm.auro.re,stream.adm.auro.re,wikijs.adm.auro.re
+  roles:
+    - docker
+
+# Deploy Passbolt
+- hosts: passbolt.adm.auro.re
+  roles:
+    - passbolt
+
+# Deploy reverse proxy
+- hosts: proxy*.adm.auro.re
+  roles:
+    - certbot
+    - nginx_reverseproxy
diff --git a/upgrade.yml b/upgrade.yml
old mode 100644
new mode 100755
index f5fbbef..72cf409
--- a/upgrade.yml
+++ b/upgrade.yml
@@ -1,3 +1,4 @@
+#!/usr/bin/env ansible-playbook
 ---
 # This is a special playbook to upgrade all selected servers !
 # Please always use with --limit myserver.adm.auro.re
@@ -9,3 +10,6 @@
         upgrade: dist
         update_cache: true
         cache_valid_time: 86400  # one day
+      register: apt_result
+      retries: 3
+      until: apt_result is succeeded
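`upgrade.yml` is already meant to be run with `--limit`; a further safeguard, sketched below under the assumption that the rest of the play stays as above, would be a `serial` setting so a dist-upgrade rolls over a couple of servers per batch instead of every matched host at once.

```YAML
- hosts: all
  serial: 2  # upgrade at most two servers per batch
  tasks:
    - name: Upgrade all packages
      apt:
        upgrade: dist
        update_cache: true
        cache_valid_time: 86400  # one day
      register: apt_result
      retries: 3
      until: apt_result is succeeded
```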