Put bcfg2_reports and bcfg2-graph back, and rename the new directory
This commit is contained in:
parent
1df9d480af
commit
08007c623e
10 changed files with 0 additions and 0 deletions
@ -1,66 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generates a dependency graph of the bcfg2 groups

Authors: Daniel Stan
         Valentin Samir

"""
from __future__ import print_function
import xml.dom.minidom
import subprocess
import datetime

GRAPH_BIN = '/usr/bin/neato'
TMP_FILE = '/tmp/graph.gv'
GROUPS_FILE = '/var/lib/bcfg2/Metadata/groups.xml'
OUTPUT_FILE = '/usr/scripts/var/doc/bcfg2/groups.svg'
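
# Write the Graphviz source to a temporary file; it is rendered to SVG at the end of the script.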
out = open(TMP_FILE, 'w')

# groups is the root node of groups.xml
groups = xml.dom.minidom.parse(open(GROUPS_FILE, 'r')).documentElement

print("""digraph G {
edge [len=4.50, ratio=fill]
""", file=out)

def childGroups(parent):
    """Fetches the child Groups (dependencies) of a node.
    Note that this function works on (and returns) xml nodes.
    """
    return [x for x in parent.childNodes
            if x.nodeType == x.ELEMENT_NODE and x.tagName == u'Group']

# Clients (i.e. servers) are coloured differently
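# (a Group carrying a 'profile' attribute is a client profile; those nodes go into a filled cluster)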
print("""
subgraph cluster_1 {
node [style=filled];
""", file=out)
for elem in childGroups(groups):
    if elem.hasAttribute('profile'):
        print('"%s";' % elem.getAttribute('name'), file=out)
print("""
label = "process #2";
color=blue
}
""", file=out)
# The remaining groups
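# Emit one edge from each top-level group to every Group nested under it.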
for elem in childGroups(groups):
    print('"%s" -> {%s};' % (
        elem.getAttribute('name'),
        " ".join(['"%s"' % x.getAttribute('name')
                  for x in childGroups(elem)]),
    ), file=out)

print("""
label = "\\n\\nBCFG2 Groups\\nLes Nounous\\n%s";
}""" % datetime.datetime.now().strftime('%c'), file=out)

out.close()

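# Render the temporary .gv file to SVG with neato.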
subprocess.Popen([GRAPH_BIN, "-Tsvg", TMP_FILE, "-o", OUTPUT_FILE]).communicate()
@ -1,52 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

""" Sends an email with the list of servers that are out of sync with bcfg2.

If called without the ``--mail`` option, prints the result to stdout.

Only lists hosts whose reports date from today, yesterday or the day before
(so that old servers left lying around do not show up).

"""

from __future__ import print_function

import sys
import time
import datetime
import subprocess
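
# Date window used to filter the bcfg2-reports output: today, yesterday and the day before.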
unjour = datetime.timedelta(1)

aujourdhui = datetime.date(*time.localtime()[:3])
hier = aujourdhui - unjour
avanthier = aujourdhui - unjour*2

def get_dirty():
    """Fetches the recently dirty hosts."""
    proc = subprocess.Popen(["/usr/sbin/bcfg2-reports", "-d"],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # | sort
    out, err = proc.communicate()
    if err:
        print(err, file=sys.stderr)
    if proc.returncode != 0:
        return (False, out)
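    # Keep only report lines whose date (strftime "%F", i.e. YYYY-MM-DD) falls inside the window.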
    out = [l for l in out.split("\n")
           if any(date.strftime("%F") in l for date in [aujourdhui, hier, avanthier])]
    out.sort()
    return True, "\n".join(out)

if __name__ == "__main__":
    success, hosts = get_dirty()
    if not success:
        print(hosts, file=sys.stderr)
        exit(1)
    debug = "--debug" in sys.argv
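    # With --mail, send a non-empty report to the roots; otherwise print it to stdout.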
    if "--mail" in sys.argv:
        if hosts != "":
            sys.path.append("/usr/scripts/")
            import utils.sendmail
            utils.sendmail.sendmail("root@crans.org", "roots@crans.org",
                                    u"Serveurs non synchronisés avec bcfg2", hosts,
                                    more_headers={"X-Mailer": "bcfg2-reports"}, debug=debug)
        elif debug:
            print("Empty content, no mail sent")
    else:
        print(hosts, end="")