#!/bin/bash /usr/scripts/python.sh
# -*- coding: utf-8 -*-
"""
Script that accounts the members' upload traffic, and throttles their
upstream bandwidth if they exceed the allowed quota
"""

################################################################################
#                                   Imports                                    #
################################################################################

import sys
import psycopg2
import psycopg2.extras
import datetime
import pytz
import cStringIO

import gestion.affichage as affichage
import lc_ldap.shortcuts as shortcuts
import lc_ldap.objets as objets

from gestion.config import NETs, plage_ens, prefix
from gestion.config import upload as upload
import surveillance.analyse2 as analyse
import gestion.mail as mail_module

# When this is True, the script only prints instead of doing any harm
DEBUG = False

EPOCH = pytz.utc.localize(datetime.datetime(1970, 1, 1))
TZ = pytz.timezone('Europe/Paris')
# Accounting window: upload.interval hours
DELTA = datetime.timedelta(0, upload.interval * 3600, 0)
CUR_DATE = TZ.normalize(TZ.localize(datetime.datetime.now()))

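# The query below sums the uploaded bytes per source MAC over the window
# [now - %(delta_inf)s, now - %(delta_sup)s], ignoring traffic to or from the
# local and administrative networks, as well as the (ip_src, ip_dst) and
# (mac_src, ip_dst) pairs exempted in the exemptes and exemptes6 tables. The
# per-MAC totals are then joined against the deduplicated machines table and
# aggregated per owner (type, id).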
QUERY = """WITH
machines_sans_doublon AS (
    SELECT DISTINCT ON (mac_addr) *
    FROM machines
)
SELECT
    SUM(agregat.total) AS up, machines_sans_doublon.type, machines_sans_doublon.id
FROM (
    SELECT
        'upload', sum(bytes) AS total, mac_src
    FROM
        upload
    WHERE
        stamp_updated > now() - %(delta_inf)s
        AND stamp_updated < now() - %(delta_sup)s
        AND NOT (
            ip_dst <<= inet%(plage_ens)s
            OR ip_dst <<= inet%(plage_ipv6)s
            OR ip_dst <<= inet%(appt)s
            OR ip_src <<= inet%(ipv6_local)s
            OR ip_src = inet'0.0.0.0'
            OR ip_src <<= inet%(plage_adm)s
            OR ip_dst <<= inet%(plage_adm)s
        )
        AND (
            ip_src <<= inet%(allone)s
            OR ip_src <<= inet%(alltwo)s
            OR ip_src <<= inet%(plage_ipv6)s
            OR ip_src <<= inet%(appt)s
        )
        AND NOT EXISTS (
            SELECT 1
            FROM exemptes
            WHERE upload.ip_src <<= exemptes.ip_crans
            AND upload.ip_dst <<= exemptes.ip_dest
        )
        AND NOT EXISTS (
            SELECT 1
            FROM exemptes6
            WHERE upload.mac_src = exemptes6.mac_crans
            AND upload.ip_dst <<= exemptes6.ip_dest
        )
    GROUP BY
        mac_src
) AS agregat
INNER JOIN
    machines_sans_doublon
ON
    machines_sans_doublon.mac_addr = agregat.mac_src
GROUP BY
    machines_sans_doublon.type, machines_sans_doublon.id
ORDER BY
    up;
"""


def get_last_update(curseur):
    """Queries the PostgreSQL database for the date
    of the last update."""

    requete = """SELECT DISTINCT date FROM accounting;"""
    curseur.execute(requete)
    result = curseur.fetchall()
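
    # Several distinct dates mean the table is in an inconsistent state, and
    # no date at all means it is empty; in both cases, wipe the table and
    # return the Unix epoch, so that the delta computed by get_upload_data()
    # is large enough to trigger a full recount.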
    if len(result) > 1 or not result:
        curseur.execute("TRUNCATE accounting;")
        result = [[pytz.utc.localize(datetime.datetime.utcfromtimestamp(0))]]
    return result[0][0]


def get_upload_data(curseur):
    """Runs the accounting queries against the PostgreSQL database."""

    # Whether a subtraction must be performed
    substract = True
    to_substract = {}

    # Fetch the date of the last database update.
    last_update = get_last_update(curseur)

    delta = TZ.normalize(TZ.localize(datetime.datetime.now())) - last_update

    # The delta counts twice, so as soon as it exceeds 12h it is better to
    # flush and start over.
    if delta >= datetime.timedelta(0, upload.interval/2 * 3600, 0):
        curseur.execute("TRUNCATE accounting;")
        substract = False
        delta = DELTA

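    # Worked example (assuming upload.interval == 24, i.e. DELTA == 24h):
    # if the last update was 2h ago, to_substract covers traffic aged between
    # 24h and 26h (delta_inf = DELTA + delta, delta_sup = DELTA), which has
    # just slid out of the window, while to_add covers traffic aged between
    # 0h and 2h, which has just slid in. Added to what is already logged, the
    # accounting therefore always describes the last 24 hours.
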
    # If we are doing an update (and not a fresh full count)
    if substract:
        curseur.execute(QUERY, {
            'plage_ens': plage_ens,
            'allone': NETs['all'][0],
            'alltwo': NETs['all'][1],
            'plage_ipv6': prefix['subnet'][0],
            'appt': NETs['personnel-ens'][0],
            'ipv6_local': 'fe80::/8',
            'plage_adm': NETs['adm'][0],
            'delta_inf': DELTA + delta,
            'delta_sup': DELTA,
        })
        to_substract = curseur.fetchall()

    # Also fetch what is already in the database (possibly nothing)
    curseur.execute("SELECT * FROM accounting;")
    already_logged = curseur.fetchall()

    # And count what must be added (potentially everything)
    curseur.execute(QUERY, {
        'plage_ens': plage_ens,
        'allone': NETs['all'][0],
        'alltwo': NETs['all'][1],
        'plage_ipv6': prefix['subnet'][0],
        'appt': NETs['personnel-ens'][0],
        'ipv6_local': 'fe80::/8',
        'plage_adm': NETs['adm'][0],
        'delta_inf': delta,
        'delta_sup': datetime.timedelta(0),
    })
    to_add = curseur.fetchall()

    if DEBUG:
        print to_substract
        print already_logged
        print to_add
    return (to_substract, already_logged, to_add)


def account(curseur):
    """Fetches the data and merges it."""

    accounted = {}

    to_substract, already_logged, to_add = get_upload_data(curseur)

    # Entries that are too old are counted negatively
    for entry in to_substract:
        accounted[(entry['type'], entry['id'])] = -entry['up']
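
    # (The net effect per (type, id) is: previously logged total, plus fresh
    # traffic, minus the traffic that has just slid out of the window.)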
    # The rest, positively.
    for entry in already_logged + to_add:
        accounted[(entry['type'], entry['id'])] = accounted.get((entry['type'], entry['id']), 0) + entry['up']

    return accounted


def upload_hard(proprio, elupload, elid, eltype, curseur, ldap):
    """Hard-blacklists the owner, and sends the emails"""
    # Test: validation_url('upload')
    try:
        data = {
            'dn': proprio.dn.split(',')[0],
            'blid': len(proprio['blacklist'])
        }
        reco_url = mail_module.validation_url('upload', data)
        reco_url_error = u""
    except Exception as error:
        reco_url_error = u"[[generation error: %r]]" % error
        reco_url = u""

    # Find out where and when the owner's machines
    # were last seen
    ####################################################
    machines = proprio.machines()
    macs_dates_chambres = []
    for machine in machines:
        if isinstance(machine, objets.machineFixe):
            mac = unicode(machine.get('macAddress', [u'<automatique>'])[0])

            # Skip automatically assigned addresses
            if mac == u'<automatique>':
                continue

            date, chambre = reperage_chambre(mac)
            macs_dates_chambres.append([mac, date, chambre])

    mdcf = affichage.tableau(macs_dates_chambres, ('mac', 'date', 'chambre'), (20, 21, 7), ('c', 'c', 'c'))

    mail_data = {
        'from': upload.expediteur,
        'to': proprio.get_mail(),
        'upload': "%.2f" % (elupload,),
        'proprio': proprio,
        'lang_info': 'English version below',
        'mdc': mdcf,
        'chambre': unicode(proprio.get('chbre', [u'????'])[0]),
        'id': unicode(proprio.dn.split(',')[0]),
        'reco_url': reco_url,
        'reco_url_error': reco_url_error,
        'cron_date': CUR_DATE.strftime("%H:%M:%S%z"),
    }

    # Apply the sanction
    ###############
    debut = TZ.normalize(TZ.localize(datetime.datetime.now()))
    stamp_debut = int((debut - EPOCH).total_seconds())
    end = debut.strftime("%Y/%m/%d %H:%M:%S")
    hier = TZ.normalize(debut - datetime.timedelta(1)).strftime("%Y/%m/%d %H:%M:%S")
    # No automatic reconnection if a reconnection URL is available
    if reco_url:
        fin = "-"
    else:
        fin = int((TZ.normalize(debut + datetime.timedelta(1)) - EPOCH).total_seconds())

    self_call_type = proprio.dn.split(',')[0].split('=')[0]

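    # Everything below (blacklist, SQL record, traffic analysis) runs in a
    # single try block: if any step fails, the error is written to stderr
    # and the owner is skipped.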
    try:
        with proprio:
            proprio.blacklist(u'autodisc_upload', u'Upload %s Mo' % (elupload,),
                              stamp_debut, fin)
            proprio.history_gen()
            if not DEBUG:
                proprio.save()
            else:
                print proprio['blacklist']
                proprio.cancel()

        # Record the instance in the avertis_hard table
        ######################################################
        if not DEBUG:
            curseur.execute("INSERT INTO avertis_upload_hard (type, id, date) VALUES (%s, %s, 'now')", (eltype, elid))
            analyse.self_call([
                "--%s" % (self_call_type,),
                "%s" % (elid,),
                "--dns",
                "--begin",
                "%s" % (hier,),
                "--end",
                "%s" % (end,),
                "--limit", upload.analyse_limit,
                "--fichier", upload.analyse_file_tpl % (end.replace("/", "_").replace(":", "_").replace(" ", "_"), self_call_type, elid)
            ])
    except Exception as error:
        sys.stderr.write("Blacklisting of %s for %s MB failed: %s\n" % (proprio.dn.split(',')[0], elupload, error))
        return

    # Check the number of disconnections
    #########################################
    nb_decos = sum(
        blacklist['type'] == u'autodisc_upload' and
        int(blacklist['debut']) > stamp_debut - upload.periode_watch
        for blacklist in proprio['blacklist']
    )

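    # sum() over a generator of booleans counts the True entries: here, the
    # autodisc_upload blacklists that started within the last
    # upload.periode_watch seconds.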
    if nb_decos >= upload.max_decos:
        mail_data.update({
            'nb_decos': nb_decos,
            # NB: fiche_deco is not defined in this module; it presumably
            # comes from the upload configuration or a missing import.
            'fiche_deco': fiche_deco,
        })

    # Send an email to the member
    # and an email to disconnect
    ################################
    with mail_module.ServerConnection() as smtp_conn:
        if not DEBUG:
            smtp_conn.send_template('upload_hard', mail_data)
        else:
            print mail_module.generate('upload_hard', mail_data).as_string()

        # And send ourselves a notification email
        mail_data['to'] = upload.expediteur
        if not DEBUG:
            smtp_conn.send_template('upload_notif', mail_data)
        else:
            print mail_module.generate('upload_notif', mail_data).as_string()


def upload_soft(proprio, elupload, elid, eltype, curseur):
    """Sends an email and stores the info"""
    # Record the ip in the soft warnings table
    ################################################
    if not DEBUG:
        curseur.execute("INSERT INTO avertis_upload_soft (type, id, date) VALUES (%s, %s, 'now')", (eltype, elid))

    # Send an email to the member
    ################################
    with mail_module.ServerConnection() as smtp_connect:
        mail_data = {
            'from': upload.expediteur,
            'to': proprio.get_mail(),
            'upload': "%.2f" % (elupload,),
            'proprio': proprio,
            'lang_info': 'English version below',
            'limite_soft': upload.soft,
            'limite_hard': upload.hard,
        }
        if not DEBUG:
            smtp_connect.send_template('upload_soft', mail_data)
        else:
            print mail_module.generate('upload_soft', mail_data).as_string()


def single_check(eltype, elid, elupload, ldap, curseur):
    """Checks a single user, and either applies
    a blacklist or sends a warning."""

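    # elupload is expressed in MiB (check_and_blacklist() divides the byte
    # count by 1024 * 1024), the same unit the upload.soft and upload.hard
    # thresholds are compared against.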
    if elupload >= upload.hard:
        # Has the member already been blacklisted?
        ####################################
        if [eltype, int(elid)] in AVERTIS_UPLOAD_HARD and not DEBUG:
            return

        # Owner fetched from LDAP
        ###########################
        if eltype == 'club':
            proprio = ldap.search(u'(cid=%d)' % (int(elid),), 'w')
        elif eltype == 'adherent':
            proprio = ldap.search(u'(aid=%d)' % (int(elid),), 'w')
        else:
            return

        if len(proprio) != 1:
            print 'Error: owner not found (%s) %d' % (eltype, elid)
            return

        proprio = proprio[0]
        upload_hard(proprio, elupload, elid, eltype, curseur, ldap)

    elif elupload >= upload.soft:
        # Has the member already been warned, or already been disconnected?
        #################################################
        if ([eltype, int(elid)] in AVERTIS_UPLOAD_SOFT or [eltype, int(elid)] in AVERTIS_UPLOAD_HARD) and not DEBUG:
            return

        # LDAP objects
        #############
        if eltype == 'club':
            proprio = ldap.search(u'(cid=%d)' % (int(elid),), 'w')
        elif eltype == 'adherent':
            proprio = ldap.search(u'(aid=%d)' % (int(elid),), 'w')
        else:
            return

        if len(proprio) != 1:
            print 'Error: owner not found (%s) %d' % (eltype, elid)
            return

        proprio = proprio[0]
        upload_soft(proprio, elupload, elid, eltype, curseur)


def check_and_blacklist(accounted, curseur, ldap):
    """Takes the dict of uploaders, and blocks those that are
    over the limit"""

    for ((eltype, elid), elupload) in accounted.iteritems():
        single_check(eltype, elid, float(elupload)/1024/1024, ldap, curseur)

    # Delete the old warnings
    # (23h40 for the hard ones, so that blacklists still over the limit get extended)
    curseur.execute("DELETE FROM avertis_upload_hard WHERE date < timestamp 'now' - interval '85200 seconds'")
    curseur.execute("DELETE FROM avertis_upload_soft WHERE date < timestamp 'now' - interval '1 day'")


def main(curseur, ldap):
    """Calls the other functions"""

    # Fetch the data
    accounted = account(curseur)

    # Build a stream that PostgreSQL will then consume
    stream = cStringIO.StringIO("\n".join(["%s\t%s\t%s\t%s" % (key[0], key[1], value, CUR_DATE) for (key, value) in accounted.iteritems()]))
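    # Each row of the stream is tab-separated, e.g. (hypothetical values):
    # "adherent\t1234\t5368709120\t2014-01-01 12:00:00+01:00", which
    # copy_from() maps onto the columns of the accounting table in order.
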
    # Update accounting, using a new connection that performs the
    # truncate/copy within a single transaction.
    conn = psycopg2.connect(database='filtrage', user='crans')
    curs = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    curs.execute("TRUNCATE accounting;")
    curs.copy_from(stream, "accounting")
    conn.commit()

    # Check and blacklist
    check_and_blacklist(accounted, curseur, ldap)


# To find the room where the machine being disconnected was located.
# TODO: move this into annuaires_pg
def reperage_chambre(mac):
    """Finds the room where the MAC address was last seen"""
    if mac == '<automatique>':
        return "Inconnue", "Inconnue"

    pgsql = psycopg2.connect(host="thot.adm.crans.org", database='mac_prises', user='crans')
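    # Note: each call opens its own connection to the mac_prises database
    # and never closes it explicitly; acceptable here, since it only runs
    # for the few machines of a sanctioned owner.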
    # A priori not needed, we only run selects
    pgsql.set_session(autocommit=True)
    curseur = pgsql.cursor()
    requete = "SELECT date, chambre FROM correspondance WHERE mac=%s ORDER BY date DESC LIMIT 1;"
    curseur.execute(requete, (mac,))
    result = curseur.fetchall()
    if result:
        return result[0][0], result[0][1]
    else:
        return "Inconnue", "Inconnue"


if __name__ == "__main__":
    LDAP = shortcuts.lc_ldap_admin()

    # Connection to the SQL database via pgsql
    PGSQL = psycopg2.connect(database='filtrage', user='crans')
    PGSQL.set_session(autocommit=True)
    CURSEUR = PGSQL.cursor(cursor_factory=psycopg2.extras.DictCursor)

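    # The avertis_* tables record who was already warned or blacklisted
    # within the current window, so that nobody gets sanctioned twice.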
    # Hard upload warnings
    REQUETE = "SELECT type, id FROM avertis_upload_hard WHERE date > now() - %(interval)s;"
    CURSEUR.execute(REQUETE, {
        'interval': DELTA,
    })

    AVERTIS_UPLOAD_HARD = CURSEUR.fetchall()

    # Soft upload warnings
    REQUETE = "SELECT type, id FROM avertis_upload_soft WHERE date > now() - %(interval)s"
    CURSEUR.execute(REQUETE, {
        'interval': DELTA,
    })

    AVERTIS_UPLOAD_SOFT = CURSEUR.fetchall()

    main(CURSEUR, LDAP)