plus utilisés (les scripts utilisés sont les scripts de xabi dans

/usr/scripts/surveillance)

darcs-hash:20051025182637-4ec08-e5b5aad14e959bcdc7ee559aeb3d17c36495b591.gz
This commit is contained in:
chove 2005-10-25 20:26:37 +02:00
parent 86fdb945d9
commit 85340419f2
7 changed files with 0 additions and 1210 deletions

View file

@ -1,84 +0,0 @@
#!/bin/sh
##
## Analyse
##
## Made by Tab <rv@crans.org>
##
## Started on Tue 09 Oct 2001 01:28:25 AM CEST tab
## MAJ : 06/02/2003 -- Fred
##
## Builds the daily report (auto-disconnections, virus blocks, upload
## statistics) into $TEMPFILE, then mails it to disconnect@crans.org.

TEMPFILE="/tmp/analyse.mail"
# Options for the traffic statistics tool.  These variables hold several
# words on purpose and are expanded UNQUOTED below so they split into
# separate arguments.
PROG="/usr/scripts/analyse_komaz/nacct.py"
OPTIONS="-N 15"
OPTIONS2="-N 15 -c 2"
OPTIONS3="-N 15 -c 5"
# Misc paths
BLACKLIST="/var/zamok/CRANS/blacklist.cf"
AUTODISC_LOG="/var/log/autodisconnect.log"
VIRUS_BLACKLIST="/tmp/virus_blacklist"
#################################################################################
# Hosts auto-disconnected during the last 24 hours: print their history.
echo "Bilan des autodisconnexions des dernières 24h :" >> "$TEMPFILE"
echo "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=" >> "$TEMPFILE"
if [ -e "$AUTODISC_LOG" ]; then
    # Word-splitting of the awk output is intentional: one hostname per word.
    for host in $(awk -F'[ .]' '/'$(date --date=yesterday +%d-%m-%Y)'-(0[7-9]|1|2)|'$(date +%d-%m-%Y)'-0[0-6]/ {print $2}' "$AUTODISC_LOG")
    do
        echo "$host :"
        # Merge automatic (log) and manual (blacklist) disconnection records,
        # sort newest-first on a sortable "yyyymm@..." prefix, then strip it.
        ( awk -F'-' '/'$host'.crans.org/ {print $3$2"@"$1"/"$2"/"$3" : autodisconnecté"}' "$AUTODISC_LOG"
          awk -F'[:/ ]' '/^'$host'/ || /^#'$host'/ {print $6$5"@"$4"/"$5"/"$6" : déco manuelle (->"$7"/"$8"/"$9")"}' "$BLACKLIST" ) \
        | sort -r \
        | awk -F@ '{print "\t"$2}'
    done >> "$TEMPFILE"
else
    printf '%s\n' "Problème avec $AUTODISC_LOG" >> "$TEMPFILE"
fi
printf '\n\n' >> "$TEMPFILE"
#################################################################################
# Infected machines
if [ -e "$VIRUS_BLACKLIST" ]; then
    echo "Bilan des bloquages web pour virus :" >> "$TEMPFILE"
    echo "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-" >> "$TEMPFILE"
    echo "Il y a $(wc -l "$VIRUS_BLACKLIST" | awk '{print $1}') machines infectées." >> "$TEMPFILE"
    if [ -e "$VIRUS_BLACKLIST.hier" ] ; then
        # Only show what changed since yesterday's snapshot.
        diff -U0 -s "$VIRUS_BLACKLIST.hier" "$VIRUS_BLACKLIST" \
        | egrep -v '\-\-\-|\+\+\+|@@' >> "$TEMPFILE"
    else
        cat "$VIRUS_BLACKLIST" >> "$TEMPFILE"
    fi
    # Keep today's list as the reference for tomorrow's diff.
    cp -f "$VIRUS_BLACKLIST" "$VIRUS_BLACKLIST.hier"
    printf '\n\n' >> "$TEMPFILE"
fi
#################################################################################
# Statistics
echo "Statistiques upload/download :" >> "$TEMPFILE"
echo "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-" >> "$TEMPFILE"
if [ -x "$PROG" ]; then
    # printf is used instead of the non-portable "echo -e" (this script
    # declares #!/bin/sh, and dash's echo does not understand -e).
    printf 'Upload (Download) pur normal :\n\n' >> "$TEMPFILE"
    $PROG $OPTIONS | tail -15 | awk -F "|" '{print $1" ("$3")"" "$7}' >> "$TEMPFILE"
    printf '\nUpload total normal :\n\n' >> "$TEMPFILE"
    $PROG $OPTIONS2 | tail -15 | awk -F "|" '{print $2" "$7}' >> "$TEMPFILE"
    printf '\nUpload total exempté :\n\n' >> "$TEMPFILE"
    $PROG $OPTIONS3 | tail -15 | awk -F "|" '{print $5" "$7}' >> "$TEMPFILE"
else
    printf '%s\n' "Problème avec $PROG" >> "$TEMPFILE"
fi
#################################################################################
# Send the mail, then clean up.
/usr/bin/mail -s "Stats -- $(date --date=yesterday '+%A %d %B %Y')" disconnect@crans.org < "$TEMPFILE"
rm -f "$TEMPFILE"

View file

@ -1,215 +0,0 @@
#!/bin/zsh
##
## autodisconnect
##
## Made by stransky
## Login stransky <stransky@crans.org>
##
## Started on ven 25 jan 2002 22:24:26 CEST Nicolas STRANSKY
## Last update lun 03 nov 2003 07:38:51 CET Nicolas STRANSKY
##
## Automatic upload-monitoring script, meant to be run from cron.
## Reads the per-host upload totals produced by nacct.py, warns heavy
## uploaders by mail, firewalls (REJECT) the worst offenders and later
## reconnects them once their upload drops again.
export LANG=fr_FR@euro
# Thresholds, in MB uploaded over the monitored period:
LIMITE_SOFT=100 # above this: warning mail to the owner
LIMITE_HARD=700 # above this: machine is firewalled
MAIL_INTERVAL=100 # re-warn after this many additional MB
LOG=/var/log/autodisconnect.log
LOGFILE=/var/log/net-acct/net-acct.log
outpF=$(mktemp) # temporary file holding raw nacct.py output
TEMPFILE=/tmp/uploaders
LISTFILE=/tmp/liste
EMPREINTE_NEW=/tmp/empreinte-new
EMPREINTE=/tmp/empreinte
HABITUES=/tmp/habitues # list of the system's regulars (not re-warned)
BLACKLISTES=/tmp/blacklistes
BLACKLISTES_NEW=${BLACKLISTES}-new
BLACKLISTE_ZAMOK=/var/zamok/CRANS/blacklist.cf
VERIF=0 # a summary mail is sent to disconnect if VERIF=1
RENEW=1 # if this stays 1, the fingerprint file is emptied at the end.
date=$(date +%A\ %d\ %B\ %Y\ \ %T)
date2=$(date +%A\ %d\ %B\ %Y)
date3=$(date +%d-%m-%Y-%T)
DIR=/usr/scripts/analyse_komaz
# If /tmp/exempts.pickle does not exist, regenerate it with exempts.py
if [ ! -f /tmp/exempts.pickle ]; then
$DIR/exempts.py
fi
# Re-apply the blacklist first (in case the firewall was restarted and its
# BLACKLIST_SRC chain lost the REJECT rules recorded in $BLACKLISTES).
if [ -f $BLACKLISTES ]; then
while read machine ; do
# Only insert the rule if this IP is not already REJECTed in the chain.
if ! (/sbin/iptables -nL BLACKLIST_SRC | grep -q "^REJECT.*$machine "); then
/sbin/iptables -I BLACKLIST_SRC -s $machine -j REJECT
echo blacklist $machine
fi
done < $BLACKLISTES
fi
# Return the email address registered for the machine given as parameter.
# $1: machine name as FQDN (e.g. lucas.crans.org).
# Falls back to disconnect@crans.org when no address can be found.
getAdresseMail() {
# Which member owns this machine? (second RDN of the host entry's DN)
aid=$(ldapsearch -LLL -H ldaps://sila.crans.org -x -D "cn=admin,dc=crans,dc=org" \
-w $(cat /etc/ldap.secret) -b "ou=data,dc=crans,dc=org" \
host=$1 dn | head -1 | awk -F, '{print $2}')
# Print his email address (a bare login becomes login@crans.org)
mail=$(ldapsearch -LLL -H ldaps://sila.crans.org -x -D "cn=admin,dc=crans,dc=org" \
-w $(cat /etc/ldap.secret) -b "ou=data,dc=crans,dc=org" $aid \
mail | awk -F': ' '($1 ~ /^mail$/) {if ($2 ~ /@/) print $2; else print $2"@crans.org"}')
if [[ -z $mail ]]; then
echo disconnect@crans.org
else
echo $mail
fi
}
# Start the report with a timestamp header.
echo $date > $LISTFILE
echo " " >> $LISTFILE
#/usr/scripts/analyse.pl -u -n 10 -f $LOGFILE --noperiod | egrep -v "zamok|komaz|sila" | \
#grep -v "Ko" | awk '{print $1" "$3}' | sed 's/Mo/ /g' | sed 's/\.[0-9]*Go/000 /g' > $TEMPFILE
# Top-25 uploaders, numeric IPs (-n), machines without DNS dropped.
$DIR/nacct.py -n -N 25| grep -v 'NoDNS_' > $outpF
# NOTE(review): $? below is the exit status of the *last* pipeline stage
# (grep), not of nacct.py, so the 255 "lock" status of nacct.py is unlikely
# to ever be seen here -- confirm before relying on this check.
if [ $? -eq 255 ]; then
echo "Problème avec nacct.py : lock"
exit 1
fi
# Keep "<upload> <ip>" pairs, excluding some 138.231.136.x addresses
# (presumably crans servers -- TODO confirm).
cat $outpF | tail -25 | awk -F "|" '{print $1" "$7}' | egrep -v "136\.(1|2|3|4|6|8|9|10)$" > $TEMPFILE
# Keep a history of nacct.py's output
echo -e "\n$date" >> /tmp/nacct_history
cat $outpF >> /tmp/nacct_history
# nacct.py prints a "=== ..." ruler right before its data: no ruler means
# no usable data at all.
if ! grep -q "^===========================================================================" $outpF ; then
echo "Pas de données ; problème avec nacct"
exit 1
fi
rm $outpF
while read upload machine ; do # read "<upload_MB> <ip>" pairs from $TEMPFILE
if ! (/sbin/iptables -nL BLACKLIST_SRC | grep -q "^REJECT.*$machine ") && [ $( echo $upload | cut -d '.' -f 1 ) -ge $LIMITE_SOFT ]
then # machine above the soft limit and not yet blacklisted,
RENEW=0 # so do not empty the fingerprint file this run
# Resolve the IP to a name for logs and mails.
hostname=`host $machine | awk '/Name:/ {print $2}'`
if [ $( echo $upload | cut -d '.' -f 1 ) -ge $LIMITE_HARD ]
then
# Hard limit exceeded: firewall the machine, record it, mail the owner.
/sbin/iptables -I BLACKLIST_SRC -s $machine -j REJECT
echo blacklist $hostname
echo "$hostname FIREWALISÉ ! ($upload Mo)" >> $LISTFILE
echo "$date3 $hostname $upload" >> $LOG
echo "$machine" >> $BLACKLISTES
VERIF=1
# Mail to warn the owner (could eventually use sendmail instead).
# Should the EMAIL variable be tested here?
EMAIL=$(getAdresseMail $hostname)
echo $EMAIL
cat <<EOF | mail -s "Deconnexion temporaire de $hostname" -a "From: disconnect@crans.org" -a "Reply-To: disconnect@crans.org" $EMAIL
Bonjour,
Ta machine $hostname a été temporairement déconnectée parce que tu uploadais
une quantité importante de données. Tu recevras dans moins de 24h
un mail indiquant ta reconnexion. Tu as toujours accès au web ainsi qu'à tes mails
mais les autres services sont suspendus. Si cela se renouvelle trop souvent, tu
risques d'être déconnecté entièrement pour une durée plus importante. Il
t'appartient donc de surveiller cela de plus près et de faire en sorte que ta
machine n'uploade pas de manière excessive à l'avenir.
Pour plus d'informations, tu peux consulter la page:
http://wiki.crans.org/moin.cgi/VieCrans_2fD_e9connexionPourUpload
Si tu as des questions, contacte disconnect@crans.org
EOF
elif ! (grep -q $hostname $EMPREINTE) # machine not yet spotted by autodisconnect
then
EMAIL=$(getAdresseMail $hostname)
# Skip the regulars, they already get enough mail as it is
if ! (grep -q $EMAIL $HABITUES); then
echo "$hostname uploade ($upload Mo)" >> $LISTFILE
echo "$hostname $upload" >> $EMPREINTE_NEW
VERIF=1 # NOTE(review): the original comment here claimed "no mail sent in this case", but VERIF=1 does trigger the summary mail below -- looks stale
cat <<EOF | mail -s "Ta machine $hostname uploade" -a "From: disconnect@crans.org" -a "Reply-To: disconnect@crans.org" $EMAIL
Bonjour,
Ta machine $hostname uploade une quantité importante de données vers l'extérieur
(actuellement, $upload Mo). Si cela continuait, elle serait automatiquement
déconnectée pour une durée d'environ 24 heures. Il t'appartient donc de
surveiller cela de plus près et de faire en sorte que ta machine n'uploade pas
de manière excessive à l'avenir.
Pour plus d'informations, tu peux consulter la page:
http://wiki.crans.org/moin.cgi/VieCrans_2fD_e9connexionPourUpload
Si tu as des questions, contacte disconnect@crans.org
EOF
fi
elif [ `echo $upload | cut -d '.' -f 1` -gt `echo $(cat $EMPREINTE | \
awk -v MACHINE=$hostname '$0 ~ MACHINE {print $2}')"/1+$MAIL_INTERVAL"|bc` ]
# if the machine uploaded $MAIL_INTERVAL MB more than the last time we
# mailed about it, mail again ("/1" in bc truncates to an integer).
then
echo "$hostname uploade encore. ($upload Mo)" >> $LISTFILE
echo "$hostname $upload" >> $EMPREINTE_NEW
else # otherwise keep the machine's current record
cat $EMPREINTE | grep $hostname >> $EMPREINTE_NEW
fi
fi
done < $TEMPFILE
# No offender seen this run: reset the fingerprint file, otherwise rotate it.
if [ $RENEW -eq 1 ] ; then cat /dev/null > $EMPREINTE ; fi
if [ -f $EMPREINTE_NEW ] ; then mv $EMPREINTE_NEW $EMPREINTE ; fi
# Reconnect previously disconnected machines
rm -f $BLACKLISTES_NEW
if [ -f $BLACKLISTES ]; then
while read machine ; do
# Is this machine still in the TOP of uploaders?
# Just to check where the problem is -- Nico
echo "$machine firewalisé en est encore à : $(awk '($2 == "'$machine'") {print $1}' $TEMPFILE)"
# Reconnect when the machine left the top list or fell below the soft limit.
if ! (grep -q $machine'$' $TEMPFILE) || \
[ $LIMITE_SOFT -ge $(awk '($2 == "'$machine'") {print $1}' $TEMPFILE | cut -d '.' -f 1 ) ]; then
VERIF=1
# It must be reconnected: drop it from BLACKLISTES
grep -v $machine $BLACKLISTES > $BLACKLISTES_NEW
hostname=`host $machine | awk '/Name:/ {print $2}'`
# Reconnection
if ! (grep -q "^$(echo $hostname | cut -f1 -d.)" $BLACKLISTE_ZAMOK )
then # if the machine is blacklisted on zamok, do not reconnect it
/sbin/iptables -D BLACKLIST_SRC -s $machine -j REJECT
echo "La machine $hostname a été reconnectée. ($(awk '($2 == "'$machine'") {print $1}' $TEMPFILE) Mo)" >> $LISTFILE
EMAIL=$(getAdresseMail $hostname)
cat <<EOF | mail -s "Reconnexion de $hostname" -a "From: disconnect@crans.org" -a "Reply-To: disconnect@crans.org" $EMAIL
Bonjour,
Ta machine $hostname avait été temporairement déconnectée, elle est maintenant
reconnectée. Vérifie que tu ne fais plus d'upload pour éviter de te faire
déconnecter une nouvelle fois.
EOF
else
echo "La machine $hostname reste blacklisstée sur zamok. ($(awk '($2 == "'$machine'") {print $1}' $TEMPFILE) Mo)" >> $LISTFILE
fi
echo reconnexion $machine
fi
done < $BLACKLISTES
fi
# Known bug: only one machine is removed per run, not too serious
if [ -f $BLACKLISTES_NEW ]; then cat $BLACKLISTES_NEW > $BLACKLISTES; fi
# Finally send the summary mail if anything happened this run.
if [ $VERIF -eq 1 ]
then
echo -e "\n-- \ncréé par autodisconnect." >> $LISTFILE
cat $LISTFILE | /usr/bin/mail -s "upload temps réel - $date2" -a "From: disconnect@crans.org" -a "Reply-To: disconnect@crans.org" pessoles@crans.org
fi
exit 0

View file

@ -1,42 +0,0 @@
#! /usr/bin/env python
# -*- encoding: iso-8859-15 -*-
"""Build the exemption table used by nacct.py and dump it to
/tmp/exempts.pickle.

exempts['dst']         : list of destination networks (regex strings) whose
                         traffic is exempted whatever the source.
exempts['src_dst'][IP] : same, but only for traffic coming from IP.
"""
import cPickle,re

exempts = { 'dst' : [], 'src_dst' : {} }

def addSrcDst(src_ip, patterns):
    # Append to (never overwrite) the per-source exemption list: several
    # independent rules may target the same source IP.  The original code
    # assigned exempts['src_dst'][ip] = [...] each time, so a second rule
    # for the same IP silently discarded the first one.
    exempts['src_dst'].setdefault(src_ip, []).extend(patterns)

exempts['dst'].append( '138\.231\..*' ) # * => *.ens-cachan.fr
addSrcDst('138.231.149.10', ['134\.157\.96\.216']) # rivendell.wifi.crans.org => *.ccr.jussieu.fr
addSrcDst('138.231.141.187', ['129\.104\.17\..*', '134\.157\.96\.216']) # barad-dur.crans => *.polytechnique.fr et *.ccr.jussieu.fr
addSrcDst('138.231.136.7', ['195\.221\.21\.36']) # egon => ftp.crihan.fr for rsync of the debian/fedora mirror
addSrcDst('138.231.143.62', ['193\.49\.25\.152', '138\.195\.34\..*']) # ogre => centrale / labo fast (psud)
addSrcDst('138.231.140.173', ['195\.220\.131\.33', '195\.220\.133\.98']) # duckien => rebol.ephe.sorbonne.fr oss.ephe.sorbonne.fr, 28/1/2005 -- Bilou
addSrcDst('138.231.137.230', ['129\.175\.100\.221']) # helene => orsay
addSrcDst('138.231.136.7', ['138\.195\..*']) # egon => centrale paris (BUG FIX: used to clobber egon's crihan rule above)
addSrcDst('138.231.139.106', ['138\.195\.74\..*']) # schuss => centrale paris
addSrcDst('138.231.139.106', ['138\.195\.75\..*']) # schuss => centrale paris (BUG FIX: used to clobber the previous line)
addSrcDst('138.231.150.106', ['157\.99\.164\.27']) # sayan-ftp.wifi => chile.sysbio.pasteur.fr

def compileRegs( exempts) :
    # Compile every pattern string in place.
    exempts['dst'] = [re.compile(s) for s in exempts['dst']]
    for k in exempts['src_dst'].keys() :
        exempts['src_dst'][k] = [re.compile(s) for s in exempts['src_dst'][k]]

compileRegs( exempts )

fd=open("/tmp/exempts.pickle","wb")
try:
    cPickle.dump(exempts, fd)
finally:
    fd.close() # BUG FIX: the file was never explicitly closed/flushed

View file

@ -1,28 +0,0 @@
#!/usr/bin/awk -f
# Read the firewall logs and print the list of flooding machines.
#
# Arguments:
#   <log file(s) to scan> <an extra file whose name contains "blacklist">
#   The second file lists IPs of extra machines to include in the output.
#
# Output format: Month day hh:mm:ss hostname nb_attacks
#
# 02/2003 Frédéric Pauget

# While reading the "blacklist" file: pre-seed each listed IP with a zero
# counter and a placeholder timestamp so it still shows up in END.
{ if (FILENAME~"blacklist") {
if ($0=="") nextfile;
tentatives[$0]=0;
dern_tentative[$0]="Vieux 00 00:00:00"; }
}
# One "Flood" hit on eth0 per line; field 9 carries "SRC=<ip>".
/.*Flood:IN=eth0.*/ {
gsub("SRC=","",$9);
tentatives[$9]++;
dern_tentative[$9]=$1" "$2" "$3;
}
END{
for (machine in tentatives){
# NOTE(review): log fields are interpolated straight into a shell command;
# safe only as long as the firewall log contains no shell metacharacters.
system("echo "dern_tentative[machine]" $(host "machine" 2>/dev/null | awk '/^Name/ {print $2}') "tentatives[machine])
}
}

View file

@ -1,572 +0,0 @@
#! /usr/bin/env python
# -*- encoding: iso-8859-15 -*-
## (C) Samuel Krempp 2001
## krempp@crans.ens-cachan.fr
## Permission to copy, use, modify, sell and
## distribute this software is granted provided this copyright notice appears
## in all copies. This software is provided "as is" without express or implied
## warranty, and with no claim as to its suitability for any purpose.
import os,sys,string,re,getopt,time
import cPickle # load/dump python objects
import socket
""" nacct.py : parse the logs+dump of net-acct, then print a summary of the last 24h
Usage : nacct.py [-N 20 ] : display the top-20
[-c 1] : sort by the first column.
[-n] : display numeric IP (instead of resolved hostnames)
[-p nacct.pickle ] : where to put the persistence file
[-f net-acct.log] [-f net.acct.log.0] [-u net-acct.dump]
[-T <duree> = 24 ] : analyse data of the last <duree> hours
[-T <duree2>= 2 ] : store data of the last <duree2> days.
[-h host] : prints details for given host, from the *persistent* file only.
[-t <date>=current time] : choose current time
e.g. : nacct.py -h toto.crans.org -t '2002 12 31 23:59' -T 48
will print details for toto on the 48 hours before given time.
"""
def isExempt(exempts, src_ip, dst_ip) :
    """Return 1 if traffic from src_ip to dst_ip is exempted, else 0.

    exempts['dst'] is a list of compiled regexes matched against dst_ip
    whatever the source; exempts['src_dst'][src_ip] is an optional
    per-source list of compiled regexes, also matched against dst_ip.
    (Indentation restored; original used a flag variable and the
    deprecated dict.has_key() -- early returns and .get() are equivalent.)
    """
    for r in exempts['dst'] :
        if r.search(dst_ip) :
            return 1
    for r in exempts['src_dst'].get(src_ip, []) :
        if r.search(dst_ip) :
            return 1
    return 0
def parseInputUntil ( f, prevline, conns_DB, exempts, end_time) :
""" reads lines of the file f _until_ the timestamp is >= end_time
data structure :
. exempts [ src ] [ dst ] : if exists, count this traffic separately ('exempted' bytes)
. conns_DB [ IP ] = [ PURE_upload_bytes, upload_bytes, download_bytes # normal bytes
, gPUL, gUL, gDL ] # 'exempted' bytes
(i.e. there are 6 counters, 3 for normal bytes, 3 for exempted bytes)
optionnally, prev_line is a line to be parsed before processing the file
(used because we read next line's timestamps)
Returns : (nextline, last_time, got_nothing)
. nextLine : similar to prevLine
. last_time : a timestamp such that : . all read timeStamps are <= last_time
. all unread timeStamps are > last_time
in practice, last_time is either the greatest read timeStamp, or (nextline's timestamp) - 1
. got_nothing : true iff the file was completely empty.
"""
# NOTE(review): the leading indentation of this function was lost when the
# file was exported to this view; the statements below are kept verbatim.
# NOTE(review): timestamps are compared as *strings* (end_time is repr()'d
# below) -- correct only while all epoch timestamps have the same width.
got_nothing = 1
nextline = "" # in case a line needs to be parsed at next call..
last_time = 0
t=0
src_ip=""
dst_ip=""
size=0
# local variable aliases (optimising lookup..)
lsplit = string.split; llong=long;
lregLAN=regLAN
end_time=repr(end_time)
(prev_stime, prev_proto, prev_src_ip, prev_src_port, prev_dst_ip, prev_dst_port, prev_size, pd)\
= ["" ] * 8
prev_is_symmetric = 1
# prev_m_src appears unused below -- presumably leftover; TODO confirm
prev_m_src = 0
lineN=0
while(1) :
if not prevline :
line = f.readline()
lineN += 1
else :
line = prevline
prevline=""
if not line : break
got_nothing = 0
# net-acct lines are TAB-separated: time, proto, src, sport, dst, dport, size, rest
(stime, proto, src_ip, src_port, dst_ip, dst_port, size, pd) = lsplit(line, '\t', 7)
if stime >= end_time :
nextline=line
# if a whole slice is absent in logs, we need to set last_time here :
if last_time =="" : last_time = int(stime) - 1
break
else :
last_time = stime
if 1 : # now really PARSE the line :
try:
size=llong(size)
except ValueError:
raise ValueError("INCORRECT size \"%s\" at line %d : %s " % (size, lineN, line) )
# Upload : counter index 1 (normal) or 4 (exempted, offset 3)
is_exempt=0
if isExempt(exempts, src_ip, dst_ip) :
is_exempt = 3
try:
conns_DB[src_ip ][is_exempt +1] += size
except KeyError:
conns_DB[src_ip ] = [long(0)]*6
conns_DB[src_ip ][is_exempt +1] = long(size)
# PURE Upload : a pair of mirror-image records at the same timestamp is a
# bidirectional flow; only the bigger half counts as "pure" upload.
is_symmetric = ( prev_src_ip == dst_ip and prev_src_port== dst_port and \
prev_dst_ip == src_ip and prev_dst_port== src_port and \
prev_stime == stime and prev_proto==proto )
if is_symmetric :
try :
if prev_size > size :
conns_DB[prev_src_ip ][ prev_is_exempt + 0] += prev_size
else:
conns_DB[src_ip ][ is_exempt +0] += size
except KeyError:
print "proto=%s %s, src_ip=%s %s" % (prev_proto, proto, prev_src_ip, src_ip)
else :
if prev_is_symmetric == 0 :
# previous line has no symetrical transfer, assume PURE upload
conns_DB[prev_src_ip ][ prev_is_exempt + 0] += prev_size
# Download : counter index 2 (normal) or 5 (exempted)
#m=lregLAN.search(dst_ip)
if 1:
dst_is_exempt=0
if isExempt(exempts, dst_ip, src_ip) :
dst_is_exempt = 3
try:
conns_DB[dst_ip ][dst_is_exempt +2] += size
except KeyError:
conns_DB[dst_ip ] = [long(0)]*6
conns_DB[dst_ip ][dst_is_exempt +2] = long(size)
(prev_stime, prev_proto, prev_src_ip, prev_src_port) = (stime, proto, src_ip, src_port)
(prev_dst_ip, prev_dst_port, prev_size) = (dst_ip, dst_port, size)
(prev_is_exempt, prev_is_symmetric) = (is_exempt, is_symmetric)
return (nextline, int(last_time), got_nothing)
def readSlices(inFile, db, exempts, slice0) :
"""Loop on time slices, and parse the file step by step

Feeds parseInputUntil() one timeStep-wide slice at a time, creating the
per-slice sub-dict in db on demand.  Returns the last timestamp read
(0 when the file yielded nothing).
"""
prevLine=""; last_time=0
slice= slice0
while 1 : # loop on time_slice
end_time = (slice+1) * timeStep
u=db[slice]
(prevLine, lTime, got_nothing) = parseInputUntil(inFile, prevLine, db [slice], exempts, end_time)
if got_nothing :
break
if lTime != 0 : last_time = lTime
# jump straight to the slice of the last read entry if it is further ahead
slice = max ( 1+slice, last_time / timeStep)
if not db.has_key(slice) :
db[slice]={}
return (last_time)
def readFile(file_info, db, exempts ) :
""" reads -completely, partially, or not at all- a list of rotated files.
1/ find the file in the list that is the first that contains new data
2/ seek the position where we stopped and read the file, and the newer ones.
file_info fields used here :
['fnames'] : list of the rotated-files for one log, e.g. ['net-acct.log', 'net-acct.log.0']
must be in anti-chronological order (else the script aborts)
['prev_pos'] : offset-position pointig where we stopped reading at previous call
(because of log-rotates, we have to guess for which file this offset is..)
['last_time'] : timestamp of the last read entry of this log
used to guess which file was opened previously, and which are new.
"""
if debug :
print "VeryBeg: lasttime = %d" % file_info.get('last_time', 777)
file_info.setdefault('last_time',0)
# 1.
# Where did we stop, on the previous execution ?
# in the list of files, find which need update => [0, end_of_new[ :
# times[i] holds the first timestamp of fnames[i] (0 if unreadable/empty)
times = [0]*len(file_info['fnames'])
min_time=0
i=-1
for name in file_info['fnames'] :
i += 1
try :
inFile=open(name,"rb")
s = inFile.readline()
inFile.close()
except IOError :
continue
if not s :
continue
t = int( string.split(s, '\t')[0] )
assert t > 1
# enforce anti-chronological ordering of the rotated files
if min_time != 0 : assert t <= min_time
min_time = t
times[i] = t
end_of_new=0
if file_info['last_time']==0 : # first time we read those files
file_info['last_time'] = min_time-1
end_of_new = len(times)
else : # we have archives about those files, see which files are new/updated
for t in times :
end_of_new += 1
if t <= file_info['last_time'] :
break # the ones before are old ones. this is last updated one.
FileNames=file_info['fnames'][0:end_of_new]
if debug :
print "first data at %s(%d), fileTimes= %s" % \
(time.asctime(time.localtime(file_info['last_time'])),
file_info['last_time'], times)
print "We need to read/update %s" % (FileNames)
# everything on disk is newer than what we stored: start from offset 0
if file_info['last_time'] < min_time :
file_info['prev_pos'] = 0
if file_info.get('reset_if_new', 0) :
# erase counters, they are no longer wanted.
for k in db.keys() :
del db[k]
slice0= file_info['last_time'] / timeStep
# 2.
# everything's ready, loop on files, and parse them.
FileNames.reverse() # start with the oldest
times=times[0:end_of_new]
# only the previously-open file resumes at prev_pos; newer ones start at 0
Files_and_pos= zip( FileNames, [file_info['prev_pos']] + [0]*(end_of_new-1) )
last_time=0; last_pos =0
i=len(FileNames)
for (fname, pos) in Files_and_pos :
i -= 1
if debug :
print " read %s => Seek to pos %d" % (fname, pos )
try: inFile = open(fname, "rb")
except IOError:
continue
inFile.seek(pos)
db.setdefault(slice0, {} )
last_time = readSlices(inFile, db, exempts, slice0)
if last_time != 0 : # we advanced in this file.
slice0= last_time / timeStep
elif i>= 1 : # guess an adequate slice0 to start with for next file :
slice0= times[i-1]/timeStep
last_pos = inFile.tell()
assert last_pos >= pos
inFile.close()
# 3.
# Update file_info variables :
if 1:
if last_time != 0 :
assert file_info.get('last_time', 0) <= last_time
file_info['last_time'] = last_time
if last_pos > 0 :
file_info['prev_pos'] = last_pos
if debug and file_info.has_key('last_time') :
print "VeryLast: lasttime = %d" % file_info['last_time']
def loadPersist() :
"""Load the persistent database from pickleName; empty dict on failure."""
data = {}
try:
data = cPickle.load( open(pickleName, "rb") )
except IOError:
print "[can not load persistent data. Will need to read all the log-file.]"
return data
def updateData() :
""" structure of data :
data['counts'] : the actual counters, split in separate databases :
['dump'] : for bytes read in the dump
['log'] : for bytes read in the log
each is a 'conns_DB', that holds one database per timeslice :
[<slice>] [IP] : 6-uple (see parseInputUntil)
data['files']
['ledump'] : is the file_info for the dump files.
['lelog' ] : is the file_info for the regular log files
each 'file_info' DB has following keys :
'fnames', 'prev_pos', 'last_time' (see readFile)
'dbName' (the corresponding key into data['counts'])
'reset_if_new' (optionnal, designed for the dump file)
"""
data = loadPersist()
# the exemption table is produced by exempts.py (compiled regexes)
try:
exempts = cPickle.load( open(pickleExemptsName, "rb") )
except IOError:
print "[can not load exmpts data. assuming no exempt]"
exempts = { 'dst' : [], 'src_dst' : {} }
# initialise each database if needed :
for k in ['files', 'counts' ]:
data.setdefault(k, {} )
Files=data['files']
Files.setdefault('ledump', { 'dbName':'dump', 'fnames': dumpFNames, 'reset_if_new':1 })
Files.setdefault('lelog', {'dbName':'log', 'fnames': logFNames } )
# overwrite the filenames stored in pickle with those from the command-line.
Files['ledump'] ['fnames'] = dumpFNames
Files['lelog'] ['fnames'] = logFNames
for k in Files.keys():
data['counts'].setdefault(Files[k]['dbName'], {} )
# parse each group of rotated files into its own counter database
for key in data['files'].keys() :
file_info = data['files'][key]
if debug:
print "file_info : %s " % file_info
print "Parsing %s into data['counts'][ %s ]" % ( file_info['fnames'], file_info['dbName'])
readFile( file_info, data['counts'] [file_info['dbName'] ], exempts )
return data
def printCounters(counts, mkHeaders=0) :
# Format the 6 per-host counters on one line, in kB or MB depending on the
# global `megas`.  With mkHeaders true, return the column-header line
# instead (counts is then unused -- callers pass 0).
unit = 1e3
if megas : unit = 1e6
if mkHeaders :
return "%9s|%9s|%9s | %10s|%9s|%9s" % ('Pure UL ', 'Upload ', 'Download',
'Exempt PUL', 'Exempt U', 'Exempt D' )
s="%9.3f|%9.3f|%9.3f | %9.3f|%9.3f|%9.3f" % (counts[0]/(unit), counts[1]/(unit), counts[2]/(unit),
counts[3]/unit, counts[4]/unit, counts[5]/unit)
return s
def bilan(DB, dbNames, duree, cur_time, disp_all = 0) :
# Aggregate the per-slice counters of the last `duree` seconds over the
# databases listed in dbNames, and print the sorted top-N table.
slice0=int( (cur_time-duree) / timeStep )
by_host={}
Nslices = {}
for db_key in dbNames :
Nslices[db_key] = 0
for slice in DB[db_key].keys() :
if slice >= slice0 :
Nslices[db_key] += 1
for host in DB[db_key][slice].keys() :
# unless -D, only LAN hosts (matching regLAN) are displayed
if disp_all or regLAN.search(host):
counts=DB[db_key][slice][host]
# last element keeps the host key alongside its summed counters
cur = by_host.setdefault(host, [long(0)] *len(counts) + [host] )
for i in range(len(counts)):
cur[i] += counts[i]
liste=by_host.values()
liste.sort( lambda x, y: -cmp(x[sort_column], y[sort_column] )) # descending sort on the selected column
print " %5.1f h stats since %s. %d hour-slices found " % (duree/3600.0,
time.asctime(time.localtime(slice0*timeStep)),
max(Nslices.values()) )
print printCounters(0, 1) + " | HOST"
print "="*77
for l in liste[0:top10_length] :
# Does reverse DNS exist for the machine (i.e. is it registered at crans)?
try:
host = socket.gethostbyaddr( l[-1] ) [0]
bad = 0
except socket.error :
host = l[-1]
bad = 1
if not resolve_names :
# the numeric IP was requested (-n)
host = l[-1]
if bad :
host = "NoDNS_" + host
print printCounters(l)+ (" |%s" % host)
def detail_bilan(DB, hostName, IP, duree, cur_time) :
# Print, for one host, one line per hour-slice of the last `duree` seconds
# (from the 'log' database only).
slice0 = int( (cur_time-duree) / timeStep )
slice1 = slice0 + int( duree/timeStep)
slicePrints={}
Nslices = {}
db_key = 'log'
Nslices[db_key] = 0
for slice in range(slice0, slice1+1) :
pref = time.strftime("%Hh%M", time.localtime(slice*timeStep) )
# (local name `str` shadows the builtin -- kept as-is)
str = " (No record of this time-slice at all)"
if slice in DB[db_key].keys() :
str = " (No activity for this host in this time-slice)"
Nslices[db_key] += 1
if IP in DB[db_key][slice].keys() :
str = printCounters( DB[db_key][slice][IP])
slicePrints[slice] = "%s|%s" %(pref,str)
print "Comptes par tranches de %ds pour la machine %s" % (timeStep, hostName)
print "début : %s" % (time.asctime(time.localtime( slice0 * timeStep) ) )
print ("%5s|" % 'time') + printCounters(0,1)
print "="*77
for slice in range(slice0, slice1+1) :
l=slicePrints[slice]
print l
print "Fin : %s" % (time.asctime(time.localtime( -1 + (slice1+1) * timeStep) ) )
def main(cur_time) :
# Update the counters from the logs, print the report, then shrink the
# database (drop dump counters, old slices, uninteresting extern hosts)
# before saving it back to the pickle.
data=updateData()
bilan(data['counts'], ['log', 'dump'], duree, cur_time, disp_all)
# make persistent data as small as possible :
del data['counts'][ data['files']['ledump']['dbName'] ]
del data['files']['ledump']
cur_t = time.time()
del_slices=[]
# -> get rid of old slices (older than sduree)
for slice in data['counts']['log'].keys() :
if slice < (cur_t-sduree)/timeStep :
del_slices.append(slice)
for slice in del_slices :
del data['counts']['log'][slice]
# get rid of useless extern hosts :
for slice in data['counts']['log'].keys() :
d=data['counts']['log'][slice]
del_hosts=[]
for host in d.keys() :
m= store_all or regLAN.search(host)
# keep extern hosts that were used as big upload targets : download >= 1 Mo
if not m and d[host][2]< 1e6 :
del_hosts.append( host)
for host in del_hosts :
del d[host]
cPickle.dump(data, open(pickleName,"wb") )
#################
# global vars :
#
timeStep=3600 # 1h slices
#################
# Script entry point: option parsing, PID-file locking, defaults, dispatch.
optlist, args = getopt.getopt(sys.argv[1:], "dkDsnc:p:f:h::u:L:N:T:t:")
lock_name = "/var/lock/nacct.py" # lock file
store_all = 0 # if false, store only hosts matching regLAN
disp_all = 0 # if false, display only .. ..
sduree = 48*3600 # delete slices when they are that much old
duree = 0 # display the stats over this period
top10_length = 30
sort_column= 0 # 0 : sort by PURE, 1 : by upload, 2: by download.. up to 6 (cf parseInputUntil)
resolve_names = 1 # resolve hostnames
logFNames= []
dumpFNames=[]
debug=0
megas=1
detail_host=""
cur_time=time.time()
network=""
pickleName=""
pickleExemptsName = ""
# If a lock file exists, check whether the PID it records is still alive:
# a stale lock is removed, a live one aborts the script with status 255.
if os.path.isfile(lock_name) :
# the lock is there
print "Lock existant (%s)" % lock_name
fd = open(lock_name, "r")
msg=fd.readlines()
fd.close()
pid=string.split(msg[0],'\n')[0]
msg=string.strip(string.join(msg[1:], '') )
# ps exits non-zero (256 via os.system) when the PID no longer exists
q = os.system("ps -o pid,tty,user,etime,command " +pid)
if q==256:
print "PID lock no trouvé => delock"
try :
os.remove(lock_name)
except :
None
else :
print "Script lockant en activité, sortie"
sys.exit(255)
#Locking : write our own PID into the lock file
lock_fd=open(lock_name, "w")
lock_comment = "%s" % os.getpid()
lock_fd.write(lock_comment)
lock_fd.close()
for [key, val] in optlist :
if key == '-f' :
logFNames.append(val)
if key == '-u' :
dumpFNames.append(val)
if key == '-d' :
debug = 1
if key == '-D' :
disp_all = 1
if key == '-L' :
network = val
if key == '-h' :
detail_host = val
if key == '-t' :
cur_time = int( time.mktime(time.strptime(val,"%Y %m %d %H:%M")) )
if key == '-N' :
top10_length = int(val)
if key == '-k' :
megas = 0 # use kilos instead of Megs
if key == '-p' :
pickleName=val
if key == '-s' :
store_all = 1
if key == '-n' :
resolve_names = 0
if key == '-T' :
# first -T sets the display period (hours), a second -T sets the
# retention period (days) -- see the usage docstring at the top
if duree == 0 :
duree = int( float(val) * 3600 )
else:
sduree = int( float(val) * 3600 * 24 )
if key == '-c' :
sort_column = int(val) -1
# defaults for everything not given on the command line :
if duree == 0:
duree = 24*3600
if not logFNames :
logFNames = ["/var/log/net-acct/net-acct.log", "/var/log/net-acct/net-acct.log.0" ]
if not dumpFNames :
dumpFNames = ["/var/log/net-acct/dump" ]
if not network :
network = "^138\.231\.1((3[6-9]|4[0-9]|50|51).*)$"
regLAN=re.compile(network)
if not pickleName :
pickleName="/tmp/nacct.pickle"
if not pickleExemptsName :
pickleExemptsName="/tmp/exempts.pickle"
# launch :
if detail_host :
data=loadPersist()
IP = socket.gethostbyname( detail_host)
detail_bilan(data['counts'], detail_host, IP, duree, cur_time)
else :
data = main(cur_time)
# Remove the lock (best-effort: the bare except is intentional)
try :
os.remove(lock_name)
except :
None

View file

@ -1,29 +0,0 @@
#!/usr/bin/awk -f
# Read the firewall logs and print the list of machines attacking on
# port 135 or 6667 (blaster-like virus traffic).
#
# Arguments:
#   <log file(s) to scan> <an extra file whose name contains "blacklist">
#   The second file lists IPs of extra machines to include in the output.
#
# Output format: Month day hh:mm:ss hostname nb_attacks
#
# 02/2003 Frédéric Pauget

# While reading the "blacklist" file: pre-seed each listed IP with a zero
# counter and a placeholder timestamp so it still shows up in END.
{ if (FILENAME~"blacklist") {
if ($0=="") nextfile;
tentatives[$0]=0;
dern_tentative[$0]="Vieux 00 00:00:00"; }
}
# One "Virus" hit on eth0 per line; field 9 carries "SRC=<ip>".
/.*Virus:IN=eth0.*/{
gsub("SRC=","",$9);
tentatives[$9]++;
dern_tentative[$9]=$1" "$2" "$3;
}
END{
for (machine in tentatives){
# NOTE(review): log fields are interpolated straight into a shell command;
# safe only as long as the firewall log contains no shell metacharacters.
system("echo "dern_tentative[machine]" $(host "machine" 2>/dev/null | awk '/^Name/ {print $2}') "tentatives[machine])
}
}

View file

@ -1,240 +0,0 @@
#!/bin/sh
#############################################################################
## Script de déconnexion/reconnexion automatique pour virus de type blaster ##
## ##
## Principe : ##
## -> détection des attaques grâce aux logs du firewall ##
## lecture par *_scan.awk ##
## -> à partir du nombre d'attaques et de l'heure de dernière attaque ##
## déconnecte ou reconnecte des machines ##
## ##
## Frédéric Pauget 02/2003 ##
## 07/2004 adaptation pour scan de plusieurs types d'attaques ##
#############################################################################
# is_up HOST — report whether HOST appears to be up.
# Returns 0 (up) if it answers a ping, or if it still has a complete ARP
# entry after the ping attempt; returns 1 (down) otherwise.
is_up() {
    # Drop any stale ARP entry first, so the table below reflects the
    # ping we are about to send.
    /usr/sbin/arp -d "$1" 2> /dev/null
    if fping -c1 "$1" > /dev/null 2>&1 ; then
        # It answered the ping.
        return 0
    fi
    # grep -E replaces the deprecated egrep; pattern unchanged.
    if /usr/sbin/arp "$1" 2>/dev/null | grep -Eq '(no entry|incomplete)' ; then
        # No (complete) ARP entry: consider it down.
        return 1
    else
        # Still present in the ARP table: consider it up.
        return 0
    fi
}
# Run mode: only an explicit "--real-run" flag disables dry-run.
# Single POSIX test replaces the redundant `[[ $1 ]] && [[ $1 = ... ]]`
# (the script's shebang is /bin/sh, so avoid bashisms here);
# ${1:-} keeps it safe when no argument is given.
if [ "${1:-}" = "--real-run" ]; then
    dry=false
else
    dry=true
fi
# Final blacklist consumed by squid on sila.
BLACKLIST_FINAL='/tmp/virus_blacklist'
# Firewall logs
FW_LOGS=/var/log/firewall/filtre.log
# Log pre-processing: keep only the most recent lines.
# `tail -n 7200` is the POSIX spelling of the obsolete `tail -7200`.
tail -n 7200 "$FW_LOGS" > /tmp/fw_logs_light
# ip HOST — print the IP address(es) of HOST, or nothing on failure.
ip() {
    # The unquoted command substitution is deliberate: it collapses a
    # multi-address answer onto a single space-separated line, so the
    # blacklist conversion below gets one line per host.
    echo $(host "$1" 2>/dev/null | awk '{print $3}')
}
# Main routine: detect one kind of attacker in the firewall logs and
# maintain the corresponding per-attack-type blacklist files.
scan() {
    # Arguments:
    #   1 : number of attacks above which a host is considered infected
    #   2 : seconds without attacks after which a host is considered clean
    #   3 : awk scan script ("Month Day hh:mm:ss host count" per line)
    #   4 : state directory for this attack type
    nb_att=$1
    nb_sec=$2
    SCRIPT=$3
    # Attack list produced by this run
    INFECTES=$4/infectes
    # Hosts considered disinfected (reconnected)
    RECO=$4/reconectes
    # Currently blacklisted hosts
    BLACKLIST=$4/blacklist
    # Extra temporary files
    DIFF=/tmp/virus_diff
    TMPFILE=/tmp/virus_scan
    # These must exist, even empty
    touch $RECO
    touch $BLACKLIST
    # No previous attack list: force dry-run to avoid acting on a
    # bogus "everything is new" diff.
    if ! [[ -e $INFECTES ]]; then
        dry=true
        echo "dry-run mode forcé (fichier absent)"
        touch $INFECTES
    fi
    # Sanity check on the scan script
    if [[ ! -e $SCRIPT ]] ; then
        echo "Erreur : $SCRIPT non trouvé"
        exit 255
    fi
    # Convert the blacklist from hostnames to IPs.
    # "komaz" as first line is the placeholder for an empty blacklist.
    if [[ "$(head -1 $BLACKLIST)" == "komaz" ]]; then
        echo "Ancienne blackliste vide"
        touch $BLACKLIST.ip
    else
        echo "Conversion blackliste..."
        for i in $(cat $BLACKLIST | sort | uniq)
        do
            ip $i
        done > $BLACKLIST.ip
    fi
    echo "Détection des infectés..."
    $SCRIPT $BLACKLIST.ip /tmp/fw_logs_light > $TMPFILE
    # Produces lines of the form:
    # Month Day Time hostname attack-count-since-last-logrotate
    echo "Traitement..."
    mv $INFECTES $INFECTES.old
    sort -r $TMPFILE > $INFECTES
    echo -n "" > $TMPFILE
    # Diff between the previous run's list and the new one
    # (large -U context so unchanged hosts also appear, prefix-less).
    diff -U 1000 $INFECTES.old $INFECTES | egrep -v '\-\-\-|\+\+\+|@@' > $DIFF
    if ! [[ -s $DIFF ]]; then
        echo "Aucun changement par rapport au dernier scan."
        cp $INFECTES $DIFF
    fi
    # Per-host processing
    for host in $(awk '{print $4}' $DIFF | sort | uniq)
    do
        if grep -q "\+.* $host " $DIFF && grep -q "\-.* $host " $DIFF ; then
            # Present as both + and -: the attack count changed
            nb=$(echo $(awk '$4=="'$host'" {print $5}' $INFECTES) - $(awk '$4=="'$host'" {print $5}' $INFECTES.old) | bc)
            echo -ne "Variation ($nb) "
            if grep -q "^$host$" $BLACKLIST ; then
                # Already blacklisted: keep it
                echo -ne "\033[1;31m(RESTE) "
                echo $host >> $TMPFILE
            elif [[ $nb -gt $nb_att ]] ; then
                # New offender
                echo -ne "\033[1;31m(NOUVEAU) "
                echo $host >> $TMPFILE
            else
                # Not enough attempts
                echo -n "(PASSE) "
            fi
        elif grep -q "\+.* $host " $DIFF ; then
            # Only as +, so it is a newcomer
            nb=$(awk '$4=="'$host'" {print $5}' $INFECTES)
            if [[ $nb -gt $nb_att ]] ; then
                echo -ne "\033[1;31mNOUVEAU ($nb) "
                echo $host >> $TMPFILE
            else
                echo -ne "PASSE ($nb) "
            fi
        elif grep -q "\-.* $host " $DIFF ; then
            # Only as -: a logrotate wiped it; keep blacklisted hosts
            if grep -q "^$host$" $BLACKLIST ; then
                echo "RESTE : $host"
                echo $host >> $TMPFILE
            else
                echo "Vieux : $host"
            fi
        else
            # No change in the attack count
            if grep -q "^$host$" $BLACKLIST ; then
                echo -n "Pas de variation "
                # UP or not ?
                if is_up $host ; then
                    # UP: reconnect it if its last attack is old enough
                    last=$(date -d "$(awk '$4=="'$host'" {print $1" "$2" "$3}' $INFECTES)" +%s 2>/dev/null)
                    # Timestamp too old/odd for date to parse
                    if [[ -z $last ]] ; then
                        last=0
                    fi
                    now=$(date +%s)
                    t=$(echo "$now-$last" | bc)
                    if [[ $t -gt $nb_sec ]] ; then
                        # Automatic reconnection
                        echo -n " reconnexion"
                        echo $host >> $RECO
                    else
                        echo $host >> $TMPFILE
                    fi
                else
                    # Down: keep it blacklisted until it answers again
                    echo -ne "\033[1;41m(NO_PING)"
                    echo $host >> $TMPFILE
                fi
                echo -ne "\033[0m : "
            else
                echo -n "Reste connecté "
            fi
        fi
        echo -ne "\033[0m"
        awk '$4=="'$host'" {print $1" "$2" "$3" "$4}' $INFECTES
    done
    # Final step: install the newly computed blacklist
    sort $TMPFILE > $BLACKLIST
}
#######################################################################
# Scan attacks on port 135 or 6667:
#   10 attempts to be considered infected
#   1h without attacks to be considered disinfected
echo -e "\033[1;33m###############################\nScan attaques port 135 ou 6667\033[1;0m"
scan 10 3600 /usr/scripts/analyse_komaz/rpc_scan.awk /var/tmp/rpc
# Scan floods:
#   100 attempts to be considered infected
#   1h without attacks to be considered disinfected
echo -e "\033[1;33m###############################\nScan floods\033[1;0m"
scan 100 3600 /usr/scripts/analyse_komaz/flood_scan.awk /var/tmp/flood
# Build the final blacklist from both scans
cat /var/tmp/rpc/blacklist /var/tmp/flood/blacklist | sort | uniq > $BLACKLIST_FINAL.new
if ! [[ -s $BLACKLIST_FINAL.new ]]; then
    # Nobody listed: squid needs at least one entry, so use a placeholder.
    echo 'komaz' > $BLACKLIST_FINAL.new
fi
if ! $dry ; then
    if diff -q $BLACKLIST_FINAL $BLACKLIST_FINAL.new ; then
        echo "Pas de changement dans la blackliste"
    else
        # Push the blacklist to the proxy host
        /usr/bin/rsync -C -a -e "ssh -i /usr/scripts/analyse_komaz/keys/synchro_virus" $BLACKLIST_FINAL.new root@sila.crans.org:/etc/squid/blacklist_infectes
        # Reload squid so it picks up the new list
        /usr/bin/ssh -o StrictHostKeyChecking=no -i /usr/scripts/analyse_komaz/keys/reload_squid root@sila.crans.org squid reload
    fi
else
    echo "Dry mode : blackliste non copiée sur sila et squid non relancé"
    echo "Utiliser l'option --real-run pour tout faire."
fi
# Keep only the latest version of the blacklist
mv $BLACKLIST_FINAL.new $BLACKLIST_FINAL