add indexed files

bdvllrs 2018-02-06 14:31:01 +01:00
parent cbbb8148b5
commit bf1fa8e3dd
7 changed files with 250 additions and 190 deletions

.gitignore vendored (1 line changed)

@@ -3,3 +3,4 @@ __pycache__/*
*.sublime-project
*.sublime-workspace
config.conf
.idea/*


@@ -1,4 +1,4 @@
#coding:utf-8
# coding:utf-8
from configobj import ConfigObj
import shutil
import posixpath
@@ -12,6 +12,7 @@ class Config:
"""
Gère le fichier de configuration
"""
def __init__(self):
try:
@@ -46,9 +47,8 @@ class Config:
raise ConfigError("no 'api' section in config file")
for k in ['app', 'token', 'extensions', 'server']:
if not k in self.obj['api']:
raise ConfigError("no '"+k+"' value in config file, subsection 'api'")
raise ConfigError("no '" + k + "' value in config file, subsection 'api'")
def is_valid_file(self, name):
_, ext = posixpath.splitext(name)
return ext[1:] in self.obj['api']['extensions']
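As a point of reference for the validation in this hunk: Config expects an 'api' section carrying app, token, extensions and server, and is_valid_file() keeps only files whose extension is listed there. A minimal sketch of the expected structure follows; all values are invented placeholders, not taken from this repository.

# Hypothetical sketch of what ConfigObj should hand back to Config after
# parsing config.conf; every value below is a placeholder.
expected = {
    'api': {
        'app': 'app-id',
        'token': 'api-token',
        'server': 'ftp://files.example.org',
        'extensions': ['mkv', 'avi', 'mp4'],  # ConfigObj parses "mkv, avi, mp4" as a list
    }
}

# is_valid_file() strips the leading dot and checks membership:
import posixpath
_, ext = posixpath.splitext('Some.Film.2010.mkv')
print(ext[1:] in expected['api']['extensions'])  # True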

file.py (21 lines changed)

@@ -1,11 +1,13 @@
#coding:utf-8
# coding:utf-8
import posixpath
import re
class File:
"""
Décrit une référence de fichier dans le disque
"""
def __init__(self, path, name, info, api_id=None, api_fileid=None, api_fileable_type=None):
self.path = path
self.name = name
@@ -13,7 +15,7 @@ class File:
self.markers = {}
self.api_id = api_id
self.api_fileid = api_fileid
self.fileable_type = {None: None, 'App\\Film':'film', 'App\\Episode':'episode'}[api_fileable_type]
self.fileable_type = {None: None, 'App\\Film': 'film', 'App\\Episode': 'episode'}[api_fileable_type]
def get_ext(self):
"""
@@ -46,7 +48,7 @@ class File:
fname = fname.replace('(', ' ( ')
fname = fname.replace(')', ' ) ')
# 2) marqueurs
fname, info = tok.tokenize(' '+fname+' ')
fname, info = tok.tokenize(' ' + fname + ' ')
self.markers = info
# 3) minuscule
fname = fname.lower()
@@ -57,10 +59,10 @@
self.info['YEAR'] = m.group(1)
if 'YEAR' in self.info:
year = self.info['YEAR']
fname = fname.replace('['+str(year)+']', '')
fname = fname.replace('[ '+str(year)+' ]', '')
fname = fname.replace('['+str(year)+' ]', '')
fname = fname.replace('[ '+str(year)+']', '')
fname = fname.replace('[' + str(year) + ']', '')
fname = fname.replace('[ ' + str(year) + ' ]', '')
fname = fname.replace('[' + str(year) + ' ]', '')
fname = fname.replace('[ ' + str(year) + ']', '')
# 5) espaces en bout, centraux et rajoutés
fname = fname.lstrip().rstrip()
while ' ' in fname:
@@ -92,9 +94,8 @@ class File:
def __str__(self):
if 'YEAR' in self.info:
return str(self.path+'/'+self.name+' year:['+str(self.info['YEAR'])+']')
return str(self.path+'/'+self.name)
return str(self.path + '/' + self.name + ' year:[' + str(self.info['YEAR']) + ']')
return str(self.path + '/' + self.name)
def __repr__(self):
return str(self)
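To illustrate the kind of cleanup File.extract_title performs in the hunks above (parenthesis spacing, lowercasing, bracketed-year extraction, whitespace collapsing), here is a rough standalone sketch. It is an approximation built from the visible lines, not the actual implementation, and the sample filename is invented.

import re

def rough_extract_title(fname):
    # Approximation of the normalisation steps visible in the diff above.
    fname = fname.replace('(', ' ( ').replace(')', ' ) ')  # isolate parentheses
    fname = fname.lower()                                  # lowercase
    year = None
    m = re.search(r'\[ ?([0-9]{4}) ?\]', fname)            # bracketed year, e.g. [2010]
    if m:
        year = m.group(1)
        fname = fname.replace(m.group(0), '')
    fname = fname.strip()                                  # trim and collapse spaces
    while '  ' in fname:
        fname = fname.replace('  ', ' ')
    return fname, year

print(rough_extract_title('Some Film [2010] 1080p mkv'))
# ('some film 1080p mkv', '2010')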


@@ -2,37 +2,39 @@ import re
PATTERN_BASIC = re.compile(r'#(.+?)#')
def match_rules(filename, rules):
for r in rules:
match = r.apply(filename)
if match:
return r, match
return None
for r in rules:
match = r.apply(filename)
if match:
return r, match
return None
class FileRule:
def __init__(self, rule, conf):
# trouve les marqueurs dans la règle
self.patterns = PATTERN_BASIC.findall(rule)
rg = PATTERN_BASIC.sub(lambda m:self.regFor(match=m, conf=conf), rule)
self.rule = re.compile(rg) # on garde la version compilée (donc optimisée)
def __init__(self, rule, conf):
# trouve les marqueurs dans la règle
self.patterns = PATTERN_BASIC.findall(rule)
rg = PATTERN_BASIC.sub(lambda m: self.regFor(match=m, conf=conf), rule)
self.rule = re.compile(rg) # on garde la version compilée (donc optimisée)
@staticmethod
def regFor(match, conf):
name = match.group(0)
trunc_name = name[1:-1]
if name == '#EXT#':
return '('+'|'.join(conf.extensions)+')'
elif name in ['#EPISODE_NUM#', '#SEASON_NUM#']:
return '([0-9]+)'
return '([^/]+)'
@staticmethod
def regFor(match, conf):
name = match.group(0)
trunc_name = name[1:-1]
if name == '#EXT#':
return '(' + '|'.join(conf.extensions) + ')'
elif name in ['#EPISODE_NUM#', '#SEASON_NUM#']:
return '([0-9]+)'
return '([^/]+)'
def apply(self, filename):
# applique la règle à un objet file
match = self.rule.match(filename)
if not match:
return None
# trouve les différents marqueurs de règle
patterns = {}
for i in range(len(self.patterns)):
patterns[self.patterns[i]] = match.group(i+1)
return patterns
def apply(self, filename):
# applique la règle à un objet file
match = self.rule.match(filename)
if not match:
return None
# trouve les différents marqueurs de règle
patterns = {}
for i in range(len(self.patterns)):
patterns[self.patterns[i]] = match.group(i + 1)
return patterns
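A worked example may help read the FileRule rewrite above: each #MARKER# in a rule string becomes a capture group via regFor(), and apply() zips the captured groups back onto the marker names. The rule text, extensions and path below are made up for illustration (the #TITLE# marker is hypothetical).

import re

# Hypothetical rule: regFor() maps #EXT# to the extension alternation,
# #SEASON_NUM#/#EPISODE_NUM# to digits, and any other marker to ([^/]+).
# rule_text = r'/series/#SERIE#/Season #SEASON_NUM#/#TITLE#\.#EXT#'
pattern = re.compile(r'/series/([^/]+)/Season ([0-9]+)/([^/]+)\.(mkv|avi)')

m = pattern.match('/series/Some Show/Season 2/Pilot.mkv')
markers = dict(zip(['SERIE', 'SEASON_NUM', 'TITLE', 'EXT'], m.groups()))
print(markers)
# {'SERIE': 'Some Show', 'SEASON_NUM': '2', 'TITLE': 'Pilot', 'EXT': 'mkv'}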

main.py (167 lines changed)

@@ -1,4 +1,4 @@
#coding:utf-8
# coding:utf-8
'''
Pierre Cadart
@@ -36,13 +36,14 @@ def ftpwalk(directory, ftp):
continue
if prop['type'] == 'dir':
Ldirs.append(name)
to_visit.append(current+'/'+name)
to_visit.append(current + '/' + name)
elif prop['type'] == 'file':
Lfiles.append(name)
# ne construit pas la liste complète,
# mais retourne les résultats intermédiaires
yield (current, Ldirs, Lfiles)
def visit_folder(domain, api, rules, tok):
"""
Visite un dossier sur un serveur, spécifié par <domain>,
@@ -55,39 +56,47 @@ def visit_folder(domain, api, rules, tok):
ftp = FTP(domain['server'][6:], user=domain['username'], passwd=domain['password'])
ftp.encoding = 'UTF-8'
# Initialisation des listes de mises à jour
L_missing = [] # fichiers non trouvés sur le serveur FTP
L_unreferenced = [] # fichiers non référencés dans l'API
L_moved = [] # fichiers déplacés sur le serveur FTP
L_missing = [] # fichiers non trouvés sur le serveur FTP
L_unreferenced = [] # fichiers non référencés dans l'API
L_moved = [] # fichiers déplacés sur le serveur FTP
# Lecture des fichiers sur le serveur FTP
Lloc = []
indexed_files = []
for path, _, files in ftpwalk(domain['path'], ftp):
# Ajoute les fichiers correspondants aux extensions
for f in files:
match = filerule.match_rules(path+'/'+f, rules)
indexed_files.append({
"path": domain['server'] + path,
"name": f
})
match = filerule.match_rules(path + '/' + f, rules)
if match:
#print('got match:',match[1], 'name:',path+'/'+f)
# print('got match:',match[1], 'name:',path+'/'+f)
F = file.File(path, f, match[1])
F.extract_title(tok)
Lloc.append(F)
print('indexing all files :', len(indexed_files), ' files')
api.post_index(indexed_files)
ftp.close()
print('total loc for ',domain['server']+domain['path'],':', len(Lloc))
print('total loc for ', domain['server'] + domain['path'], ':', len(Lloc))
# Récupère les fichiers de l'api
Lapi = []
for info in api.get_files(path=domain['server']+domain['path'], like=1, filable=1):
for info in api.get_files(path=domain['server'] + domain['path'], like=1, filable=1):
nfo = {}
if ('filable' not in info) or ('filable_type' not in info):
print('nfo:', info) # le fileable associé a été supprimé
print('nfo:', info) # le fileable associé a été supprimé
else:
year = int(info['filable']['release_date'][:4])
nfo['YEAR'] = year
F = file.File(info['path'][len(domain['server']):], info['name'], nfo, api_id=info['filable_id'], api_fileid=info['id'], api_fileable_type=info['filable_type'])
F = file.File(info['path'][len(domain['server']):], info['name'], nfo, api_id=info['filable_id'],
api_fileid=info['id'], api_fileable_type=info['filable_type'])
F.extract_title(tok)
match = filerule.match_rules(F.path+'/'+F.name, rules)
match = filerule.match_rules(F.path + '/' + F.name, rules)
if match:
F.info = match[1]
F.info['YEAR'] = nfo['YEAR'] # may not work :-(
F.info['YEAR'] = nfo['YEAR'] # may not work :-(
Lapi.append(F)
print('total api for ',domain['server']+domain['path'],':', len(Lapi))
print('total api for ', domain['server'] + domain['path'], ':', len(Lapi))
# traite les films
Lfilm_loc = [f for f in Lloc if f.fileable_type == 'film']
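For reference, the indexing pass added in the hunk above collects one {path, name} dict per file found during the FTP walk, whether or not a rule matched it, and ships the whole batch in a single api.post_index() call. A hedged sketch of the payload shape; the server and file names are invented placeholders.

# Hypothetical payload built by visit_folder(); values are placeholders.
indexed_files = [
    {"path": "ftp://files.example.org/films", "name": "Some Film [2010].mkv"},
    {"path": "ftp://files.example.org/series/Some Show/Season 2", "name": "Pilot.mkv"},
]
# api.post_index(indexed_files)  # one request for the whole batch, sent as a JSON body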
@@ -99,7 +108,8 @@ def visit_folder(domain, api, rules, tok):
Lepisode_api = [f for f in Lapi if f.fileable_type == 'episode']
handle_episodes(Lepisode_loc, Lepisode_api, domain, api, rules, tok)
print('visit finished ',domain['server']+domain['path'])
print('visit finished ', domain['server'] + domain['path'])
def handle_films(Lfilm_loc, Lfilm_api, domain, api, rules, tok):
"""
@@ -109,84 +119,87 @@ def handle_films(Lfilm_loc, Lfilm_api, domain, api, rules, tok):
Lloc = Lfilm_loc
Lapi = Lfilm_api
#print('loc titles:', '|'.join(sorted([f.title for f in Lloc])))
#print('loc titles:', '|'.join(sorted([f.title for f in Lloc])))
#print('\n'*2)
#print('api titles:', '|'.join(sorted([f.title for f in Lapi])))
# print('loc titles:', '|'.join(sorted([f.title for f in Lloc])))
# print('loc titles:', '|'.join(sorted([f.title for f in Lloc])))
# print('\n'*2)
# print('api titles:', '|'.join(sorted([f.title for f in Lapi])))
# supprime les dossiers de l'api (ils ne devraient pas apparaître)
Lapi, Linvalid = filter_by(Lapi, lambda f:tok.conf.is_valid_file(f.name) and filerule.match_rules(f.path+'/'+f.name, rules))
Lapi, Linvalid = filter_by(Lapi,
lambda f: tok.conf.is_valid_file(f.name) and filerule.match_rules(f.path + '/' + f.name,
rules))
# Compare avec la liste de l'api
Lmissing = [f for f in Lapi if f not in Lloc] # fichiers non présents localement
Lunref = [f for f in Lloc if f not in Lapi] # fichiers non référencés
Lunref = [f for f in Lloc if f not in Lapi] # fichiers non référencés
# Fichiers déplacés (ou copiés) localement
Lunref, _, Lrelink = update_find_by_common(Lunref, Lapi, lambda f:f.name)
Lunref, _, Lrelink = update_find_by_common(Lunref, Lapi, lambda f: f.name)
for floc, fapi in Lrelink:
if fapi in Lmissing:
Lmissing.remove(fapi)
print('moved/copied:', Lrelink)
# Linke les fichiers du même titre (simplifié) au même film
Lunref, _, Llink = update_find_by_common(Lunref, Lapi, lambda f:f.title)
Lunref, _, Llink = update_find_by_common(Lunref, Lapi, lambda f: f.title)
for f, fApi in Llink:
if fApi in Lmissing:
Lmissing.remove(fApi)
print('doubles:', sorted(Llink, key=lambda f:str(f[0])))
print('doubles:', sorted(Llink, key=lambda f: str(f[0])))
# Linke les films par nom si possible
APIfilms = api.get_films()
API_alltitles = []
for f in APIfilms:
if f['title']:
t = f['title'].replace(' ','').lower()
t = f['title'].replace(' ', '').lower()
if len(t) > 2:
if t not in [e[0] for e in API_alltitles]:
API_alltitles.append((t, f['id']))
if f['title_vo']:
t = f['title_vo'].replace(' ','').lower()
t = f['title_vo'].replace(' ', '').lower()
if len(t) > 2:
if t not in [e[0] for e in API_alltitles]:
API_alltitles.append((t, f['id']))
Llink2 = []
for film in Lunref:
for title, fid in API_alltitles:
if title==film.title:
if title == film.title:
Llink2.append((film, fid))
break # pour ne pas référencer deux fois le même fichier
#print(film, ' <-> ', [f for f in APIfilms if f['id']==fid][0]['title'])
break # pour ne pas référencer deux fois le même fichier
# print(film, ' <-> ', [f for f in APIfilms if f['id']==fid][0]['title'])
print('easy ref:', sorted(Llink2, key=lambda f:str(f)))
print('easy ref:', sorted(Llink2, key=lambda f: str(f)))
for f, _ in Llink2:
Lunref.remove(f)
print('invalid:'+'\n'.join(str(f.api_fileid)+' '+str(f) for f in Linvalid))
print('missing (', len(Lmissing), '):','\n'.join([str(e.api_id)+':'+repr(e)+'('+e.title+')' for e in sorted(Lmissing, key=lambda e:e.title)]))
print('unreferenced:'+'\n'.join(str(f) for f in sorted(Lunref, key=lambda e:e.title)))
#print('unreferenced titles:\n', '\n'.join(sorted([f.title for f in Lunref])))
print('invalid:' + '\n'.join(str(f.api_fileid) + ' ' + str(f) for f in Linvalid))
print('missing (', len(Lmissing), '):', '\n'.join(
[str(e.api_id) + ':' + repr(e) + '(' + e.title + ')' for e in sorted(Lmissing, key=lambda e: e.title)]))
print('unreferenced:' + '\n'.join(str(f) for f in sorted(Lunref, key=lambda e: e.title)))
# print('unreferenced titles:\n', '\n'.join(sorted([f.title for f in Lunref])))
return
# Supprime les fichiers invalides (dossiers/ ne répondent à aucune règle)
for i, film in enumerate(Linvalid):
print('['+str(i+1)+'/'+str(len(Linvalid))+']'+'invalid:', film)
print('[' + str(i + 1) + '/' + str(len(Linvalid)) + ']' + 'invalid:', film)
try:
resp = api.delete_file(id=film.api_fileid)
except Exception as e:
print(e)
print('film '+film.title+' not deleted')
print('film ' + film.title + ' not deleted')
raise Exception('end')
time.sleep(1)
# Supprime les fichiers qui n'existent plus
for i, film in enumerate(Lmissing):
print('['+str(i+1)+'/'+str(len(Lmissing))+']'+'missing:', film)
print('[' + str(i + 1) + '/' + str(len(Lmissing)) + ']' + 'missing:', film)
try:
resp = api.delete_file(id=film.api_fileid)
except Exception as e:
print(e)
print('film '+film.title+' not deleted')
print('film ' + film.title + ' not deleted')
raise Exception('end')
time.sleep(1)
@@ -194,13 +207,13 @@ def handle_films(Lfilm_loc, Lfilm_api, domain, api, rules, tok):
i = 0
for filmLoc, filmApi in Lrelink:
i += 1
print('['+str(i)+'/'+str(len(Lrelink))+']'+'relink:', filmApi.title)
print('[' + str(i) + '/' + str(len(Lrelink)) + ']' + 'relink:', filmApi.title)
try:
api.put_file(id=filmApi.api_fileid, path=domain['server']+filmLoc.path, name=filmLoc.name)
api.put_file(id=filmApi.api_fileid, path=domain['server'] + filmLoc.path, name=filmLoc.name)
time.sleep(1)
except Exception as e:
print(e)
print('film '+filmApi.title+' not edited')
print('film ' + filmApi.title + ' not edited')
raise Exception('end')
# Poste les ajouts de doubles
@@ -208,30 +221,31 @@ def handle_films(Lfilm_loc, Lfilm_api, domain, api, rules, tok):
for film, filmAPI in Llink:
filmID = filmAPI.api_id
i += 1
print('['+str(i)+'/'+str(len(Llink))+']'+'link:', film.title)
print('[' + str(i) + '/' + str(len(Llink)) + ']' + 'link:', film.title)
try:
resp = api.post_file(path=domain['server']+film.path, name=film.name, type='film', type_id=filmID)
resp = api.post_file(path=domain['server'] + film.path, name=film.name, type='film', type_id=filmID)
if 'id' in resp:
post_markers(api, film, resp['id'])
time.sleep(1)
except Exception as e:
print(e)
print('film '+film.title+' not added')
print('film ' + film.title + ' not added')
raise Exception('end')
# Poste les ajouts de doubles plus complexes
i = 0
for film, filmID in Llink2:
i += 1
print('['+str(i)+'/'+str(len(Llink2))+']'+'link2:', film.title)
print('[' + str(i) + '/' + str(len(Llink2)) + ']' + 'link2:', film.title)
try:
resp = api.post_file(path=domain['server']+film.path, name=film.name, type='film', type_id=filmID, **film.additional_info())
resp = api.post_file(path=domain['server'] + film.path, name=film.name, type='film', type_id=filmID,
**film.additional_info())
if 'id' in resp:
post_markers(api, film, resp['id'])
time.sleep(1)
except Exception as e:
print(e)
print('film '+film.title+' not added')
print('film ' + film.title + ' not added')
raise Exception('end')
# Poste tout les films locaux (doit faire une reqête Tmdb, qui peut ne pas marcher)
@@ -239,22 +253,22 @@ def handle_films(Lfilm_loc, Lfilm_api, domain, api, rules, tok):
Lcannot_post = []
for film in Lunref:
i += 1
print('['+str(i)+'/'+str(len(Lunref))+']'+'post:', film.title, str(film.info.get('YEAR')))
print('[' + str(i) + '/' + str(len(Lunref)) + ']' + 'post:', film.title, str(film.info.get('YEAR')))
try:
posted = False
if 'YEAR' in film.info: # tente avec l'année spécifié en premier
if 'YEAR' in film.info: # tente avec l'année spécifié en premier
resp = api.post_film(title=film.title, year=film.info['YEAR'])
if "id" in resp: # id du film
if "id" in resp: # id du film
posted = True
if not posted:
resp = api.post_film(title=film.title)
if "id" in resp: # id du film
if "id" in resp: # id du film
posted = True
if posted:
print('post: path=',domain['server']+film.path)
resp = api.post_file(path=domain['server']+film.path, name=film.name, type='film', type_id=resp["id"])
if 'id' in resp: # id du file
print('post: path=', domain['server'] + film.path)
resp = api.post_file(path=domain['server'] + film.path, name=film.name, type='film', type_id=resp["id"])
if 'id' in resp: # id du file
post_markers(api, film, resp['id'])
else:
Lcannot_post.append(film)
@@ -263,11 +277,12 @@ def handle_films(Lfilm_loc, Lfilm_api, domain, api, rules, tok):
time.sleep(1)
except Exception as e:
print(e)
print('film '+film.title+' not posted')
print('film ' + film.title + ' not posted')
raise Exception('end')
# TODO: traiter les films non postés (Lcannot_post)
def handle_episodes(Lepisode_loc, Lepisode_api, domain, api, rules, tok):
"""
Utilise les listes des fichiers locaux Lepisode_loc et des fichiers de l'API Lepisode_api
@@ -277,11 +292,13 @@ def handle_episodes(Lepisode_loc, Lepisode_api, domain, api, rules, tok):
Lapi = Lepisode_api
# fichiers invalides
Lapi, Linvalid = filter_by(Lapi, lambda f:tok.conf.is_valid_file(f.name) and filerule.match_rules(f.path+'/'+f.name, rules))
Lapi, Linvalid = filter_by(Lapi,
lambda f: tok.conf.is_valid_file(f.name) and filerule.match_rules(f.path + '/' + f.name,
rules))
# Compare avec la liste de l'api
Lmissing = [f for f in Lapi if f not in Lloc] # fichiers non présents localement
Lunref = [f for f in Lloc if f not in Lapi] # fichiers non référencés
Lunref = [f for f in Lloc if f not in Lapi] # fichiers non référencés
# de même avec les noms de séries
Lseries_loc = list(set([f.info['SERIE'] for f in Lloc]))
@@ -292,8 +309,8 @@ def handle_episodes(Lepisode_loc, Lepisode_api, domain, api, rules, tok):
# récupère les séries, et les correspondances des noms locaux
APIseries = api.get_series(episodes=1)
APIepisodes = api.get_episodes(files=1)
APIepisodes_byid = {e['id']:e for e in APIepisodes}
APIfiles_byid = {f.api_id:f for f in Lapi}
APIepisodes_byid = {e['id']: e for e in APIepisodes}
APIfiles_byid = {f.api_id: f for f in Lapi}
series_id_bytitle = {}
series_id_bysimpletitle = {}
for s in APIseries:
@@ -314,30 +331,30 @@ def handle_episodes(Lepisode_loc, Lepisode_api, domain, api, rules, tok):
# Supprime les fichiers invalides (dossiers/ ne répondent à aucune règle)
for i, episode in enumerate(Linvalid):
print('['+str(i+1)+'/'+str(len(Linvalid))+']'+'invalid:', episode)
print('[' + str(i + 1) + '/' + str(len(Linvalid)) + ']' + 'invalid:', episode)
try:
resp = api.delete_file(id=episode.api_fileid)
except Exception as e:
print(e)
print('episode '+episode.title+' not deleted')
print('episode ' + episode.title + ' not deleted')
raise Exception('end')
time.sleep(1)
# Supprime les fichiers qui n'existent plus
for i, episode in enumerate(Lmissing):
print('['+str(i+1)+'/'+str(len(Lmissing))+']'+'missing:', episode)
print('[' + str(i + 1) + '/' + str(len(Lmissing)) + ']' + 'missing:', episode)
try:
resp = api.delete_file(id=episode.api_fileid)
except Exception as e:
print(e)
print('episode '+episode.title+' not deleted')
print('episode ' + episode.title + ' not deleted')
raise Exception('end')
time.sleep(1)
# Poste les séries non présentes, récupère les références correspondantes
series_not_found = []
for i, serie in enumerate(Lunref_serie):
print('['+str(i+1)+'/'+str(len(Lunref_serie))+']'+'unref_serie:', serie)
print('[' + str(i + 1) + '/' + str(len(Lunref_serie)) + ']' + 'unref_serie:', serie)
try:
resp = api.post_serie(title=serie)
if 'id' in resp:
@@ -348,7 +365,7 @@ def handle_episodes(Lepisode_loc, Lepisode_api, domain, api, rules, tok):
print('not found: ', resp)
except Exception as e:
print(e)
print('serie '+serie+' not posted')
print('serie ' + serie + ' not posted')
raise Exception('end')
time.sleep(1)
@@ -357,25 +374,27 @@ def handle_episodes(Lepisode_loc, Lepisode_api, domain, api, rules, tok):
# Poste les episodes locaux
for i, episode in enumerate(Lunref):
print('['+str(i+1)+'/'+str(len(Lunref))+']'+'post:', episode)
print('[' + str(i + 1) + '/' + str(len(Lunref)) + ']' + 'post:', episode)
try:
serie_id = series_id_bytitle[episode.info['SERIE']]
episode_num = int(episode.info['EPISODE_NUM'])
season_num = int(episode.info['SEASON_NUM'])
resp = api.post_episode(serie_id=serie_id, episode=episode_num, season=season_num)
if 'id' in resp:
resp = api.post_file(path=domain['server']+episode.path, name=episode.name, type='episode', type_id=resp["id"])
print('response: ',resp)
resp = api.post_file(path=domain['server'] + episode.path, name=episode.name, type='episode',
type_id=resp["id"])
print('response: ', resp)
else:
print('episode not posted:', resp)
except Exception as e:
print(e)
print('episode '+episode.title+' not posted')
#raise Exception('end')
print('episode ' + episode.title + ' not posted')
# raise Exception('end')
time.sleep(1)
# TODO: traiter les séries non référencées (series_not_found)
def filter_by(L, f_prop):
"""
Sépare la liste L en deux listes,
@@ -389,6 +408,7 @@ def filter_by(L, f_prop):
prop_false.append(e)
return prop_true, prop_false
def update_find_by_common(L1, L2, f_prop):
"""
Effectue une recherche de propriétés commune entre les éléments
@@ -402,6 +422,7 @@ def update_find_by_common(L1, L2, f_prop):
L2 = [e for e in L2 if e not in found2]
return L1, L2, found
def find_by_common(L1, L2, f_prop):
"""
Associe les éléments de <L1> et <L2> à travers leur propriétés
@@ -435,6 +456,7 @@ def post_markers(api, file_, fileid):
api.post_file_subtitle(fileid, value=sub)
time.sleep(1)
def main():
"""
Fonction principale du programme, réalise toute les opérations
@@ -449,9 +471,10 @@ def main():
rules = api.get_paths()
for fold in folders[1:]:
applicable = [filerule.FileRule(re.escape(fold['path'])+'\\/'+r['regex'], conf) for r in rules if int(r['indexer_folder_id']) == fold['id']]
applicable = [filerule.FileRule(re.escape(fold['path']) + '\\/' + r['regex'], conf) for r in rules if
int(r['indexer_folder_id']) == fold['id']]
visit_folder(fold, api, applicable, tokens)
if __name__ == '__main__':
main()
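To make the rule assembly in main() above easier to follow: each API rule whose indexer_folder_id matches the folder gets the folder path (regex-escaped) prepended before being compiled into a FileRule. A small sketch with invented folder and rule records, mimicking the records main() iterates over; the regex text is a placeholder.

import re

# Hypothetical folder and rule records; values are placeholders.
fold = {'id': 3, 'path': '/films'}
rules = [{'indexer_folder_id': '3', 'regex': r'#TITLE# \[#YEAR#\]\.#EXT#'}]

assembled = [re.escape(fold['path']) + '\\/' + r['regex']
             for r in rules if int(r['indexer_folder_id']) == fold['id']]
# Each assembled string is then wrapped as filerule.FileRule(rule, conf) before visiting the folder.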


@@ -1,28 +1,34 @@
#coding:utf-8
# coding:utf-8
import requests
from requests.auth import HTTPBasicAuth
class PiexelErrors(Exception):
pass
class InvalidToken(PiexelErrors):
pass
class ParameterError(PiexelErrors):
pass
class InvalidResponse(PiexelErrors):
pass
class Piexel:
def __init__(self, domain, app='', token='', endpoint='/api/'):
#self.app = app
#self.token = token
# self.app = app
# self.token = token
self.auth = HTTPBasicAuth(app, token)
self.domain = domain
self.endpoint = endpoint
def _get_response(self, controller, fields, request_type='get'):
def _get_response(self, controller, fields, request_type='get', data=None):
"""
Build response
:param controller: controller à utiliser
@@ -32,9 +38,15 @@ class Piexel:
if request_type == 'get':
response = requests.get(url, auth=self.auth, params=fields)
elif request_type == 'post':
response = requests.post(url, auth=self.auth, params=fields)
if data is not None:
response = requests.post(url, auth=self.auth, params=fields, json=data)
else:
response = requests.post(url, auth=self.auth, params=fields)
elif request_type == 'put':
response = requests.put(url, auth=self.auth, params=fields)
if data is not None:
response = requests.put(url, auth=self.auth, params=fields, json=data)
else:
response = requests.put(url, auth=self.auth, params=fields)
elif request_type == 'delete':
response = requests.delete(url, auth=self.auth, data=fields)
response.encoding = 'utf-8'
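The new data argument above lets _get_response() attach a JSON body to POST and PUT requests while keeping the usual query-string fields. In terms of the requests library, the two POST branches boil down to the following; the URL, credentials and payload are placeholders, not values from this project.

import requests
from requests.auth import HTTPBasicAuth

auth = HTTPBasicAuth('app-id', 'api-token')   # placeholder credentials
payload = [{"path": "ftp://files.example.org/films", "name": "Some Film.mkv"}]

# data is not None: the payload is serialised to JSON and sent as the request body.
requests.post('https://example.org/api/index', auth=auth, params={}, json=payload)

# data is None: only query-string parameters are sent, as before.
requests.post('https://example.org/api/films', auth=auth, params={'title': 'Some Film'})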
@@ -68,7 +80,8 @@ class Piexel:
Récupère les films
:param params: paramètres à passer
"""
fields = self._get_request(['id', 'title', 'title_vo', 'imdb_id', 'limit', 'first', 'first', 'files'], [], **params)
fields = self._get_request(['id', 'title', 'title_vo', 'imdb_id', 'limit', 'first', 'first', 'files'], [],
**params)
return self._get_response('films', fields)
def get_series(self, **params):
@@ -84,7 +97,8 @@ class Piexel:
Récupère les épisodes
:param params: paramètres à passer
"""
fields = self._get_request(['id', 'serie_id', 'title', 'imdb_id', 'limit', 'first', 'first', 'episodes'], [], **params)
fields = self._get_request(['id', 'serie_id', 'title', 'imdb_id', 'limit', 'first', 'first', 'episodes'], [],
**params)
return self._get_response('episodes', fields)
def get_tokens(self, **params):
@@ -157,7 +171,7 @@ class Piexel:
:param params: paramètres à passer
"""
fields = self._get_request(['value'], ['value'], **params)
return self._get_response('languages/'+str(file)+'/attach', fields, 'post')
return self._get_response('languages/' + str(file) + '/attach', fields, 'post')
def post_file_subtitle(self, file, **params):
"""
@@ -165,7 +179,7 @@ class Piexel:
:param params: paramètres à passer
"""
fields = self._get_request(['value'], ['value'], **params)
return self._get_response('subtitle-languages/'+str(file)+'/attach', fields, 'post')
return self._get_response('subtitle-languages/' + str(file) + '/attach', fields, 'post')
def post_file_qualities(self, file, **params):
"""
@@ -173,14 +187,15 @@ class Piexel:
:param params: paramètres à passer
"""
fields = self._get_request(['value'], ['value'], **params)
return self._get_response('qualities/'+str(file)+'/attach', fields, 'post')
return self._get_response('qualities/' + str(file) + '/attach', fields, 'post')
def post_subtitle(self, **params):
"""
Ajoute un sous-titre
:param params: paramètres à passer
"""
fields = self._get_request(['path', 'name', 'type', 'type_id', 'quality', 'lang'], ['path', 'name', 'type', 'type_id'], **params)
fields = self._get_request(['path', 'name', 'type', 'type_id', 'quality', 'lang'],
['path', 'name', 'type', 'type_id'], **params)
return self._get_response('subtitles', fields, 'post')
def post_serie(self, **params):
@@ -204,8 +219,9 @@ class Piexel:
Edite un fichier
:param params: paramètres à passer
"""
fields = self._get_request(['path', 'name', 'filable_type', 'filable_id', 'quality', 'lang', 'subtitles'], [], **params)
return self._get_response('file/'+str(id), fields, 'put')
fields = self._get_request(['path', 'name', 'filable_type', 'filable_id', 'quality', 'lang', 'subtitles'], [],
**params)
return self._get_response('file/' + str(id), fields, 'put')
def delete_file(self, id, **params):
"""
@@ -213,5 +229,26 @@ class Piexel:
:param params: paramètres à passer
"""
fields = self._get_request([], [], **params)
return self._get_response('file/'+str(id), fields, 'delete')
return self._get_response('file/' + str(id), fields, 'delete')
def get_index(self, **params):
"""
Récupère les fichiers indexés
:param params: paramètres à passer
"""
fields = self._get_request(['id', 'path', 'name', 'limit', 'first', 'is_referenced', 'like'], [], **params)
return self._get_response('index', fields)
def post_index(self, params):
"""
Ajoute un fichier
:param params: paramètres à passer
"""
return self._get_response('index', {}, 'post', params)
def delete_index(self, id):
"""
Supprime un fichier indéxé
:param id: id du fichier à supprimer
"""
return self._get_response('index/' + str(id), {}, 'delete')
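Taken together, the three new methods give the indexer a small create/read/delete surface over the index endpoint. A hedged usage sketch follows; the domain, credentials, the id and the is_referenced filter semantics are assumptions, not confirmed by this commit.

# Hypothetical usage of the new index endpoints; all values are placeholders.
api = Piexel('https://example.org', app='app-id', token='api-token')

# Push the batch of files discovered on the FTP server.
api.post_index([{"path": "ftp://files.example.org/films", "name": "Some Film.mkv"}])

# List indexed entries, presumably filtering on whether they are already
# linked to a film or an episode (is_referenced is accepted by get_index).
entries = api.get_index(is_referenced=0)

# Drop a stale entry by id.
api.delete_index(42)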


@@ -1,53 +1,49 @@
#coding:utf-8
# coding:utf-8
import piexel
import re
class Tokenizer:
def __init__(self, conf, api):
self.conf = conf
self.reload_tokens(api)
def reload_tokens(self, api):
"""
Charge les tokens depuis l'API,
et initialise la liste des étapes
"""
self.tk = api.get_tokens()
self.steps = list(set(t['step'] for t in self.tk))
self.steps.sort()
def get_tokens_step(self, step):
"""
Retourne tout les tokens de l'étape <step>
"""
return [t for t in self.tk if t['step'] == step]
def tokenize(self, filename):
"""
Analyse <filename> pour trouver tous ses marqueurs.
Les marqueurs sont enlevés, et rangés dans des listes.
retourne le nom privé des marqueurs, ainsi que le dictionnaire des marqueurs
"""
found = {'lang':[], 'quality':[], 'subtitle':[]}
for step in self.steps:
for tok in self.get_tokens_step(step):
if(not bool(int(tok['case_sensitive']))):
reg = re.compile(r' '+tok['token']+r' ', re.IGNORECASE)
else:
reg = re.compile(r' '+tok['token']+r' ')
if reg.search(filename):
for tok_lang in tok['languages']:
found['lang'].append(tok_lang['value'])
for tok_qual in tok['qualities']:
found['quality'].append(tok_qual['value'])
for tok_sub in tok['subtitle_languages']:
found['subtitle'].append(tok_sub['value'])
filename = reg.sub(' ', filename)
for typ in found:
found[typ] = [e for e in found[typ] if e != 'N/A']
return filename, found
def __init__(self, conf, api):
self.conf = conf
self.reload_tokens(api)
def reload_tokens(self, api):
"""
Charge les tokens depuis l'API,
et initialise la liste des étapes
"""
self.tk = api.get_tokens()
self.steps = list(set(t['step'] for t in self.tk))
self.steps.sort()
def get_tokens_step(self, step):
"""
Retourne tout les tokens de l'étape <step>
"""
return [t for t in self.tk if t['step'] == step]
def tokenize(self, filename):
"""
Analyse <filename> pour trouver tous ses marqueurs.
Les marqueurs sont enlevés, et rangés dans des listes.
retourne le nom privé des marqueurs, ainsi que le dictionnaire des marqueurs
"""
found = {'lang': [], 'quality': [], 'subtitle': []}
for step in self.steps:
for tok in self.get_tokens_step(step):
if (not bool(int(tok['case_sensitive']))):
reg = re.compile(r' ' + tok['token'] + r' ', re.IGNORECASE)
else:
reg = re.compile(r' ' + tok['token'] + r' ')
if reg.search(filename):
for tok_lang in tok['languages']:
found['lang'].append(tok_lang['value'])
for tok_qual in tok['qualities']:
found['quality'].append(tok_qual['value'])
for tok_sub in tok['subtitle_languages']:
found['subtitle'].append(tok_sub['value'])
filename = reg.sub(' ', filename)
for typ in found:
found[typ] = [e for e in found[typ] if e != 'N/A']
return filename, found
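To make the tokenisation loop above concrete, here is a hedged, self-contained example of how a token list shaped like the api.get_tokens() output would be applied to a padded filename; the token records and the filename are invented, and the step ordering and 'N/A' filtering are omitted.

import re

# Hypothetical token records, mimicking the fields used by tokenize().
tokens = [
    {'token': 'VOSTFR', 'case_sensitive': '0', 'step': 1,
     'languages': [{'value': 'en'}], 'qualities': [],
     'subtitle_languages': [{'value': 'fr'}]},
    {'token': '1080p', 'case_sensitive': '0', 'step': 1,
     'languages': [], 'qualities': [{'value': '1080p'}],
     'subtitle_languages': []},
]

filename = ' some film vostfr 1080p mkv '   # tokenize() receives the name padded with spaces
found = {'lang': [], 'quality': [], 'subtitle': []}
for tok in tokens:
    flags = 0 if bool(int(tok['case_sensitive'])) else re.IGNORECASE
    reg = re.compile(r' ' + tok['token'] + r' ', flags)
    if reg.search(filename):
        found['lang'] += [t['value'] for t in tok['languages']]
        found['quality'] += [t['value'] for t in tok['qualities']]
        found['subtitle'] += [t['value'] for t in tok['subtitle_languages']]
        filename = reg.sub(' ', filename)   # strip the matched marker from the name

print(filename, found)
# ' some film mkv ' {'lang': ['en'], 'quality': ['1080p'], 'subtitle': ['fr']}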