
On the 'accueil' VLAN the CSS cannot be fetched from the wiki. darcs-hash:20080908182104-af139-dbef8a654e2fc494171a8df35e129cf438d8128f.gz
#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# Dump the disconnection pages so they can be installed on squid
# sudo /usr/scripts/wiki/dump-squid.py

"""
|
|
MoinMoin - Dump a MoinMoin wiki to static pages
|
|
|
|
@copyright: 2002-2004 by Jürgen Hermann <jh@web.de>
|
|
@license: GNU GPL, see COPYING for details.
|
|
"""
|
|
|
|
__version__ = '20040329'
|
|
|
|
# use this if your moin installation is not in sys.path:
import sys
sys.path.insert(0, '/etc/moin')

url_prefix = "."

url_base = 'VieCrans/PagesDeDeconnexion/'
outputdir = '/usr/scripts/squid/errors'
outputdir_rouge = '/var/www/rouge/squid'

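# The first tree feeds the error pages that squid serves itself; the tree on
# rouge is, judging by the path, the same pages published over plain HTTP.
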
page_template = u"""<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=%(charset)s">
<title>%(pagename)s</title>
<style type="text/css">
%(common_css)s
%(blackliste_css)s
</style>
</head>
<body>

<h1>%(pagenamewithlinks)s</h1>

%(pagehtml)s

<p class="creation">
Cette page a été extraite du wiki le %(timestamp)s. Vous pouvez l'<a href="%(wikilink)s?action=edit">éditer</a> ou <a href="%(wikilink)s">voir</a> la page originale.
</p>
</body>
</html>
"""

import os, time, StringIO, codecs, shutil, re
from MoinMoin import config, wikiutil, Page
from MoinMoin.script import _util
from MoinMoin.request import RequestCLI
from MoinMoin.action import AttachFile

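# NB: Python 2 idioms throughout (StringIO, print >>) against the MoinMoin
# 1.x scripting API (RequestCLI, _util.Script)
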
# Remove every page dumped by a previous run
for r in [outputdir, outputdir_rouge]:
    for f in os.listdir(r):
        # only remove plain files; subdirectories such as attach/ are kept
        # (os.remove() would fail on them)
        if os.path.isfile(os.path.join(r, f)):
            os.remove(os.path.join(r, f))

# Read the CSS from local copies: from the 'accueil' VLAN the stylesheets on
# the wiki cannot be fetched
common_css = file('/usr/scripts/wiki/static/blackliste/css/common.css').read()
blackliste_css = file('/usr/scripts/wiki/static/blackliste/css/blackliste.css').read()

class MoinDump(_util.Script):
    def __init__(self):
        _util.Script.__init__(self, __name__, '')

    def mainloop(self):
        """ moin-dump's main code. """

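        # Monkey-patch AttachFile so attachment URLs in the rendered HTML
        # point at the local copies made by get_attachment() below, instead
        # of at the wiki itself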
        AttachFile.getAttachUrl = lambda pagename, filename, request, addts=0, escaped=0: (get_attachment(request, pagename, filename, outputdir))

        # Dump the wiki
        request = RequestCLI(u"wiki.crans.org/")
        request.form = request.args = request.setup_args()

        # fix url_prefix so we get relative paths in output html
        request.cfg.url_prefix = url_prefix

        # Get all existing pages in the wiki
        pages = list(request.rootpage.getPageList(user=''))
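        # Keep only the pages under url_base, i.e. the disconnection pages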
        pages = list(filter(lambda x: re.match(os.path.join(url_base, '.*'), x), pages))

        pages.sort()

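        # Monkey-patch URL quoting to use the filesystem quoting, so links
        # between dumped pages match the dumped file names; the original
        # function is saved to build links back to the live wiki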
        quoteWikinameOriUrl = wikiutil.quoteWikinameURL
        wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename))

        for pagename in pages:
            file = wikiutil.quoteWikinameURL(pagename) # we have the same name in URL and FS
            # Build the page name with its chain of links (could probably be done better)
            originalpagename = pagename
            pagename = pagename.replace("/PagesStatiques", "")
            pagenamewithlinks = [u'']
            for composant in pagename.split("/"):
                pagenamewithlinks.append(pagenamewithlinks[-1] + '/' + composant)
            pagenamewithlinks = u" / ".join(map(lambda x: u'<a href="/wiki/%s">%s</a>' % (
                wikiutil.quoteWikinameURL(x[1:]), x[1:].split("/")[-1]), pagenamewithlinks[1:]))
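            # quoteWikinameFS encodes '/' as '(2f)'; keep the last component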
            _util.log('Writing %s...' % file.split('(2f)')[-1])
            try:
                pagehtml = ''
                page = Page.Page(request, originalpagename)
                try:
                    request.reset()
                    out = StringIO.StringIO()
                    request.redirect(out)
                    request.page = page
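                    # Fake the client address with an internal one, presumably
                    # so the page renders as an internal client would see it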
                    request.remote_addr = '138.231.136.3'
                    page.send_page(request, count_hit=0, content_only=1)
                    pagehtml = out.getvalue()
                    request.redirect()
                except:
                    print >>sys.stderr, "*** Caught exception while writing page!"
                    import traceback
                    traceback.print_exc(file=sys.stderr)
            finally:
                timestamp = time.strftime("%Y-%m-%d %H:%M")

                filepath = os.path.join(outputdir, file.split('(2f)')[-1])
                filepath_rouge = os.path.join(outputdir_rouge, file.split('(2f)')[-1] + '.html')

                fileout = codecs.open(filepath, 'w', config.charset)
                fileout_rouge = codecs.open(filepath_rouge, 'w', config.charset)

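                # %U is squid's error-page macro for the requested URL; wrap
                # it in an anchor so it renders as a clickable link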
                pagehtml = pagehtml.replace('%U', '<a href="%U">%U</a>')

                contenu = page_template % {
                    'charset': config.charset,
                    'pagename': pagename,
                    'pagenamewithlinks': pagenamewithlinks,
                    'pagehtml': pagehtml.replace('./', 'http://wiki.crans.org/wiki/'),
                    'timestamp': timestamp,
                    'wikilink': u"http://wiki.crans.org/%s" % quoteWikinameOriUrl(originalpagename).encode("UTF-8"),
                    'theme': request.cfg.theme_default,
                    'common_css': common_css,
                    'blackliste_css': blackliste_css
                }

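                # Point theme asset references one directory up, where the
                # monobook files are presumably installed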
                contenu = contenu.replace("./monobook", "..")

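                # rouge gets the complete page; the squid copy has its closing
                # tags stripped so that anything squid appends (e.g. its
                # signature) still lands inside the document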
                fileout_rouge.write(contenu)
                contenu = re.sub('</(body|html)>\n?', '', contenu)
                fileout.write(contenu)

                fileout.close()
                fileout_rouge.close()

def run():
    MoinDump().run()

def get_attachment(request, pagename, filename, outputdir):
    """Copy a page attachment next to the dumped pages and return the
    relative URL to use in the generated HTML."""
    source_dir = AttachFile.getAttachDir(request, pagename)
    source_file = os.path.join(source_dir, filename)
    if not os.path.isfile(source_file):
        print "%s does not exist!" % source_file
        return
    # make sure the attach/ directory exists before copying into it
    dest_dir = os.path.join(outputdir, "attach")
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    dest_file = os.path.join(dest_dir,
                             "%s_%s" % (wikiutil.quoteWikinameFS(pagename), filename))
    shutil.copyfile(source_file, dest_file)
    return os.path.join("..", "attach",
                        "%s_%s" % (wikiutil.quoteWikinameFS(pagename), filename))

if __name__ == "__main__":
|
|
run()
|