Move dump-wiki
darcs-hash:20051001130220-4ec08-2e0fc86ec80f393ee175222bc37873f3f7cde02d.gz
parent 83a8ac0e54
commit f74c56e157
1 changed file with 184 additions and 0 deletions
184  wiki/dump-wiki.py (executable file)
@@ -0,0 +1,184 @@
#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# Dump part of the wiki into the given directory to create
# a "static" copy.
#
# Example: python dump-wiki.py --regex WiFi/AvoirLeWifi".*" ~/macopie
# Pages matching the given regex are placed in the
# ~/macopie/wiki directory; attachments go to ~/macopie/attach.
#
# Currently I use:
# sudo python /usr/scripts/dump-wiki.py --regex 'WiFi(/PositionnementDesBornes|/AvoirLeWifi.*)?' ~/mointest
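#
# For example, the page "WiFi/AvoirLeWifi" ends up as
# ~/macopie/wiki/WiFi(2f)AvoirLeWifi.html (the "/" is hex-quoted by
# MoinMoin's filesystem name quoting).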

"""
MoinMoin - Dump a MoinMoin wiki to static pages

@copyright: 2002-2004 by Jürgen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""

__version__ = "20040329"

# use this if your moin installation is not in sys.path:
import sys
sys.path.insert(0, '../..') # path to MoinMoin
sys.path.insert(0, '/etc/moin')

url_prefix = "."
HTML_SUFFIX = ".html"

page_template = u'''<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=%(charset)s">
<title>%(pagename)s</title>
<link rel="stylesheet" type="text/css" href="../wiki.css">
</head>
<body>
<p class="avertissement">

Ce site est une copie statique et partielle de ce que l'on peut trouver sur le
<a href="http://wiki.crans.org">wiki</a> de l'association. Si vous êtes ici, alors
que vous avez demandé un autre site, c'est sans doute que vous êtes connecté
au réseau wifi de l'association mais que vous n'avez pas encore complété toutes
les étapes nécessaires pour avoir une connexion pleinement fonctionnelle. Ce site
contient donc les infos pour configurer correctement votre connexion.
</p>

<h1>%(pagenamewithlinks)s</h1>

%(pagehtml)s

<p class="creation">
Cette page a été extraite du wiki le %(timestamp)s. Vous pouvez l'<a href="%(wikilink)s?action=edit">éditer</a> ou <a href="%(wikilink)s">voir</a> la page originale.
</p>
</body>
</html>
'''
#'

import os, time, StringIO, codecs, shutil, re
from MoinMoin import config, wikiutil, Page
from MoinMoin.scripts import _util
from MoinMoin.request import RequestCLI
from MoinMoin.action import AttachFile


class MoinDump(_util.Script):
    def __init__(self):
        _util.Script.__init__(self, __name__, "[options] <target-directory>")

        # --regex=REGEX
        self.parser.add_option(
            "--regex", metavar="REGEX", dest="regex",
            help="Only copy the pages matching this regex"
        )

    def mainloop(self):
        """ moin-dump's main code. """

        if len(sys.argv) == 1:
            self.parser.print_help()
            sys.exit(1)

        # Prepare output directory
        outputdir = self.args[0]
        outputdir = os.path.abspath(outputdir)
        if not os.path.isdir(outputdir):
            try:
                os.mkdir(outputdir)
                _util.log("Created output directory '%s'!" % outputdir)
            except OSError:
                _util.fatal("Cannot create output directory '%s'!" % outputdir)
        # The page loop and get_attachment() both write into these
        # subdirectories, so make sure they exist.
        for subdir in ('wiki', 'attach'):
            if not os.path.isdir(os.path.join(outputdir, subdir)):
                os.mkdir(os.path.join(outputdir, subdir))
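
        # Monkey-patch AttachFile.getAttachUrl so that, while pages render,
        # attachment URLs are rewritten by get_attachment() below, which also
        # copies each file into <outputdir>/attach.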
        AttachFile.getAttachUrl = lambda pagename, filename, request, addts=0, escaped=0: (get_attachment(request, pagename, filename, outputdir))

        # Dump the wiki
        request = RequestCLI(u"wiki.crans.org/")
        request.form = request.args = request.setup_args()

        # fix url_prefix so we get relative paths in output html
        request.cfg.url_prefix = url_prefix

        # Get all existing pages in the wiki
        pages = list(request.rootpage.getPageList(user=''))
        if self.options.regex:
            pages = list(filter(lambda x: re.match(self.options.regex, x), pages))
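            # NB: re.match() anchors only at the start of the name, so any
            # page whose name merely begins with a match is kept as well;
            # end the regex with $ if an exact match is wanted.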

        pages.sort()
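
        # Keep a reference to the original URL quoting to build links back to
        # the live wiki; the patched version below maps every page name to the
        # local "<fs-quoted name>.html" file instead.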
        quoteWikinameOriUrl = wikiutil.quoteWikinameURL
        wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)

        errfile = os.path.join(outputdir, 'error.log')
        errlog = open(errfile, 'w')
        errcnt = 0

        for pagename in pages:
            file = wikiutil.quoteWikinameURL(pagename) # we have the same name in URL and FS
            # Build the page name as a series of links (could no doubt be done better)
            pagenamewithlinks = [u'']
            for composant in pagename.split("/"):
                pagenamewithlinks.append(pagenamewithlinks[-1]+'/'+composant)
            pagenamewithlinks = u" / ".join(map(lambda x: u'<a href="/wiki/%s">%s</a>' % (
                wikiutil.quoteWikinameURL(x[1:]), x[1:].split("/")[-1]), pagenamewithlinks[1:]))
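            # e.g. for u"WiFi/AvoirLeWifi" this accumulates u'/WiFi' and
            # u'/WiFi/AvoirLeWifi', then renders them as the linked
            # breadcrumb "WiFi / AvoirLeWifi" used in the <h1> heading.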
            _util.log('Writing "%s"...' % file)
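            # Render the page into a StringIO buffer; the finally-clause still
            # writes the template when rendering fails, so a broken page
            # yields an HTML file with an empty body rather than no file.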
            try:
                pagehtml = ''
                page = Page.Page(request, pagename)
                try:
                    request.reset()
                    out = StringIO.StringIO()
                    request.redirect(out)
                    page.send_page(request, count_hit=0, content_only=1)
                    pagehtml = out.getvalue()
                    request.redirect()
                except:
                    errcnt = errcnt + 1
                    print >>sys.stderr, "*** Caught exception while writing page!"
                    print >>errlog, "~" * 78
                    print >>errlog, file # page filename
                    import traceback
                    traceback.print_exc(None, errlog)
            finally:
                timestamp = time.strftime("%Y-%m-%d %H:%M")
                filepath = os.path.join(outputdir, 'wiki', file)
                fileout = codecs.open(filepath, 'w', config.charset)
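                # Fill in the %(...)s placeholders, then rewrite the theme's
                # "./monobook" asset prefix so stylesheet and image paths
                # resolve one directory up in the static copy.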
                fileout.write((page_template % {
                    'charset': config.charset,
                    'pagename': pagename,
                    'pagenamewithlinks': pagenamewithlinks,
                    'pagehtml': pagehtml,
                    'timestamp': timestamp,
                    'wikilink': u"http://wiki.crans.org/%s" % quoteWikinameOriUrl(pagename).encode("UTF-8"),
                    'theme': request.cfg.theme_default,
                }).replace("./monobook", ".."))
                fileout.close()

        errlog.close()
        if errcnt:
            print >>sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)


def run():
    MoinDump().run()
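

# Attachments all land in a single attach/ directory, flattened to
# "<fs-quoted page name>_<filename>" so files from different pages cannot
# collide; the relative path returned here is what gets linked in the HTML.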
def get_attachment(request, pagename, filename, outputdir):
    """Attachment handling"""
    source_dir = AttachFile.getAttachDir(request, pagename)
    source_file = os.path.join(source_dir, filename)
    if not os.path.isfile(source_file):
        print "%s does not exist!" % source_file
        return
    dest_file = os.path.join(outputdir, "attach",
                             "%s_%s" % (wikiutil.quoteWikinameFS(pagename), filename))
    shutil.copyfile(source_file, dest_file)
    return os.path.join("..", "attach",
                        "%s_%s" % (wikiutil.quoteWikinameFS(pagename), filename))


if __name__ == "__main__":
    run()