From 9fcac5548c2397cd65ff38d32289df562bd2978a Mon Sep 17 00:00:00 2001 From: Antoine Durand-Gasselin Date: Tue, 9 Dec 2008 18:49:26 +0100 Subject: [PATCH] [wiki-lenny] Hack de request/__init__.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pour lancer le thème crans-www sur le www Ah, et j'ai aussi modifié le logo darcs-hash:20081209174926-bd074-85b41aca2d387200ab6d2cf77540fc9d4de85af2.gz --- .../action/fillpoll.py} | 0 wiki-lenny/share/request.__init__.orig.py | 1680 ++++++++++++++++ wiki-lenny/share/request.__init__.py | 1684 +++++++++++++++++ wiki-lenny/static/crans.png | Bin 18580 -> 19148 bytes 4 files changed, 3364 insertions(+) rename wiki-lenny/{share/action.fillpoll.py => local/action/fillpoll.py} (100%) create mode 100644 wiki-lenny/share/request.__init__.orig.py create mode 100644 wiki-lenny/share/request.__init__.py diff --git a/wiki-lenny/share/action.fillpoll.py b/wiki-lenny/local/action/fillpoll.py similarity index 100% rename from wiki-lenny/share/action.fillpoll.py rename to wiki-lenny/local/action/fillpoll.py diff --git a/wiki-lenny/share/request.__init__.orig.py b/wiki-lenny/share/request.__init__.orig.py new file mode 100644 index 00000000..bcaf90a5 --- /dev/null +++ b/wiki-lenny/share/request.__init__.orig.py @@ -0,0 +1,1680 @@ +# -*- coding: iso-8859-1 -*- +""" + MoinMoin - RequestBase Implementation + + @copyright: 2001-2003 Juergen Hermann , + 2003-2008 MoinMoin:ThomasWaldmann + @license: GNU GPL, see COPYING for details. +""" + +# Support for remote IP address detection when using (reverse) proxy (or even proxies). +# If you exactly KNOW which (reverse) proxies you can trust, put them into the list +# below, so we can determine the "outside" IP as your trusted proxies see it. + +proxies_trusted = [] # trust noone! 
+#proxies_trusted = ['127.0.0.1', ] # can be a list of multiple IPs + +from MoinMoin import log +logging = log.getLogger(__name__) + +def find_remote_addr(addrs): + """ Find the last remote IP address before it hits our reverse proxies. + The LAST address in the list is the remote IP as detected by the server + (not taken from some x-forwarded-for header). + The FIRST address in the list might be the client's IP - if noone cheats + and everyone supports x-f-f header. + + See http://bob.pythonmac.org/archives/2005/09/23/apache-x-forwarded-for-caveat/ + + For debug loglevel, we log all . + + TODO: refactor request code to first do some basic IP init, then load configuration, + TODO: then do proxy processing. + TODO: add wikiconfig configurability for proxies_trusted + TODO: later, make it possible to put multipe remote IP addrs into edit-log + """ + logging.debug("request.find_remote_addr: addrs == %r" % addrs) + if proxies_trusted: + result = [addr for addr in addrs if addr not in proxies_trusted] + if result: + return result[-1] # last IP before it hit our trusted (reverse) proxies + return addrs[-1] # this is a safe remote_addr, not taken from x-f-f header + + +import os, re, time, sys, cgi, StringIO +import Cookie +import traceback + +from MoinMoin.Page import Page +from MoinMoin import config, wikiutil, user, caching, error +from MoinMoin.config import multiconfig +from MoinMoin.support.python_compatibility import set +from MoinMoin.util import IsWin9x +from MoinMoin.util.clock import Clock +from MoinMoin import auth +from urllib import quote, quote_plus + +# umask setting -------------------------------------------------------- +def set_umask(new_mask=0777^config.umask): + """ Set the OS umask value (and ignore potential failures on OSes where + this is not supported). + Default: the bitwise inverted value of config.umask + """ + try: + old_mask = os.umask(new_mask) + except: + # maybe we are on win32? 
+ pass + +# We do this at least once per Python process, when request is imported. +# If other software parts (like twistd's daemonize() function) set an +# unwanted umask, we have to call this again to set the correct one: +set_umask() + +# Exceptions ----------------------------------------------------------- + +class MoinMoinFinish(Exception): + """ Raised to jump directly to end of run() function, where finish is called """ + + +class HeadersAlreadySentException(Exception): + """ Is raised if the headers were already sent when emit_http_headers is called.""" + + +class RemoteClosedConnection(Exception): + """ Remote end closed connection during request """ + +# Utilities + +def cgiMetaVariable(header, scheme='http'): + """ Return CGI meta variable for header name + + e.g 'User-Agent' -> 'HTTP_USER_AGENT' + See http://www.faqs.org/rfcs/rfc3875.html section 4.1.18 + """ + var = '%s_%s' % (scheme, header) + return var.upper().replace('-', '_') + + +# Request Base ---------------------------------------------------------- + +class RequestBase(object): + """ A collection for all data associated with ONE request. """ + + # Defaults (used by sub classes) + http_accept_language = 'en' + server_name = 'localhost' + server_port = '80' + + # Extra headers we support. Both standalone and twisted store + # headers as lowercase. + moin_location = 'x-moin-location' + proxy_host = 'x-forwarded-host' # original host: header as seen by the proxy (e.g. wiki.example.org) + proxy_xff = 'x-forwarded-for' # list of original remote_addrs as seen by the proxies (e.g. ,,,...) + + def __init__(self, properties={}): + + # twistd's daemonize() overrides our umask, so we reset it here every + # request. we do it for all request types to avoid similar problems. 
+ set_umask() + + self._finishers = [] + + self._auth_redirected = False + + # Decode values collected by sub classes + self.path_info = self.decodePagename(self.path_info) + + self.failed = 0 + self._available_actions = None + self._known_actions = None + + # Pages meta data that we collect in one request + self.pages = {} + + self.sent_headers = None + self.user_headers = [] + self.cacheable = 0 # may this output get cached by http proxies/caches? + self.http_caching_disabled = 0 # see disableHttpCaching() + self.page = None + self._dicts = None + + # session handling. users cannot rely on a session being + # created, but we should always set request.session + self.session = {} + + # setuid handling requires an attribute in the request + # that stores the real user + self._setuid_real_user = None + + # Check for dumb proxy requests + # TODO relying on request_uri will not work on all servers, especially + # not on external non-Apache servers + self.forbidden = False + if self.request_uri.startswith('http://'): + self.makeForbidden403() + + # Init + else: + self.writestack = [] + self.clock = Clock() + self.clock.start('total') + self.clock.start('base__init__') + # order is important here! + self.__dict__.update(properties) + try: + self._load_multi_cfg() + except error.NoConfigMatchedError: + self.makeForbidden(404, 'No wiki configuration matching the URL found!\r\n') + return + + self.isSpiderAgent = self.check_spider() + + # Set decode charsets. Input from the user is always in + # config.charset, which is the page charsets. Except + # path_info, which may use utf-8, and handled by decodePagename. 
+ self.decode_charsets = [config.charset] + + if self.query_string.startswith('action=xmlrpc'): + self.args = {} + self.form = {} + self.action = 'xmlrpc' + self.rev = None + else: + try: + self.args = self.form = self.setup_args() + except UnicodeError: + self.makeForbidden(403, "The input you sent could not be understood.") + return + self.action = self.form.get('action', ['show'])[0] + try: + self.rev = int(self.form['rev'][0]) + except: + self.rev = None + + from MoinMoin.Page import RootPage + self.rootpage = RootPage(self) + + from MoinMoin.logfile import editlog + self.editlog = editlog.EditLog(self) + + from MoinMoin import i18n + self.i18n = i18n + i18n.i18n_init(self) + + # authentication might require translated forms, so + # have a try at guessing the language from the browser + lang = i18n.requestLanguage(self, try_user=False) + self.getText = lambda text, i18n=self.i18n, request=self, lang=lang, **kw: i18n.getText(text, request, lang, **kw) + + # session handler start, auth + self.parse_cookie() + user_obj = self.cfg.session_handler.start(self, self.cfg.session_id_handler) + shfinisher = lambda request: self.cfg.session_handler.finish(request, request.user, + self.cfg.session_id_handler) + self.add_finisher(shfinisher) + # set self.user even if _handle_auth_form raises an Exception + self.user = None + self.user = self._handle_auth_form(user_obj) + del user_obj + self.cfg.session_handler.after_auth(self, self.cfg.session_id_handler, self.user) + if not self.user: + self.user = user.User(self, auth_method='request:invalid') + + # setuid handling, check isSuperUser() because the user + # might have lost the permission between requests + if 'setuid' in self.session and self.user.isSuperUser(): + self._setuid_real_user = self.user + uid = self.session['setuid'] + self.user = user.User(self, uid, auth_method='setuid') + # set valid to True so superusers can even switch + # to disable accounts + self.user.valid = True + + if self.action != 'xmlrpc': + if 
not self.forbidden and self.isForbidden(): + self.makeForbidden403() + if not self.forbidden and self.surge_protect(): + self.makeUnavailable503() + + self.pragma = {} + self.mode_getpagelinks = 0 # is > 0 as long as we are in a getPageLinks call + self.parsePageLinks_running = {} # avoid infinite recursion by remembering what we are already running + + self.lang = i18n.requestLanguage(self) + # Language for content. Page content should use the wiki default lang, + # but generated content like search results should use the user language. + self.content_lang = self.cfg.language_default + self.getText = lambda text, i18n=self.i18n, request=self, lang=self.lang, **kv: i18n.getText(text, request, lang, **kv) + + self.reset() + + from MoinMoin.formatter.text_html import Formatter + self.html_formatter = Formatter(self) + self.formatter = self.html_formatter + + self.clock.stop('base__init__') + + def surge_protect(self, kick_him=False): + """ check if someone requesting too much from us, + if kick_him is True, we unconditionally blacklist the current user/ip + """ + limits = self.cfg.surge_action_limits + if not limits: + return False + + if self.remote_addr.startswith('127.'): # localnet + return False + + validuser = self.user.valid + current_id = validuser and self.user.name or self.remote_addr + current_action = self.action + + default_limit = limits.get('default', (30, 60)) + + now = int(time.time()) + surgedict = {} + surge_detected = False + + try: + # if we have common farm users, we could also use scope='farm': + cache = caching.CacheEntry(self, 'surgeprotect', 'surge-log', scope='wiki', use_encode=True) + if cache.exists(): + data = cache.content() + data = data.split("\n") + for line in data: + try: + id, t, action, surge_indicator = line.split("\t") + t = int(t) + maxnum, dt = limits.get(action, default_limit) + if t >= now - dt: + events = surgedict.setdefault(id, {}) + timestamps = events.setdefault(action, []) + timestamps.append((t, surge_indicator)) + 
except StandardError: + pass + + maxnum, dt = limits.get(current_action, default_limit) + events = surgedict.setdefault(current_id, {}) + timestamps = events.setdefault(current_action, []) + surge_detected = len(timestamps) > maxnum + + surge_indicator = surge_detected and "!" or "" + timestamps.append((now, surge_indicator)) + if surge_detected: + if len(timestamps) < maxnum * 2: + timestamps.append((now + self.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out + + if current_action not in ('cache', 'AttachFile', ): # don't add cache/AttachFile accesses to all or picture galleries will trigger SP + current_action = 'all' # put a total limit on user's requests + maxnum, dt = limits.get(current_action, default_limit) + events = surgedict.setdefault(current_id, {}) + timestamps = events.setdefault(current_action, []) + + if kick_him: # ban this guy, NOW + timestamps.extend([(now + self.cfg.surge_lockout_time, "!")] * (2 * maxnum)) + + surge_detected = surge_detected or len(timestamps) > maxnum + + surge_indicator = surge_detected and "!" or "" + timestamps.append((now, surge_indicator)) + if surge_detected: + if len(timestamps) < maxnum * 2: + timestamps.append((now + self.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out + + data = [] + for id, events in surgedict.items(): + for action, timestamps in events.items(): + for t, surge_indicator in timestamps: + data.append("%s\t%d\t%s\t%s" % (id, t, action, surge_indicator)) + data = "\n".join(data) + cache.update(data) + except StandardError: + pass + + if surge_detected and validuser and self.user.auth_method in self.cfg.auth_methods_trusted: + logging.info("Trusted user %s would have triggered surge protection if not trusted." 
% self.user.name) + return False # do not subject trusted users to surge protection + + return surge_detected + + def getDicts(self): + """ Lazy initialize the dicts on the first access """ + if self._dicts is None: + from MoinMoin import wikidicts + dicts = wikidicts.GroupDict(self) + dicts.load_dicts() + self._dicts = dicts + return self._dicts + + def delDicts(self): + """ Delete the dicts, used by some tests """ + del self._dicts + self._dicts = None + + dicts = property(getDicts, None, delDicts) + + def _load_multi_cfg(self): + # protect against calling multiple times + if not hasattr(self, 'cfg'): + self.clock.start('load_multi_cfg') + self.cfg = multiconfig.getConfig(self.url) + self.clock.stop('load_multi_cfg') + + def setAcceptedCharsets(self, accept_charset): + """ Set accepted_charsets by parsing accept-charset header + + Set self.accepted_charsets to an ordered list based on http_accept_charset. + + Reference: http://www.w3.org/Protocols/rfc2616/rfc2616.txt + + TODO: currently no code use this value. + + @param accept_charset: accept-charset header + """ + charsets = [] + if accept_charset: + accept_charset = accept_charset.lower() + # Add iso-8859-1 if needed + if (not '*' in accept_charset and + 'iso-8859-1' not in accept_charset): + accept_charset += ',iso-8859-1' + + # Make a list, sorted by quality value, using Schwartzian Transform + # Create list of tuples (value, name) , sort, extract names + for item in accept_charset.split(','): + if ';' in item: + name, qval = item.split(';') + qval = 1.0 - float(qval.split('=')[1]) + else: + name, qval = item, 0 + charsets.append((qval, name)) + charsets.sort() + # Remove *, its not clear what we should do with it later + charsets = [name for qval, name in charsets if name != '*'] + + self.accepted_charsets = charsets + + def _setup_vars_from_std_env(self, env): + """ Set common request variables from CGI environment + + Parse a standard CGI environment as created by common web servers. 
+ Reference: http://www.faqs.org/rfcs/rfc3875.html + + @param env: dict like object containing cgi meta variables + """ + # Values we can just copy + self.env = env + self.http_accept_language = env.get('HTTP_ACCEPT_LANGUAGE', self.http_accept_language) + self.server_name = env.get('SERVER_NAME', self.server_name) + self.server_port = env.get('SERVER_PORT', self.server_port) + self.saved_cookie = env.get('HTTP_COOKIE', '') + self.script_name = env.get('SCRIPT_NAME', '') + self.path_info = env.get('PATH_INFO', '') + self.query_string = env.get('QUERY_STRING', '') + self.request_method = env.get('REQUEST_METHOD', None) + self.remote_addr = env.get('REMOTE_ADDR', '') + self.http_user_agent = env.get('HTTP_USER_AGENT', '') + try: + self.content_length = int(env.get('CONTENT_LENGTH')) + except (TypeError, ValueError): + self.content_length = None + self.if_modified_since = env.get('If-modified-since') or env.get(cgiMetaVariable('If-modified-since')) + self.if_none_match = env.get('If-none-match') or env.get(cgiMetaVariable('If-none-match')) + + # REQUEST_URI is not part of CGI spec, but an addition of Apache. + self.request_uri = env.get('REQUEST_URI', '') + + # Values that need more work + self.setHttpReferer(env.get('HTTP_REFERER')) + self.setIsSSL(env) + self.setHost(env.get('HTTP_HOST')) + self.fixURI(env) + + self.setURL(env) + #self.debugEnvironment(env) + + def setHttpReferer(self, referer): + """ Set http_referer, making sure its ascii + + IE might send non-ascii value. + """ + value = '' + if referer: + value = unicode(referer, 'ascii', 'replace') + value = value.encode('ascii', 'replace') + self.http_referer = value + + def setIsSSL(self, env): + """ Set is_ssl + + @param env: dict like object containing cgi meta variables + """ + self.is_ssl = bool(env.get('SSL_PROTOCOL') or + env.get('SSL_PROTOCOL_VERSION') or + env.get('HTTPS') == 'on') + + def setHost(self, host=None): + """ Set http_host + + Create from server name and port if missing. 
Previous code + default to localhost. + """ + if not host: + port = '' + standardPort = ('80', '443')[self.is_ssl] + if self.server_port != standardPort: + port = ':' + self.server_port + host = self.server_name + port + self.http_host = host + + def fixURI(self, env): + """ Fix problems with script_name and path_info + + Handle the strange charset semantics on Windows and other non + posix systems. path_info is transformed into the system code + page by the web server. Additionally, paths containing dots let + most webservers choke. + + Broken environment variables in different environments: + path_info script_name + Apache1 X X PI does not contain dots + Apache2 X X PI is not encoded correctly + IIS X X path_info include script_name + Other ? - ? := Possible and even RFC-compatible. + - := Hopefully not. + + @param env: dict like object containing cgi meta variables + """ + # Fix the script_name when using Apache on Windows. + server_software = env.get('SERVER_SOFTWARE', '') + if os.name == 'nt' and 'Apache/' in server_software: + # Removes elements ending in '.' from the path. + self.script_name = '/'.join([x for x in self.script_name.split('/') + if not x.endswith('.')]) + + # Fix path_info + if os.name != 'posix' and self.request_uri != '': + # Try to recreate path_info from request_uri. + import urlparse + scriptAndPath = urlparse.urlparse(self.request_uri)[2] + path = scriptAndPath.replace(self.script_name, '', 1) + self.path_info = wikiutil.url_unquote(path, want_unicode=False) + elif os.name == 'nt': + # Recode path_info to utf-8 + path = wikiutil.decodeWindowsPath(self.path_info) + self.path_info = path.encode("utf-8") + + # Fix bug in IIS/4.0 when path_info contain script_name + if self.path_info.startswith(self.script_name): + self.path_info = self.path_info[len(self.script_name):] + + def setURL(self, env): + """ Set url, used to locate wiki config + + This is the place to manipulate url parts as needed. 
+ + @param env: dict like object containing cgi meta variables or http headers. + """ + # proxy support + self.rewriteRemoteAddr(env) + self.rewriteHost(env) + + self.rewriteURI(env) + + if not self.request_uri: + self.request_uri = self.makeURI() + self.url = self.http_host + self.request_uri + + def rewriteHost(self, env): + """ Rewrite http_host transparently + + Get the proxy host using 'X-Forwarded-Host' header, added by + Apache 2 and other proxy software. + + TODO: Will not work for Apache 1 or others that don't add this header. + + TODO: If we want to add an option to disable this feature it + should be in the server script, because the config is not + loaded at this point, and must be loaded after url is set. + + @param env: dict like object containing cgi meta variables or http headers. + """ + proxy_host = (env.get(self.proxy_host) or + env.get(cgiMetaVariable(self.proxy_host))) + if proxy_host: + self.http_host = proxy_host + + def rewriteRemoteAddr(self, env): + """ Rewrite remote_addr transparently + + Get the proxy remote addr using 'X-Forwarded-For' header, added by + Apache 2 and other proxy software. + + TODO: Will not work for Apache 1 or others that don't add this header. + + TODO: If we want to add an option to disable this feature it + should be in the server script, because the config is not + loaded at this point, and must be loaded after url is set. + + @param env: dict like object containing cgi meta variables or http headers. + """ + xff = (env.get(self.proxy_xff) or + env.get(cgiMetaVariable(self.proxy_xff))) + if xff: + xff = [addr.strip() for addr in xff.split(',')] + xff.append(self.remote_addr) + self.remote_addr = find_remote_addr(xff) + + def rewriteURI(self, env): + """ Rewrite request_uri, script_name and path_info transparently + + Useful when running mod python or when running behind a proxy, + e.g run on localhost:8000/ and serve as example.com/wiki/. + + Uses private 'X-Moin-Location' header to set the script name. 
+ This allow setting the script name when using Apache 2 + directive:: + + + RequestHeader set X-Moin-Location /my/wiki/ + + + TODO: does not work for Apache 1 and others that do not allow + setting custom headers per request. + + @param env: dict like object containing cgi meta variables or http headers. + """ + location = (env.get(self.moin_location) or + env.get(cgiMetaVariable(self.moin_location))) + if location is None: + return + + scriptAndPath = self.script_name + self.path_info + location = location.rstrip('/') + self.script_name = location + + # This may happen when using mod_python + if scriptAndPath.startswith(location): + self.path_info = scriptAndPath[len(location):] + + # Recreate the URI from the modified parts + if self.request_uri: + self.request_uri = self.makeURI() + + def makeURI(self): + """ Return uri created from uri parts """ + uri = self.script_name + wikiutil.url_quote(self.path_info) + if self.query_string: + uri += '?' + self.query_string + return uri + + def splitURI(self, uri): + """ Return path and query splited from uri + + Just like CGI environment, the path is unquoted, the query is not. + """ + if '?' 
in uri: + path, query = uri.split('?', 1) + else: + path, query = uri, '' + return wikiutil.url_unquote(path, want_unicode=False), query + + def _handle_auth_form(self, user_obj): + username = self.form.get('name', [None])[0] + password = self.form.get('password', [None])[0] + oid = self.form.get('openid_identifier', [None])[0] + login = 'login' in self.form + logout = 'logout' in self.form + stage = self.form.get('stage', [None])[0] + return self.handle_auth(user_obj, attended=True, username=username, + password=password, login=login, logout=logout, + stage=stage, openid_identifier=oid) + + def handle_auth(self, user_obj, attended=False, **kw): + username = kw.get('username') + password = kw.get('password') + oid = kw.get('openid_identifier') + login = kw.get('login') + logout = kw.get('logout') + stage = kw.get('stage') + extra = { + 'cookie': self.cookie, + } + if login: + extra['attended'] = attended + extra['username'] = username + extra['password'] = password + extra['openid_identifier'] = oid + if stage: + extra['multistage'] = True + login_msgs = [] + self._login_multistage = None + + if logout and 'setuid' in self.session: + del self.session['setuid'] + return user_obj + + for authmethod in self.cfg.auth: + if logout: + user_obj, cont = authmethod.logout(self, user_obj, **extra) + elif login: + if stage and authmethod.name != stage: + continue + ret = authmethod.login(self, user_obj, **extra) + user_obj = ret.user_obj + cont = ret.continue_flag + if stage: + stage = None + del extra['multistage'] + if ret.multistage: + self._login_multistage = ret.multistage + self._login_multistage_name = authmethod.name + return user_obj + if ret.redirect_to: + nextstage = auth.get_multistage_continuation_url(self, authmethod.name) + url = ret.redirect_to + url = url.replace('%return_form', quote_plus(nextstage)) + url = url.replace('%return', quote(nextstage)) + self._auth_redirected = True + self.http_redirect(url) + return user_obj + msg = ret.message + if msg and not 
msg in login_msgs: + login_msgs.append(msg) + else: + user_obj, cont = authmethod.request(self, user_obj, **extra) + if not cont: + break + + self._login_messages = login_msgs + return user_obj + + def handle_jid_auth(self, jid): + return user.get_by_jabber_id(self, jid) + + def parse_cookie(self): + try: + self.cookie = Cookie.SimpleCookie(self.saved_cookie) + except Cookie.CookieError: + self.cookie = None + + def reset(self): + """ Reset request state. + + Called after saving a page, before serving the updated + page. Solves some practical problems with request state + modified during saving. + + """ + # This is the content language and has nothing to do with + # The user interface language. The content language can change + # during the rendering of a page by lang macros + self.current_lang = self.cfg.language_default + + # caches unique ids + self.init_unique_ids() + + if hasattr(self, "_fmt_hd_counters"): + del self._fmt_hd_counters + + def loadTheme(self, theme_name): + """ Load the Theme to use for this request. + + @param theme_name: the name of the theme + @type theme_name: str + @rtype: int + @return: success code + 0 on success + 1 if user theme could not be loaded, + 2 if a hard fallback to modern theme was required. + """ + fallback = 0 + if theme_name == "": + theme_name = self.cfg.theme_default + + try: + Theme = wikiutil.importPlugin(self.cfg, 'theme', theme_name, 'Theme') + except wikiutil.PluginMissingError: + fallback = 1 + try: + Theme = wikiutil.importPlugin(self.cfg, 'theme', self.cfg.theme_default, 'Theme') + except wikiutil.PluginMissingError: + fallback = 2 + from MoinMoin.theme.modern import Theme + + self.theme = Theme(self) + return fallback + + def setContentLanguage(self, lang): + """ Set the content language, used for the content div + + Actions that generate content in the user language, like search, + should set the content direction to the user language before they + call send_title! 
+ """ + self.content_lang = lang + self.current_lang = lang + + def getPragma(self, key, defval=None): + """ Query a pragma value (#pragma processing instruction) + + Keys are not case-sensitive. + """ + return self.pragma.get(key.lower(), defval) + + def setPragma(self, key, value): + """ Set a pragma value (#pragma processing instruction) + + Keys are not case-sensitive. + """ + self.pragma[key.lower()] = value + + def getPathinfo(self): + """ Return the remaining part of the URL. """ + return self.path_info + + def getScriptname(self): + """ Return the scriptname part of the URL ('/path/to/my.cgi'). """ + if self.script_name == '/': + return '' + return self.script_name + + def getKnownActions(self): + """ Create a dict of avaiable actions + + Return cached version if avaiable. + + @rtype: dict + @return: dict of all known actions + """ + try: + self.cfg.cache.known_actions # check + except AttributeError: + from MoinMoin import action + self.cfg.cache.known_actions = set(action.getNames(self.cfg)) + + # Return a copy, so clients will not change the set. + return self.cfg.cache.known_actions.copy() + + def getAvailableActions(self, page): + """ Get list of avaiable actions for this request + + The dict does not contain actions that starts with lower case. + Themes use this dict to display the actions to the user. + + @param page: current page, Page object + @rtype: dict + @return: dict of avaiable actions + """ + if self._available_actions is None: + # some actions might make sense for non-existing pages, so we just + # require read access here. 
Can be later refined to some action + # specific check: + if not self.user.may.read(page.page_name): + return [] + + # Filter non ui actions (starts with lower case letter) + actions = self.getKnownActions() + actions = [action for action in actions if not action[0].islower()] + + # Filter wiki excluded actions + actions = [action for action in actions if not action in self.cfg.actions_excluded] + + # Filter actions by page type, acl and user state + excluded = [] + if ((page.isUnderlayPage() and not page.isStandardPage()) or + not self.user.may.write(page.page_name) or + not self.user.may.delete(page.page_name)): + # Prevent modification of underlay only pages, or pages + # the user can't write and can't delete + excluded = [u'RenamePage', u'DeletePage', ] # AttachFile must NOT be here! + actions = [action for action in actions if not action in excluded] + + self._available_actions = set(actions) + + # Return a copy, so clients will not change the dict. + return self._available_actions.copy() + + def redirectedOutput(self, function, *args, **kw): + """ Redirect output during function, return redirected output """ + buf = StringIO.StringIO() + self.redirect(buf) + try: + function(*args, **kw) + finally: + self.redirect() + text = buf.getvalue() + buf.close() + return text + + def redirect(self, file=None): + """ Redirect output to file, or restore saved output """ + if file: + self.writestack.append(self.write) + self.write = file.write + else: + self.write = self.writestack.pop() + + def log(self, msg): + """ DEPRECATED - Log msg to logging framework + Please call logging.info(...) directly! 
+ """ + msg = msg.strip() + # Encode unicode msg + if isinstance(msg, unicode): + msg = msg.encode(config.charset) + logging.info(msg) + + def timing_log(self, start, action): + """ Log to timing log (for performance analysis) """ + indicator = '' + if start: + total = "vvv" + else: + self.clock.stop('total') # make sure it is stopped + total_secs = self.clock.timings['total'] + # we add some stuff that is easy to grep when searching for peformance problems: + if total_secs > 50: + indicator += '!4!' + elif total_secs > 20: + indicator += '!3!' + elif total_secs > 10: + indicator += '!2!' + elif total_secs > 2: + indicator += '!1!' + total = self.clock.value('total') + # use + for existing pages, - for non-existing pages + if self.page is not None: + indicator += self.page.exists() and '+' or '-' + if self.isSpiderAgent: + indicator += "B" + + pid = os.getpid() + msg = 'Timing %5d %-6s %4s %-10s %s\n' % (pid, total, indicator, action, self.url) + logging.info(msg) + + def send_file(self, fileobj, bufsize=8192, do_flush=False): + """ Send a file to the output stream. + + @param fileobj: a file-like object (supporting read, close) + @param bufsize: size of chunks to read/write + @param do_flush: call flush after writing? + """ + while True: + buf = fileobj.read(bufsize) + if not buf: + break + self.write(buf) + if do_flush: + self.flush() + + def write(self, *data): + """ Write to output stream. 
""" + raise NotImplementedError + + def encode(self, data): + """ encode data (can be both unicode strings and strings), + preparing for a single write() + """ + wd = [] + for d in data: + try: + if isinstance(d, unicode): + # if we are REALLY sure, we can use "strict" + d = d.encode(config.charset, 'replace') + elif d is None: + continue + wd.append(d) + except UnicodeError: + logging.error("Unicode error on: %s" % repr(d)) + return ''.join(wd) + + def decodePagename(self, name): + """ Decode path, possibly using non ascii characters + + Does not change the name, only decode to Unicode. + + First split the path to pages, then decode each one. This enables + us to decode one page using config.charset and another using + utf-8. This situation happens when you try to add to a name of + an existing page. + + See http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 + + @param name: page name, string + @rtype: unicode + @return decoded page name + """ + # Split to pages and decode each one + pages = name.split('/') + decoded = [] + for page in pages: + # Recode from utf-8 into config charset. If the path + # contains user typed parts, they are encoded using 'utf-8'. + if config.charset != 'utf-8': + try: + page = unicode(page, 'utf-8', 'strict') + # Fit data into config.charset, replacing what won't + # fit. Better have few "?" in the name than crash. + page = page.encode(config.charset, 'replace') + except UnicodeError: + pass + + # Decode from config.charset, replacing what can't be decoded. + page = unicode(page, config.charset, 'replace') + decoded.append(page) + + # Assemble decoded parts + name = u'/'.join(decoded) + return name + + def normalizePagename(self, name): + """ Normalize page name + + Prevent creating page names with invisible characters or funny + whitespace that might confuse the users or abuse the wiki, or + just does not make sense. + + Restrict even more group pages, so they can be used inside acl lines. 
def isForbidden(self):
    """ check for web spiders and refuse anything except viewing """
    forbidden = 0
    # No parsed query string is available at this point, so we rely on
    # simple substring matching against the raw query string.
    qs = self.query_string
    action = self.action
    is_plain_get = (qs == '' and self.request_method == 'GET')
    # actions spiders are always allowed to perform
    spider_ok = (
        action in ('rss_rc', 'show', 'sitemap') or
        # allow spiders to fetch attachments
        (action == 'AttachFile' and 'do=get' in qs)
    )
    if not is_plain_get and not spider_ok:
        forbidden = self.isSpiderAgent

    if not forbidden and self.cfg.hosts_deny:
        ip = self.remote_addr
        for host in self.cfg.hosts_deny:
            # a trailing dot denies a whole network prefix
            if host[-1] == '.' and ip.startswith(host):
                forbidden = 1
                logging.debug("hosts_deny (net): %s" % str(forbidden))
                break
            if ip == host:
                forbidden = 1
                logging.debug("hosts_deny (ip): %s" % str(forbidden))
                break
    return forbidden
+ """ + decode = wikiutil.decodeUserInput + result = {} + for key in args: + if key + '__filename__' in args: + # Copy file data as is + result[key] = args[key] + elif key.endswith('__filename__'): + result[key] = decode(args[key], self.decode_charsets) + else: + result[key] = [decode(value, self.decode_charsets) for value in args[key]] + return result + + def getBaseURL(self): + """ Return a fully qualified URL to this script. """ + return self.getQualifiedURL(self.getScriptname()) + + def getQualifiedURL(self, uri=''): + """ Return an absolute URL starting with schema and host. + + Already qualified urls are returned unchanged. + + @param uri: server rooted uri e.g /scriptname/pagename. + It must start with a slash. Must be ascii and url encoded. + """ + import urlparse + scheme = urlparse.urlparse(uri)[0] + if scheme: + return uri + + scheme = ('http', 'https')[self.is_ssl] + result = "%s://%s%s" % (scheme, self.http_host, uri) + + # This might break qualified urls in redirects! + # e.g. mapping 'http://netloc' -> '/' + return wikiutil.mapURL(self, result) + + def getUserAgent(self): + """ Get the user agent. """ + return self.http_user_agent + + def makeForbidden(self, resultcode, msg): + statusmsg = { + 401: 'Authorization required', + 403: 'FORBIDDEN', + 404: 'Not found', + 503: 'Service unavailable', + } + headers = [ + 'Status: %d %s' % (resultcode, statusmsg[resultcode]), + 'Content-Type: text/plain; charset=utf-8' + ] + # when surge protection triggered, tell bots to come back later... 
def initTheme(self):
    """ Set theme - forced theme, user theme or wiki default """
    forced = self.cfg.theme_force
    chosen = self.cfg.theme_default if forced else self.user.theme_name
    self.loadTheme(chosen)
+ # this is just for robots.txt being able to forbid them for crawlers + prefix = self.cfg.url_prefix_action + if prefix is not None: + prefix = '/%s/' % prefix # e.g. '/action/' + if path.startswith(prefix): + # remove prefix and action name + path = path[len(prefix):] + action, path = (path.split('/', 1) + ['', ''])[:2] + path = '/' + path + + if path.startswith('/'): + pagename = self.normalizePagename(path) + else: + pagename = None + + # need to inform caches that content changes based on: + # * cookie (even if we aren't sending one now) + # * User-Agent (because a bot might be denied and get no content) + # * Accept-Language (except if moin is told to ignore browser language) + if self.cfg.language_ignore_browser: + self.setHttpHeader("Vary: Cookie,User-Agent") + else: + self.setHttpHeader("Vary: Cookie,User-Agent,Accept-Language") + + # Handle request. We have these options: + # 1. jump to page where user left off + if not pagename and self.user.remember_last_visit and action_name == 'show': + pagetrail = self.user.getTrail() + if pagetrail: + # Redirect to last page visited + last_visited = pagetrail[-1] + wikiname, pagename = wikiutil.split_interwiki(last_visited) + if wikiname != 'Self': + wikitag, wikiurl, wikitail, error = wikiutil.resolve_interwiki(self, wikiname, pagename) + url = wikiurl + wikiutil.quoteWikinameURL(wikitail) + else: + url = Page(self, pagename).url(self) + else: + # Or to localized FrontPage + url = wikiutil.getFrontPage(self).url(self) + self.http_redirect(url) + return self.finish() + + # 2. handle action + else: + # pagename could be empty after normalization e.g. 
'///' -> '' + # Use localized FrontPage if pagename is empty + if not pagename: + self.page = wikiutil.getFrontPage(self) + else: + self.page = Page(self, pagename) + if self._try_redirect_spaces_page(pagename): + return self.finish() + + msg = None + # Complain about unknown actions + if not action_name in self.getKnownActions(): + msg = _("Unknown action %(action_name)s.") % { + 'action_name': wikiutil.escape(action_name), } + + # Disallow non available actions + elif action_name[0].isupper() and not action_name in self.getAvailableActions(self.page): + msg = _("You are not allowed to do %(action_name)s on this page.") % { + 'action_name': wikiutil.escape(action_name), } + if not self.user.valid: + # Suggest non valid user to login + msg += " " + _("Login and try again.") + + if msg: + self.theme.add_msg(msg, "error") + self.page.send_page() + # Try action + else: + from MoinMoin import action + handler = action.getHandler(self, action_name) + if handler is None: + msg = _("You are not allowed to do %(action_name)s on this page.") % { + 'action_name': wikiutil.escape(action_name), } + if not self.user.valid: + # Suggest non valid user to login + msg += " " + _("Login and try again.") + self.theme.add_msg(msg, "error") + self.page.send_page() + else: + handler(self.page.page_name, self) + + # every action that didn't use to raise MoinMoinFinish must call this now: + # self.theme.send_closing_html() + + except MoinMoinFinish: + pass + except RemoteClosedConnection: + # at least clean up + pass + except SystemExit: + raise # fcgi uses this to terminate a thread + except Exception, err: + try: + # nothing we can do about further failures! + self.fail(err) + except: + pass + + if self.cfg.log_timing: + self.timing_log(False, action_name) + + return self.finish() + + def http_redirect(self, url): + """ Redirect to a fully qualified, or server-rooted URL + + @param url: relative or absolute url, ascii using url encoding. 
+ """ + url = self.getQualifiedURL(url) + self.emit_http_headers(["Status: 302 Found", "Location: %s" % url]) + + def emit_http_headers(self, more_headers=[], testing=False): + """ emit http headers after some preprocessing / checking + + Makes sure we only emit headers once. + Encodes to ASCII if it gets unicode headers. + Make sure we have exactly one Content-Type and one Status header. + Make sure Status header string begins with a integer number. + + For emitting (testing == False), it calls the server specific + _emit_http_headers method. For testing, it returns the result. + + @param more_headers: list of additional header strings + @param testing: set to True by test code + """ + user_headers = self.user_headers + self.user_headers = [] + tracehere = ''.join(traceback.format_stack()[:-1]) + all_headers = [(hdr, tracehere) for hdr in more_headers] + user_headers + + if self.sent_headers: + # Send headers only once + logging.error("Attempt to send headers twice!") + logging.error("First attempt:\n%s" % self.sent_headers) + logging.error("Second attempt:\n%s" % tracehere) + raise HeadersAlreadySentException("emit_http_headers has already been called before!") + else: + self.sent_headers = tracehere + + # assemble dict of http headers + headers = {} + traces = {} + for header, trace in all_headers: + if isinstance(header, unicode): + header = header.encode('ascii') + key, value = header.split(':', 1) + lkey = key.lower() + value = value.lstrip() + if lkey in headers: + if lkey in ['vary', 'cache-control', 'content-language', ]: + # these headers (list might be incomplete) allow multiple values + # that can be merged into a comma separated list + headers[lkey] = headers[lkey][0], '%s, %s' % (headers[lkey][1], value) + traces[lkey] = trace + else: + logging.warning("Duplicate http header: %r (ignored)" % header) + logging.warning("Header added first at:\n%s" % traces[lkey]) + logging.warning("Header added again at:\n%s" % trace) + else: + headers[lkey] = (key, 
value) + traces[lkey] = trace + + if 'content-type' not in headers: + headers['content-type'] = ('Content-type', 'text/html; charset=%s' % config.charset) + + if 'status' not in headers: + headers['status'] = ('Status', '200 OK') + else: + # check if we got a valid status + try: + status = headers['status'][1] + int(status.split(' ', 1)[0]) + except: + logging.error("emit_http_headers called with invalid header Status: %r" % status) + headers['status'] = ('Status', '500 Server Error - invalid status header') + + header_format = '%s: %s' + st_header = header_format % headers['status'] + del headers['status'] + ct_header = header_format % headers['content-type'] + del headers['content-type'] + + headers = [header_format % kv_tuple for kv_tuple in headers.values()] # make a list of strings + headers = [st_header, ct_header] + headers # do NOT change order! + if not testing: + self._emit_http_headers(headers) + else: + return headers + + def _emit_http_headers(self, headers): + """ server specific method to emit http headers. + + @param headers: a list of http header strings in this FIXED order: + 1. status header (always present and valid, e.g. "200 OK") + 2. content type header (always present) + 3. other headers (optional) + """ + raise NotImplementedError + + def setHttpHeader(self, header): + """ Save header for later send. + + Attention: although we use a list here, some implementations use a dict, + thus multiple calls with the same header type do NOT work in the end! + """ + # save a traceback with the header for duplicate bug reporting + self.user_headers.append((header, ''.join(traceback.format_stack()[:-1]))) + + def fail(self, err): + """ Fail when we can't continue + + Send 500 status code with the error name. Reference: + http://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html#sec6.1.1 + + Log the error, then let failure module handle it. + + @param err: Exception instance or subclass. 
+ """ + self.failed = 1 # save state for self.run() + # we should not generate the headers two times + if not self.sent_headers: + self.emit_http_headers(['Status: 500 MoinMoin Internal Error']) + from MoinMoin import failure + failure.handle(self, err) + + def make_unique_id(self, base, namespace=None): + """ + Generates a unique ID using a given base name. Appends a running count to the base. + + Needs to stay deterministic! + + @param base: the base of the id + @type base: unicode + @param namespace: the namespace for the ID, used when including pages + + @returns: a unique (relatively to the namespace) ID + @rtype: unicode + """ + if not isinstance(base, unicode): + base = unicode(str(base), 'ascii', 'ignore') + if not namespace in self._page_ids: + self._page_ids[namespace] = {} + count = self._page_ids[namespace].get(base, -1) + 1 + self._page_ids[namespace][base] = count + if not count: + return base + return u'%s-%d' % (base, count) + + def init_unique_ids(self): + '''Initialise everything needed for unique IDs''' + self._unique_id_stack = [] + self._page_ids = {None: {}} + self.include_id = None + self._include_stack = [] + + def push_unique_ids(self): + ''' + Used by the TOC macro, this ensures that the ID namespaces + are reset to the status when the current include started. + This guarantees that doing the ID enumeration twice results + in the same results, on any level. + ''' + self._unique_id_stack.append((self._page_ids, self.include_id)) + self.include_id, pids = self._include_stack[-1] + # make a copy of the containing ID namespaces, that is to say + # go back to the level we had at the previous include + self._page_ids = {} + for namespace in pids: + self._page_ids[namespace] = pids[namespace].copy() + + def pop_unique_ids(self): + ''' + Used by the TOC macro to reset the ID namespaces after + having parsed the page for TOC generation and after + printing the TOC. 
def httpDate(self, when=None, rfc='1123'):
    """ Returns http date string, according to rfc2068

    See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-3.3

    A http 1.1 server should use only rfc1123 dates, but a cookie's
    "expires" field should use the older obsolete rfc850 date format.

    Note: strftime() is unusable here because it honors the locale,
    while rfc2822 requires english day and month names; and
    email.Utils.formatdate emits the zone as '-0000' instead of 'GMT'
    and only produces rfc1123 dates. This is a modified version of
    email.Utils.formatdate from Python 2.4.

    @param when: seconds from epoch, as returned by time.time()
    @param rfc: conform to rfc ('1123' or '850')
    @rtype: string
    @return: http date conforming to rfc1123 or rfc850
    """
    if when is None:
        when = time.time()
    t = time.gmtime(when)
    month = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')[t.tm_mon - 1]
    if rfc == '1123':
        weekday = ('Mon', 'Tue', 'Wed', 'Thu',
                   'Fri', 'Sat', 'Sun')[t.tm_wday]
        datestr = '%02d %s %04d' % (t.tm_mday, month, t.tm_year)
    elif rfc == '850':
        weekday = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
                   'Friday', 'Saturday', 'Sunday')[t.tm_wday]
        # rfc850 uses a two-digit year
        datestr = '%02d-%s-%s' % (t.tm_mday, month, str(t.tm_year)[-2:])
    else:
        raise ValueError("Invalid rfc value: %s" % rfc)

    return '%s, %s %02d:%02d:%02d GMT' % (weekday, datestr,
                                          t.tm_hour, t.tm_min, t.tm_sec)
def finish(self):
    """ General cleanup on end of request

    Runs the registered finisher callbacks exactly once, then deletes
    the circular references we created via self.name = class(self) so
    Python can collect those objects and our memory footprint stays low.
    """
    for callback in self._finishers:
        callback(self)
    # only execute finishers once
    self._finishers = []

    # break reference cycles; cfg is deliberately NOT deleted - doing so
    # causes problems in the xapian indexing thread
    doomed_attrs = (
        'editlog',  # avoid leaking file handles for open edit-log
        'theme',
        'dicts',
        'user',
        'rootpage',
        'page',
        'html_formatter',
        'formatter',
    )
    for attr_name in doomed_attrs:
        try:
            delattr(self, attr_name)
        except:
            pass
# Support for remote IP address detection when using (reverse) proxy (or
# even proxies). If you exactly KNOW which (reverse) proxies you can trust,
# put them into the list below, so we can determine the "outside" IP as
# your trusted proxies see it.

proxies_trusted = [] # trust noone!
#proxies_trusted = ['127.0.0.1', ] # can be a list of multiple IPs

def find_remote_addr(addrs):
    """ Find the last remote IP address before it hit our reverse proxies.

    The LAST address in the list is the remote IP as detected by the
    server (not taken from some x-forwarded-for header). The FIRST
    address in the list might be the client's IP - if noone cheats and
    everyone supports the x-f-f header.

    See http://bob.pythonmac.org/archives/2005/09/23/apache-x-forwarded-for-caveat/

    TODO: refactor request code to first do some basic IP init, then load
          configuration, then do proxy processing.
    TODO: add wikiconfig configurability for proxies_trusted
    TODO: later, make it possible to put multiple remote IP addrs into edit-log

    @param addrs: remote address chain, client first, server-seen IP last
    @return: the last address not belonging to a trusted proxy
    """
    logging.debug("request.find_remote_addr: addrs == %r" % addrs)
    if proxies_trusted:
        unproxied = [a for a in addrs if a not in proxies_trusted]
        if unproxied:
            # last IP before the request hit our trusted (reverse) proxies
            return unproxied[-1]
    # safe remote_addr as seen by the server, not from the x-f-f header
    return addrs[-1]
def cgiMetaVariable(header, scheme='http'):
    """ Return the CGI meta variable name for a http header name.

    e.g 'User-Agent' -> 'HTTP_USER_AGENT'
    See http://www.faqs.org/rfcs/rfc3875.html section 4.1.18

    @param header: http header name, e.g. 'User-Agent'
    @param scheme: meta variable prefix, 'http' by default
    """
    combined = '_'.join([scheme, header])
    return combined.replace('-', '_').upper()
+ set_umask() + + self._finishers = [] + + self._auth_redirected = False + + # Decode values collected by sub classes + self.path_info = self.decodePagename(self.path_info) + + self.failed = 0 + self._available_actions = None + self._known_actions = None + + # Pages meta data that we collect in one request + self.pages = {} + + self.sent_headers = None + self.user_headers = [] + self.cacheable = 0 # may this output get cached by http proxies/caches? + self.http_caching_disabled = 0 # see disableHttpCaching() + self.page = None + self._dicts = None + + # session handling. users cannot rely on a session being + # created, but we should always set request.session + self.session = {} + + # setuid handling requires an attribute in the request + # that stores the real user + self._setuid_real_user = None + + # Check for dumb proxy requests + # TODO relying on request_uri will not work on all servers, especially + # not on external non-Apache servers + self.forbidden = False + if self.request_uri.startswith('http://'): + self.makeForbidden403() + + # Init + else: + self.writestack = [] + self.clock = Clock() + self.clock.start('total') + self.clock.start('base__init__') + # order is important here! + self.__dict__.update(properties) + try: + self._load_multi_cfg() + except error.NoConfigMatchedError: + self.makeForbidden(404, 'No wiki configuration matching the URL found!\r\n') + return + + self.isSpiderAgent = self.check_spider() + + # Set decode charsets. Input from the user is always in + # config.charset, which is the page charsets. Except + # path_info, which may use utf-8, and handled by decodePagename. 
+ self.decode_charsets = [config.charset] + + if self.query_string.startswith('action=xmlrpc'): + self.args = {} + self.form = {} + self.action = 'xmlrpc' + self.rev = None + else: + try: + self.args = self.form = self.setup_args() + except UnicodeError: + self.makeForbidden(403, "The input you sent could not be understood.") + return + self.action = self.form.get('action', ['show'])[0] + try: + self.rev = int(self.form['rev'][0]) + except: + self.rev = None + + from MoinMoin.Page import RootPage + self.rootpage = RootPage(self) + + from MoinMoin.logfile import editlog + self.editlog = editlog.EditLog(self) + + from MoinMoin import i18n + self.i18n = i18n + i18n.i18n_init(self) + + # authentication might require translated forms, so + # have a try at guessing the language from the browser + lang = i18n.requestLanguage(self, try_user=False) + self.getText = lambda text, i18n=self.i18n, request=self, lang=lang, **kw: i18n.getText(text, request, lang, **kw) + + # session handler start, auth + self.parse_cookie() + user_obj = self.cfg.session_handler.start(self, self.cfg.session_id_handler) + shfinisher = lambda request: self.cfg.session_handler.finish(request, request.user, + self.cfg.session_id_handler) + self.add_finisher(shfinisher) + # set self.user even if _handle_auth_form raises an Exception + self.user = None + self.user = self._handle_auth_form(user_obj) + del user_obj + self.cfg.session_handler.after_auth(self, self.cfg.session_id_handler, self.user) + if not self.user: + self.user = user.User(self, auth_method='request:invalid') + + # setuid handling, check isSuperUser() because the user + # might have lost the permission between requests + if 'setuid' in self.session and self.user.isSuperUser(): + self._setuid_real_user = self.user + uid = self.session['setuid'] + self.user = user.User(self, uid, auth_method='setuid') + # set valid to True so superusers can even switch + # to disable accounts + self.user.valid = True + + if self.action != 'xmlrpc': + if 
not self.forbidden and self.isForbidden(): + self.makeForbidden403() + if not self.forbidden and self.surge_protect(): + self.makeUnavailable503() + + self.pragma = {} + self.mode_getpagelinks = 0 # is > 0 as long as we are in a getPageLinks call + self.parsePageLinks_running = {} # avoid infinite recursion by remembering what we are already running + + self.lang = i18n.requestLanguage(self) + # Language for content. Page content should use the wiki default lang, + # but generated content like search results should use the user language. + self.content_lang = self.cfg.language_default + self.getText = lambda text, i18n=self.i18n, request=self, lang=self.lang, **kv: i18n.getText(text, request, lang, **kv) + + self.reset() + + from MoinMoin.formatter.text_html import Formatter + self.html_formatter = Formatter(self) + self.formatter = self.html_formatter + + self.clock.stop('base__init__') + + def surge_protect(self, kick_him=False): + """ check if someone requesting too much from us, + if kick_him is True, we unconditionally blacklist the current user/ip + """ + limits = self.cfg.surge_action_limits + if not limits: + return False + + if self.remote_addr.startswith('127.'): # localnet + return False + + validuser = self.user.valid + current_id = validuser and self.user.name or self.remote_addr + current_action = self.action + + default_limit = limits.get('default', (30, 60)) + + now = int(time.time()) + surgedict = {} + surge_detected = False + + try: + # if we have common farm users, we could also use scope='farm': + cache = caching.CacheEntry(self, 'surgeprotect', 'surge-log', scope='wiki', use_encode=True) + if cache.exists(): + data = cache.content() + data = data.split("\n") + for line in data: + try: + id, t, action, surge_indicator = line.split("\t") + t = int(t) + maxnum, dt = limits.get(action, default_limit) + if t >= now - dt: + events = surgedict.setdefault(id, {}) + timestamps = events.setdefault(action, []) + timestamps.append((t, surge_indicator)) + 
except StandardError: + pass + + maxnum, dt = limits.get(current_action, default_limit) + events = surgedict.setdefault(current_id, {}) + timestamps = events.setdefault(current_action, []) + surge_detected = len(timestamps) > maxnum + + surge_indicator = surge_detected and "!" or "" + timestamps.append((now, surge_indicator)) + if surge_detected: + if len(timestamps) < maxnum * 2: + timestamps.append((now + self.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out + + if current_action not in ('cache', 'AttachFile', ): # don't add cache/AttachFile accesses to all or picture galleries will trigger SP + current_action = 'all' # put a total limit on user's requests + maxnum, dt = limits.get(current_action, default_limit) + events = surgedict.setdefault(current_id, {}) + timestamps = events.setdefault(current_action, []) + + if kick_him: # ban this guy, NOW + timestamps.extend([(now + self.cfg.surge_lockout_time, "!")] * (2 * maxnum)) + + surge_detected = surge_detected or len(timestamps) > maxnum + + surge_indicator = surge_detected and "!" or "" + timestamps.append((now, surge_indicator)) + if surge_detected: + if len(timestamps) < maxnum * 2: + timestamps.append((now + self.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out + + data = [] + for id, events in surgedict.items(): + for action, timestamps in events.items(): + for t, surge_indicator in timestamps: + data.append("%s\t%d\t%s\t%s" % (id, t, action, surge_indicator)) + data = "\n".join(data) + cache.update(data) + except StandardError: + pass + + if surge_detected and validuser and self.user.auth_method in self.cfg.auth_methods_trusted: + logging.info("Trusted user %s would have triggered surge protection if not trusted." 
% self.user.name) + return False # do not subject trusted users to surge protection + + return surge_detected + + def getDicts(self): + """ Lazy initialize the dicts on the first access """ + if self._dicts is None: + from MoinMoin import wikidicts + dicts = wikidicts.GroupDict(self) + dicts.load_dicts() + self._dicts = dicts + return self._dicts + + def delDicts(self): + """ Delete the dicts, used by some tests """ + del self._dicts + self._dicts = None + + dicts = property(getDicts, None, delDicts) + + def _load_multi_cfg(self): + # protect against calling multiple times + if not hasattr(self, 'cfg'): + self.clock.start('load_multi_cfg') + self.cfg = multiconfig.getConfig(self.url) + self.clock.stop('load_multi_cfg') + + def setAcceptedCharsets(self, accept_charset): + """ Set accepted_charsets by parsing accept-charset header + + Set self.accepted_charsets to an ordered list based on http_accept_charset. + + Reference: http://www.w3.org/Protocols/rfc2616/rfc2616.txt + + TODO: currently no code use this value. + + @param accept_charset: accept-charset header + """ + charsets = [] + if accept_charset: + accept_charset = accept_charset.lower() + # Add iso-8859-1 if needed + if (not '*' in accept_charset and + 'iso-8859-1' not in accept_charset): + accept_charset += ',iso-8859-1' + + # Make a list, sorted by quality value, using Schwartzian Transform + # Create list of tuples (value, name) , sort, extract names + for item in accept_charset.split(','): + if ';' in item: + name, qval = item.split(';') + qval = 1.0 - float(qval.split('=')[1]) + else: + name, qval = item, 0 + charsets.append((qval, name)) + charsets.sort() + # Remove *, its not clear what we should do with it later + charsets = [name for qval, name in charsets if name != '*'] + + self.accepted_charsets = charsets + + def _setup_vars_from_std_env(self, env): + """ Set common request variables from CGI environment + + Parse a standard CGI environment as created by common web servers. 
+ Reference: http://www.faqs.org/rfcs/rfc3875.html + + @param env: dict like object containing cgi meta variables + """ + # Values we can just copy + self.env = env + self.http_accept_language = env.get('HTTP_ACCEPT_LANGUAGE', self.http_accept_language) + self.server_name = env.get('SERVER_NAME', self.server_name) + self.server_port = env.get('SERVER_PORT', self.server_port) + self.saved_cookie = env.get('HTTP_COOKIE', '') + self.script_name = env.get('SCRIPT_NAME', '') + self.path_info = env.get('PATH_INFO', '') + self.query_string = env.get('QUERY_STRING', '') + self.request_method = env.get('REQUEST_METHOD', None) + self.remote_addr = env.get('REMOTE_ADDR', '') + self.http_user_agent = env.get('HTTP_USER_AGENT', '') + try: + self.content_length = int(env.get('CONTENT_LENGTH')) + except (TypeError, ValueError): + self.content_length = None + self.if_modified_since = env.get('If-modified-since') or env.get(cgiMetaVariable('If-modified-since')) + self.if_none_match = env.get('If-none-match') or env.get(cgiMetaVariable('If-none-match')) + + # REQUEST_URI is not part of CGI spec, but an addition of Apache. + self.request_uri = env.get('REQUEST_URI', '') + + # Values that need more work + self.setHttpReferer(env.get('HTTP_REFERER')) + self.setIsSSL(env) + self.setHost(env.get('HTTP_HOST')) + self.fixURI(env) + + self.setURL(env) + #self.debugEnvironment(env) + + def setHttpReferer(self, referer): + """ Set http_referer, making sure its ascii + + IE might send non-ascii value. + """ + value = '' + if referer: + value = unicode(referer, 'ascii', 'replace') + value = value.encode('ascii', 'replace') + self.http_referer = value + + def setIsSSL(self, env): + """ Set is_ssl + + @param env: dict like object containing cgi meta variables + """ + self.is_ssl = bool(env.get('SSL_PROTOCOL') or + env.get('SSL_PROTOCOL_VERSION') or + env.get('HTTPS') == 'on') + + def setHost(self, host=None): + """ Set http_host + + Create from server name and port if missing. 
Previous code + default to localhost. + """ + if not host: + port = '' + standardPort = ('80', '443')[self.is_ssl] + if self.server_port != standardPort: + port = ':' + self.server_port + host = self.server_name + port + self.http_host = host + + def fixURI(self, env): + """ Fix problems with script_name and path_info + + Handle the strange charset semantics on Windows and other non + posix systems. path_info is transformed into the system code + page by the web server. Additionally, paths containing dots let + most webservers choke. + + Broken environment variables in different environments: + path_info script_name + Apache1 X X PI does not contain dots + Apache2 X X PI is not encoded correctly + IIS X X path_info include script_name + Other ? - ? := Possible and even RFC-compatible. + - := Hopefully not. + + @param env: dict like object containing cgi meta variables + """ + # Fix the script_name when using Apache on Windows. + server_software = env.get('SERVER_SOFTWARE', '') + if os.name == 'nt' and 'Apache/' in server_software: + # Removes elements ending in '.' from the path. + self.script_name = '/'.join([x for x in self.script_name.split('/') + if not x.endswith('.')]) + + # Fix path_info + if os.name != 'posix' and self.request_uri != '': + # Try to recreate path_info from request_uri. + import urlparse + scriptAndPath = urlparse.urlparse(self.request_uri)[2] + path = scriptAndPath.replace(self.script_name, '', 1) + self.path_info = wikiutil.url_unquote(path, want_unicode=False) + elif os.name == 'nt': + # Recode path_info to utf-8 + path = wikiutil.decodeWindowsPath(self.path_info) + self.path_info = path.encode("utf-8") + + # Fix bug in IIS/4.0 when path_info contain script_name + if self.path_info.startswith(self.script_name): + self.path_info = self.path_info[len(self.script_name):] + + def setURL(self, env): + """ Set url, used to locate wiki config + + This is the place to manipulate url parts as needed. 
+ + @param env: dict like object containing cgi meta variables or http headers. + """ + # proxy support + self.rewriteRemoteAddr(env) + self.rewriteHost(env) + + self.rewriteURI(env) + + if not self.request_uri: + self.request_uri = self.makeURI() + self.url = self.http_host + self.request_uri + + def rewriteHost(self, env): + """ Rewrite http_host transparently + + Get the proxy host using 'X-Forwarded-Host' header, added by + Apache 2 and other proxy software. + + TODO: Will not work for Apache 1 or others that don't add this header. + + TODO: If we want to add an option to disable this feature it + should be in the server script, because the config is not + loaded at this point, and must be loaded after url is set. + + @param env: dict like object containing cgi meta variables or http headers. + """ + proxy_host = (env.get(self.proxy_host) or + env.get(cgiMetaVariable(self.proxy_host))) + if proxy_host: + self.http_host = proxy_host + + def rewriteRemoteAddr(self, env): + """ Rewrite remote_addr transparently + + Get the proxy remote addr using 'X-Forwarded-For' header, added by + Apache 2 and other proxy software. + + TODO: Will not work for Apache 1 or others that don't add this header. + + TODO: If we want to add an option to disable this feature it + should be in the server script, because the config is not + loaded at this point, and must be loaded after url is set. + + @param env: dict like object containing cgi meta variables or http headers. + """ + xff = (env.get(self.proxy_xff) or + env.get(cgiMetaVariable(self.proxy_xff))) + if xff: + xff = [addr.strip() for addr in xff.split(',')] + xff.append(self.remote_addr) + self.remote_addr = find_remote_addr(xff) + + def rewriteURI(self, env): + """ Rewrite request_uri, script_name and path_info transparently + + Useful when running mod python or when running behind a proxy, + e.g run on localhost:8000/ and serve as example.com/wiki/. + + Uses private 'X-Moin-Location' header to set the script name. 
+ This allow setting the script name when using Apache 2 + directive:: + + + RequestHeader set X-Moin-Location /my/wiki/ + + + TODO: does not work for Apache 1 and others that do not allow + setting custom headers per request. + + @param env: dict like object containing cgi meta variables or http headers. + """ + location = (env.get(self.moin_location) or + env.get(cgiMetaVariable(self.moin_location))) + if location is None: + return + + scriptAndPath = self.script_name + self.path_info + location = location.rstrip('/') + self.script_name = location + + # This may happen when using mod_python + if scriptAndPath.startswith(location): + self.path_info = scriptAndPath[len(location):] + + # Recreate the URI from the modified parts + if self.request_uri: + self.request_uri = self.makeURI() + + def makeURI(self): + """ Return uri created from uri parts """ + uri = self.script_name + wikiutil.url_quote(self.path_info) + if self.query_string: + uri += '?' + self.query_string + return uri + + def splitURI(self, uri): + """ Return path and query splited from uri + + Just like CGI environment, the path is unquoted, the query is not. + """ + if '?' 
in uri: + path, query = uri.split('?', 1) + else: + path, query = uri, '' + return wikiutil.url_unquote(path, want_unicode=False), query + + def _handle_auth_form(self, user_obj): + username = self.form.get('name', [None])[0] + password = self.form.get('password', [None])[0] + oid = self.form.get('openid_identifier', [None])[0] + login = 'login' in self.form + logout = 'logout' in self.form + stage = self.form.get('stage', [None])[0] + return self.handle_auth(user_obj, attended=True, username=username, + password=password, login=login, logout=logout, + stage=stage, openid_identifier=oid) + + def handle_auth(self, user_obj, attended=False, **kw): + username = kw.get('username') + password = kw.get('password') + oid = kw.get('openid_identifier') + login = kw.get('login') + logout = kw.get('logout') + stage = kw.get('stage') + extra = { + 'cookie': self.cookie, + } + if login: + extra['attended'] = attended + extra['username'] = username + extra['password'] = password + extra['openid_identifier'] = oid + if stage: + extra['multistage'] = True + login_msgs = [] + self._login_multistage = None + + if logout and 'setuid' in self.session: + del self.session['setuid'] + return user_obj + + for authmethod in self.cfg.auth: + if logout: + user_obj, cont = authmethod.logout(self, user_obj, **extra) + elif login: + if stage and authmethod.name != stage: + continue + ret = authmethod.login(self, user_obj, **extra) + user_obj = ret.user_obj + cont = ret.continue_flag + if stage: + stage = None + del extra['multistage'] + if ret.multistage: + self._login_multistage = ret.multistage + self._login_multistage_name = authmethod.name + return user_obj + if ret.redirect_to: + nextstage = auth.get_multistage_continuation_url(self, authmethod.name) + url = ret.redirect_to + url = url.replace('%return_form', quote_plus(nextstage)) + url = url.replace('%return', quote(nextstage)) + self._auth_redirected = True + self.http_redirect(url) + return user_obj + msg = ret.message + if msg and not 
msg in login_msgs: + login_msgs.append(msg) + else: + user_obj, cont = authmethod.request(self, user_obj, **extra) + if not cont: + break + + self._login_messages = login_msgs + return user_obj + + def handle_jid_auth(self, jid): + return user.get_by_jabber_id(self, jid) + + def parse_cookie(self): + try: + self.cookie = Cookie.SimpleCookie(self.saved_cookie) + except Cookie.CookieError: + self.cookie = None + + def reset(self): + """ Reset request state. + + Called after saving a page, before serving the updated + page. Solves some practical problems with request state + modified during saving. + + """ + # This is the content language and has nothing to do with + # The user interface language. The content language can change + # during the rendering of a page by lang macros + self.current_lang = self.cfg.language_default + + # caches unique ids + self.init_unique_ids() + + if hasattr(self, "_fmt_hd_counters"): + del self._fmt_hd_counters + + def loadTheme(self, theme_name): + """ Load the Theme to use for this request. + + @param theme_name: the name of the theme + @type theme_name: str + @rtype: int + @return: success code + 0 on success + 1 if user theme could not be loaded, + 2 if a hard fallback to modern theme was required. + """ + fallback = 0 + if theme_name == "": + theme_name = self.cfg.theme_default + + try: + Theme = wikiutil.importPlugin(self.cfg, 'theme', theme_name, 'Theme') + except wikiutil.PluginMissingError: + fallback = 1 + try: + Theme = wikiutil.importPlugin(self.cfg, 'theme', self.cfg.theme_default, 'Theme') + except wikiutil.PluginMissingError: + fallback = 2 + from MoinMoin.theme.modern import Theme + + self.theme = Theme(self) + return fallback + + def setContentLanguage(self, lang): + """ Set the content language, used for the content div + + Actions that generate content in the user language, like search, + should set the content direction to the user language before they + call send_title! 
+ """ + self.content_lang = lang + self.current_lang = lang + + def getPragma(self, key, defval=None): + """ Query a pragma value (#pragma processing instruction) + + Keys are not case-sensitive. + """ + return self.pragma.get(key.lower(), defval) + + def setPragma(self, key, value): + """ Set a pragma value (#pragma processing instruction) + + Keys are not case-sensitive. + """ + self.pragma[key.lower()] = value + + def getPathinfo(self): + """ Return the remaining part of the URL. """ + return self.path_info + + def getScriptname(self): + """ Return the scriptname part of the URL ('/path/to/my.cgi'). """ + if self.script_name == '/': + return '' + return self.script_name + + def getKnownActions(self): + """ Create a dict of avaiable actions + + Return cached version if avaiable. + + @rtype: dict + @return: dict of all known actions + """ + try: + self.cfg.cache.known_actions # check + except AttributeError: + from MoinMoin import action + self.cfg.cache.known_actions = set(action.getNames(self.cfg)) + + # Return a copy, so clients will not change the set. + return self.cfg.cache.known_actions.copy() + + def getAvailableActions(self, page): + """ Get list of avaiable actions for this request + + The dict does not contain actions that starts with lower case. + Themes use this dict to display the actions to the user. + + @param page: current page, Page object + @rtype: dict + @return: dict of avaiable actions + """ + if self._available_actions is None: + # some actions might make sense for non-existing pages, so we just + # require read access here. 
Can be later refined to some action + # specific check: + if not self.user.may.read(page.page_name): + return [] + + # Filter non ui actions (starts with lower case letter) + actions = self.getKnownActions() + actions = [action for action in actions if not action[0].islower()] + + # Filter wiki excluded actions + actions = [action for action in actions if not action in self.cfg.actions_excluded] + + # Filter actions by page type, acl and user state + excluded = [] + if ((page.isUnderlayPage() and not page.isStandardPage()) or + not self.user.may.write(page.page_name) or + not self.user.may.delete(page.page_name)): + # Prevent modification of underlay only pages, or pages + # the user can't write and can't delete + excluded = [u'RenamePage', u'DeletePage', ] # AttachFile must NOT be here! + actions = [action for action in actions if not action in excluded] + + self._available_actions = set(actions) + + # Return a copy, so clients will not change the dict. + return self._available_actions.copy() + + def redirectedOutput(self, function, *args, **kw): + """ Redirect output during function, return redirected output """ + buf = StringIO.StringIO() + self.redirect(buf) + try: + function(*args, **kw) + finally: + self.redirect() + text = buf.getvalue() + buf.close() + return text + + def redirect(self, file=None): + """ Redirect output to file, or restore saved output """ + if file: + self.writestack.append(self.write) + self.write = file.write + else: + self.write = self.writestack.pop() + + def log(self, msg): + """ DEPRECATED - Log msg to logging framework + Please call logging.info(...) directly! 
+ """ + msg = msg.strip() + # Encode unicode msg + if isinstance(msg, unicode): + msg = msg.encode(config.charset) + logging.info(msg) + + def timing_log(self, start, action): + """ Log to timing log (for performance analysis) """ + indicator = '' + if start: + total = "vvv" + else: + self.clock.stop('total') # make sure it is stopped + total_secs = self.clock.timings['total'] + # we add some stuff that is easy to grep when searching for peformance problems: + if total_secs > 50: + indicator += '!4!' + elif total_secs > 20: + indicator += '!3!' + elif total_secs > 10: + indicator += '!2!' + elif total_secs > 2: + indicator += '!1!' + total = self.clock.value('total') + # use + for existing pages, - for non-existing pages + if self.page is not None: + indicator += self.page.exists() and '+' or '-' + if self.isSpiderAgent: + indicator += "B" + + pid = os.getpid() + msg = 'Timing %5d %-6s %4s %-10s %s\n' % (pid, total, indicator, action, self.url) + logging.info(msg) + + def send_file(self, fileobj, bufsize=8192, do_flush=False): + """ Send a file to the output stream. + + @param fileobj: a file-like object (supporting read, close) + @param bufsize: size of chunks to read/write + @param do_flush: call flush after writing? + """ + while True: + buf = fileobj.read(bufsize) + if not buf: + break + self.write(buf) + if do_flush: + self.flush() + + def write(self, *data): + """ Write to output stream. 
""" + raise NotImplementedError + + def encode(self, data): + """ encode data (can be both unicode strings and strings), + preparing for a single write() + """ + wd = [] + for d in data: + try: + if isinstance(d, unicode): + # if we are REALLY sure, we can use "strict" + d = d.encode(config.charset, 'replace') + elif d is None: + continue + wd.append(d) + except UnicodeError: + logging.error("Unicode error on: %s" % repr(d)) + return ''.join(wd) + + def decodePagename(self, name): + """ Decode path, possibly using non ascii characters + + Does not change the name, only decode to Unicode. + + First split the path to pages, then decode each one. This enables + us to decode one page using config.charset and another using + utf-8. This situation happens when you try to add to a name of + an existing page. + + See http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 + + @param name: page name, string + @rtype: unicode + @return decoded page name + """ + # Split to pages and decode each one + pages = name.split('/') + decoded = [] + for page in pages: + # Recode from utf-8 into config charset. If the path + # contains user typed parts, they are encoded using 'utf-8'. + if config.charset != 'utf-8': + try: + page = unicode(page, 'utf-8', 'strict') + # Fit data into config.charset, replacing what won't + # fit. Better have few "?" in the name than crash. + page = page.encode(config.charset, 'replace') + except UnicodeError: + pass + + # Decode from config.charset, replacing what can't be decoded. + page = unicode(page, config.charset, 'replace') + decoded.append(page) + + # Assemble decoded parts + name = u'/'.join(decoded) + return name + + def normalizePagename(self, name): + """ Normalize page name + + Prevent creating page names with invisible characters or funny + whitespace that might confuse the users or abuse the wiki, or + just does not make sense. + + Restrict even more group pages, so they can be used inside acl lines. 
+ + @param name: page name, unicode + @rtype: unicode + @return: decoded and sanitized page name + """ + # Strip invalid characters + name = config.page_invalid_chars_regex.sub(u'', name) + + # Split to pages and normalize each one + pages = name.split(u'/') + normalized = [] + for page in pages: + # Ignore empty or whitespace only pages + if not page or page.isspace(): + continue + + # Cleanup group pages. + # Strip non alpha numeric characters, keep white space + if wikiutil.isGroupPage(self, page): + page = u''.join([c for c in page + if c.isalnum() or c.isspace()]) + + # Normalize white space. Each name can contain multiple + # words separated with only one space. Split handle all + # 30 unicode spaces (isspace() == True) + page = u' '.join(page.split()) + + normalized.append(page) + + # Assemble components into full pagename + name = u'/'.join(normalized) + return name + + def read(self, n): + """ Read n bytes from input stream. """ + raise NotImplementedError + + def flush(self): + """ Flush output stream. """ + pass + + def check_spider(self): + """ check if the user agent for current request is a spider/bot """ + isSpider = False + ua = self.getUserAgent() + if ua and self.cfg.cache.ua_spiders: + isSpider = self.cfg.cache.ua_spiders.search(ua) is not None + return isSpider + + def isForbidden(self): + """ check for web spiders and refuse anything except viewing """ + forbidden = 0 + # we do not have a parsed query string here, so we can just do simple matching + qs = self.query_string + action = self.action + if ((qs != '' or self.request_method != 'GET') and + action != 'rss_rc' and + # allow spiders to get attachments and do 'show' + not (action == 'AttachFile' and 'do=get' in qs) and + action != 'show' and + action != 'sitemap' + ): + forbidden = self.isSpiderAgent + + if not forbidden and self.cfg.hosts_deny: + ip = self.remote_addr + for host in self.cfg.hosts_deny: + if host[-1] == '.' 
and ip.startswith(host): + forbidden = 1 + logging.debug("hosts_deny (net): %s" % str(forbidden)) + break + if ip == host: + forbidden = 1 + logging.debug("hosts_deny (ip): %s" % str(forbidden)) + break + return forbidden + + def setup_args(self): + """ Return args dict + First, we parse the query string (usually this is used in GET methods, + but TwikiDraw uses ?action=AttachFile&do=savedrawing plus posted stuff). + Second, we update what we got in first step by the stuff we get from + the form (or by a POST). We invoke _setup_args_from_cgi_form to handle + possible file uploads. + """ + args = cgi.parse_qs(self.query_string, keep_blank_values=1) + args = self.decodeArgs(args) + # if we have form data (in a POST), those override the stuff we already have: + if self.request_method == 'POST': + postargs = self._setup_args_from_cgi_form() + args.update(postargs) + return args + + def _setup_args_from_cgi_form(self, form=None): + """ Return args dict from a FieldStorage + + Create the args from a given form. Each key contain a list of values. + This method usually gets overridden in classes derived from this - it + is their task to call this method with an appropriate form parameter. + + @param form: a cgi.FieldStorage + @rtype: dict + @return: dict with form keys, each contains a list of values + """ + args = {} + for key in form: + values = form[key] + if not isinstance(values, list): + values = [values] + fixedResult = [] + for item in values: + if isinstance(item, cgi.FieldStorage) and item.filename: + fixedResult.append(item.file) # open data tempfile + # Save upload file name in a separate key + args[key + '__filename__'] = item.filename + else: + fixedResult.append(item.value) + args[key] = fixedResult + + return self.decodeArgs(args) + + def decodeArgs(self, args): + """ Decode args dict + + Decoding is done in a separate path because it is reused by + other methods and sub classes. 
+ """ + decode = wikiutil.decodeUserInput + result = {} + for key in args: + if key + '__filename__' in args: + # Copy file data as is + result[key] = args[key] + elif key.endswith('__filename__'): + result[key] = decode(args[key], self.decode_charsets) + else: + result[key] = [decode(value, self.decode_charsets) for value in args[key]] + return result + + def getBaseURL(self): + """ Return a fully qualified URL to this script. """ + return self.getQualifiedURL(self.getScriptname()) + + def getQualifiedURL(self, uri=''): + """ Return an absolute URL starting with schema and host. + + Already qualified urls are returned unchanged. + + @param uri: server rooted uri e.g /scriptname/pagename. + It must start with a slash. Must be ascii and url encoded. + """ + import urlparse + scheme = urlparse.urlparse(uri)[0] + if scheme: + return uri + + scheme = ('http', 'https')[self.is_ssl] + result = "%s://%s%s" % (scheme, self.http_host, uri) + + # This might break qualified urls in redirects! + # e.g. mapping 'http://netloc' -> '/' + return wikiutil.mapURL(self, result) + + def getUserAgent(self): + """ Get the user agent. """ + return self.http_user_agent + + def makeForbidden(self, resultcode, msg): + statusmsg = { + 401: 'Authorization required', + 403: 'FORBIDDEN', + 404: 'Not found', + 503: 'Service unavailable', + } + headers = [ + 'Status: %d %s' % (resultcode, statusmsg[resultcode]), + 'Content-Type: text/plain; charset=utf-8' + ] + # when surge protection triggered, tell bots to come back later... 
+ if resultcode == 503: + headers.append('Retry-After: %d' % self.cfg.surge_lockout_time) + self.emit_http_headers(headers) + self.write(msg) + self.forbidden = True + + def makeForbidden403(self): + self.makeForbidden(403, 'You are not allowed to access this!\r\n') + + def makeUnavailable503(self): + self.makeForbidden(503, "Warning:\r\n" + "You triggered the wiki's surge protection by doing too many requests in a short time.\r\n" + "Please make a short break reading the stuff you already got.\r\n" + "When you restart doing requests AFTER that, slow down or you might get locked out for a longer time!\r\n") + + def initTheme(self): + """ Set theme - forced theme, user theme or wiki default """ + ### HACK SAUVAGE 1/1 + if self.remote_addr == '138.231.136.67': + theme_name = 'crans-www' + elif self.cfg.theme_force: + ### FIN HACK 1/1 + theme_name = self.cfg.theme_default + else: + theme_name = self.user.theme_name + self.loadTheme(theme_name) + + def _try_redirect_spaces_page(self, pagename): + if '_' in pagename and not self.page.exists(): + pname = pagename.replace('_', ' ') + pg = Page(self, pname) + if pg.exists(): + url = pg.url(self) + self.http_redirect(url) + return True + return False + + def run(self): + # Exit now if __init__ failed or request is forbidden + if self.failed or self.forbidden or self._auth_redirected: + # Don't sleep() here, it binds too much of our resources! 
+ return self.finish() + + _ = self.getText + self.clock.start('run') + + self.initTheme() + + action_name = self.action + if self.cfg.log_timing: + self.timing_log(True, action_name) + + if action_name == 'xmlrpc': + from MoinMoin import xmlrpc + if self.query_string == 'action=xmlrpc': + xmlrpc.xmlrpc(self) + elif self.query_string == 'action=xmlrpc2': + xmlrpc.xmlrpc2(self) + if self.cfg.log_timing: + self.timing_log(False, action_name) + return self.finish() + + # parse request data + try: + # The last component in path_info is the page name, if any + path = self.getPathinfo() + + # we can have all action URLs like this: /action/ActionName/PageName?action=ActionName&... + # this is just for robots.txt being able to forbid them for crawlers + prefix = self.cfg.url_prefix_action + if prefix is not None: + prefix = '/%s/' % prefix # e.g. '/action/' + if path.startswith(prefix): + # remove prefix and action name + path = path[len(prefix):] + action, path = (path.split('/', 1) + ['', ''])[:2] + path = '/' + path + + if path.startswith('/'): + pagename = self.normalizePagename(path) + else: + pagename = None + + # need to inform caches that content changes based on: + # * cookie (even if we aren't sending one now) + # * User-Agent (because a bot might be denied and get no content) + # * Accept-Language (except if moin is told to ignore browser language) + if self.cfg.language_ignore_browser: + self.setHttpHeader("Vary: Cookie,User-Agent") + else: + self.setHttpHeader("Vary: Cookie,User-Agent,Accept-Language") + + # Handle request. We have these options: + # 1. 
jump to page where user left off + if not pagename and self.user.remember_last_visit and action_name == 'show': + pagetrail = self.user.getTrail() + if pagetrail: + # Redirect to last page visited + last_visited = pagetrail[-1] + wikiname, pagename = wikiutil.split_interwiki(last_visited) + if wikiname != 'Self': + wikitag, wikiurl, wikitail, error = wikiutil.resolve_interwiki(self, wikiname, pagename) + url = wikiurl + wikiutil.quoteWikinameURL(wikitail) + else: + url = Page(self, pagename).url(self) + else: + # Or to localized FrontPage + url = wikiutil.getFrontPage(self).url(self) + self.http_redirect(url) + return self.finish() + + # 2. handle action + else: + # pagename could be empty after normalization e.g. '///' -> '' + # Use localized FrontPage if pagename is empty + if not pagename: + self.page = wikiutil.getFrontPage(self) + else: + self.page = Page(self, pagename) + if self._try_redirect_spaces_page(pagename): + return self.finish() + + msg = None + # Complain about unknown actions + if not action_name in self.getKnownActions(): + msg = _("Unknown action %(action_name)s.") % { + 'action_name': wikiutil.escape(action_name), } + + # Disallow non available actions + elif action_name[0].isupper() and not action_name in self.getAvailableActions(self.page): + msg = _("You are not allowed to do %(action_name)s on this page.") % { + 'action_name': wikiutil.escape(action_name), } + if not self.user.valid: + # Suggest non valid user to login + msg += " " + _("Login and try again.") + + if msg: + self.theme.add_msg(msg, "error") + self.page.send_page() + # Try action + else: + from MoinMoin import action + handler = action.getHandler(self, action_name) + if handler is None: + msg = _("You are not allowed to do %(action_name)s on this page.") % { + 'action_name': wikiutil.escape(action_name), } + if not self.user.valid: + # Suggest non valid user to login + msg += " " + _("Login and try again.") + self.theme.add_msg(msg, "error") + self.page.send_page() + else: + 
handler(self.page.page_name, self) + + # every action that didn't use to raise MoinMoinFinish must call this now: + # self.theme.send_closing_html() + + except MoinMoinFinish: + pass + except RemoteClosedConnection: + # at least clean up + pass + except SystemExit: + raise # fcgi uses this to terminate a thread + except Exception, err: + try: + # nothing we can do about further failures! + self.fail(err) + except: + pass + + if self.cfg.log_timing: + self.timing_log(False, action_name) + + return self.finish() + + def http_redirect(self, url): + """ Redirect to a fully qualified, or server-rooted URL + + @param url: relative or absolute url, ascii using url encoding. + """ + url = self.getQualifiedURL(url) + self.emit_http_headers(["Status: 302 Found", "Location: %s" % url]) + + def emit_http_headers(self, more_headers=[], testing=False): + """ emit http headers after some preprocessing / checking + + Makes sure we only emit headers once. + Encodes to ASCII if it gets unicode headers. + Make sure we have exactly one Content-Type and one Status header. + Make sure Status header string begins with a integer number. + + For emitting (testing == False), it calls the server specific + _emit_http_headers method. For testing, it returns the result. 
+ + @param more_headers: list of additional header strings + @param testing: set to True by test code + """ + user_headers = self.user_headers + self.user_headers = [] + tracehere = ''.join(traceback.format_stack()[:-1]) + all_headers = [(hdr, tracehere) for hdr in more_headers] + user_headers + + if self.sent_headers: + # Send headers only once + logging.error("Attempt to send headers twice!") + logging.error("First attempt:\n%s" % self.sent_headers) + logging.error("Second attempt:\n%s" % tracehere) + raise HeadersAlreadySentException("emit_http_headers has already been called before!") + else: + self.sent_headers = tracehere + + # assemble dict of http headers + headers = {} + traces = {} + for header, trace in all_headers: + if isinstance(header, unicode): + header = header.encode('ascii') + key, value = header.split(':', 1) + lkey = key.lower() + value = value.lstrip() + if lkey in headers: + if lkey in ['vary', 'cache-control', 'content-language', ]: + # these headers (list might be incomplete) allow multiple values + # that can be merged into a comma separated list + headers[lkey] = headers[lkey][0], '%s, %s' % (headers[lkey][1], value) + traces[lkey] = trace + else: + logging.warning("Duplicate http header: %r (ignored)" % header) + logging.warning("Header added first at:\n%s" % traces[lkey]) + logging.warning("Header added again at:\n%s" % trace) + else: + headers[lkey] = (key, value) + traces[lkey] = trace + + if 'content-type' not in headers: + headers['content-type'] = ('Content-type', 'text/html; charset=%s' % config.charset) + + if 'status' not in headers: + headers['status'] = ('Status', '200 OK') + else: + # check if we got a valid status + try: + status = headers['status'][1] + int(status.split(' ', 1)[0]) + except: + logging.error("emit_http_headers called with invalid header Status: %r" % status) + headers['status'] = ('Status', '500 Server Error - invalid status header') + + header_format = '%s: %s' + st_header = header_format % 
headers['status'] + del headers['status'] + ct_header = header_format % headers['content-type'] + del headers['content-type'] + + headers = [header_format % kv_tuple for kv_tuple in headers.values()] # make a list of strings + headers = [st_header, ct_header] + headers # do NOT change order! + if not testing: + self._emit_http_headers(headers) + else: + return headers + + def _emit_http_headers(self, headers): + """ server specific method to emit http headers. + + @param headers: a list of http header strings in this FIXED order: + 1. status header (always present and valid, e.g. "200 OK") + 2. content type header (always present) + 3. other headers (optional) + """ + raise NotImplementedError + + def setHttpHeader(self, header): + """ Save header for later send. + + Attention: although we use a list here, some implementations use a dict, + thus multiple calls with the same header type do NOT work in the end! + """ + # save a traceback with the header for duplicate bug reporting + self.user_headers.append((header, ''.join(traceback.format_stack()[:-1]))) + + def fail(self, err): + """ Fail when we can't continue + + Send 500 status code with the error name. Reference: + http://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html#sec6.1.1 + + Log the error, then let failure module handle it. + + @param err: Exception instance or subclass. + """ + self.failed = 1 # save state for self.run() + # we should not generate the headers two times + if not self.sent_headers: + self.emit_http_headers(['Status: 500 MoinMoin Internal Error']) + from MoinMoin import failure + failure.handle(self, err) + + def make_unique_id(self, base, namespace=None): + """ + Generates a unique ID using a given base name. Appends a running count to the base. + + Needs to stay deterministic! 
+ + @param base: the base of the id + @type base: unicode + @param namespace: the namespace for the ID, used when including pages + + @returns: a unique (relatively to the namespace) ID + @rtype: unicode + """ + if not isinstance(base, unicode): + base = unicode(str(base), 'ascii', 'ignore') + if not namespace in self._page_ids: + self._page_ids[namespace] = {} + count = self._page_ids[namespace].get(base, -1) + 1 + self._page_ids[namespace][base] = count + if not count: + return base + return u'%s-%d' % (base, count) + + def init_unique_ids(self): + '''Initialise everything needed for unique IDs''' + self._unique_id_stack = [] + self._page_ids = {None: {}} + self.include_id = None + self._include_stack = [] + + def push_unique_ids(self): + ''' + Used by the TOC macro, this ensures that the ID namespaces + are reset to the status when the current include started. + This guarantees that doing the ID enumeration twice results + in the same results, on any level. + ''' + self._unique_id_stack.append((self._page_ids, self.include_id)) + self.include_id, pids = self._include_stack[-1] + # make a copy of the containing ID namespaces, that is to say + # go back to the level we had at the previous include + self._page_ids = {} + for namespace in pids: + self._page_ids[namespace] = pids[namespace].copy() + + def pop_unique_ids(self): + ''' + Used by the TOC macro to reset the ID namespaces after + having parsed the page for TOC generation and after + printing the TOC. + ''' + self._page_ids, self.include_id = self._unique_id_stack.pop() + + def begin_include(self, base): + ''' + Called by the formatter when a document begins, which means + that include causing nested documents gives us an include + stack in self._include_id_stack. 
+ ''' + pids = {} + for namespace in self._page_ids: + pids[namespace] = self._page_ids[namespace].copy() + self._include_stack.append((self.include_id, pids)) + self.include_id = self.make_unique_id(base) + # if it's the page name then set it to None so we don't + # prepend anything to IDs, but otherwise keep it. + if self.page and self.page.page_name == self.include_id: + self.include_id = None + + def end_include(self): + ''' + Called by the formatter when a document ends, restores + the current include ID to the previous one and discards + the page IDs state we kept around for push_unique_ids(). + ''' + self.include_id, pids = self._include_stack.pop() + + def httpDate(self, when=None, rfc='1123'): + """ Returns http date string, according to rfc2068 + + See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-3.3 + + A http 1.1 server should use only rfc1123 date, but cookie's + "expires" field should use the older obsolete rfc850 date. + + Note: we can not use strftime() because that honors the locale + and rfc2822 requires english day and month names. + + We can not use email.Utils.formatdate because it formats the + zone as '-0000' instead of 'GMT', and creates only rfc1123 + dates. This is a modified version of email.Utils.formatdate + from Python 2.4. 
+ + @param when: seconds from epoch, as returned by time.time() + @param rfc: conform to rfc ('1123' or '850') + @rtype: string + @return: http date conforming to rfc1123 or rfc850 + """ + if when is None: + when = time.time() + now = time.gmtime(when) + month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', + 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now.tm_mon - 1] + if rfc == '1123': + day = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now.tm_wday] + date = '%02d %s %04d' % (now.tm_mday, month, now.tm_year) + elif rfc == '850': + day = ["Monday", "Tuesday", "Wednesday", "Thursday", + "Friday", "Saturday", "Sunday"][now.tm_wday] + date = '%02d-%s-%s' % (now.tm_mday, month, str(now.tm_year)[-2:]) + else: + raise ValueError("Invalid rfc value: %s" % rfc) + + return '%s, %s %02d:%02d:%02d GMT' % (day, date, now.tm_hour, + now.tm_min, now.tm_sec) + + def disableHttpCaching(self, level=1): + """ Prevent caching of pages that should not be cached. + + level == 1 means disabling caching when we have a cookie set + level == 2 means completely disabling caching (used by Page*Editor) + + This is important to prevent caches break acl by providing one + user pages meant to be seen only by another user, when both users + share the same caching proxy. + + AVOID using no-cache and no-store for attachments as it is completely broken on IE! 
+ + Details: http://support.microsoft.com/support/kb/articles/Q234/0/67.ASP + """ + if level <= self.http_caching_disabled: + return # only make caching stricter + + if level == 1: + # Set Cache control header for http 1.1 caches + # See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2109.html#sec-4.2.3 + # and http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-14.9 + #self.setHttpHeader('Cache-Control: no-cache="set-cookie", private, max-age=0') + self.setHttpHeader('Cache-Control: private, must-revalidate, max-age=10') + elif level == 2: + self.setHttpHeader('Cache-Control: no-cache') + + # only do this once to avoid 'duplicate header' warnings + # (in case the caching disabling is being made stricter) + if not self.http_caching_disabled: + # Set Expires for http 1.0 caches (does not support Cache-Control) + when = time.time() - (3600 * 24 * 365) + self.setHttpHeader('Expires: %s' % self.httpDate(when=when)) + + # Set Pragma for http 1.0 caches + # See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-14.32 + # DISABLED for level == 1 to fix IE https file attachment downloading trouble. + if level == 2: + self.setHttpHeader('Pragma: no-cache') + + self.http_caching_disabled = level + + def finish(self): + """ General cleanup on end of request + + Delete circular references - all object that we create using self.name = class(self). + This helps Python to collect these objects and keep our memory footprint lower. 
+ """ + for method in self._finishers: + method(self) + # only execute finishers once + self._finishers = [] + + for attr_name in [ + 'editlog', # avoid leaking file handles for open edit-log + 'theme', + 'dicts', + 'user', + 'rootpage', + 'page', + 'html_formatter', + 'formatter', + #'cfg', -- do NOT delattr cfg - it causes problems in the xapian indexing thread + ]: + try: + delattr(self, attr_name) + except: + pass + + def add_finisher(self, method): + self._finishers.append(method) + + # Debug ------------------------------------------------------------ + + def debugEnvironment(self, env): + """ Environment debugging aid """ + # Keep this one name per line so its easy to comment stuff + names = [ +# 'http_accept_language', +# 'http_host', +# 'http_referer', +# 'http_user_agent', +# 'is_ssl', + 'path_info', + 'query_string', +# 'remote_addr', + 'request_method', +# 'request_uri', +# 'saved_cookie', + 'script_name', +# 'server_name', +# 'server_port', + ] + names.sort() + attributes = [] + for name in names: + attributes.append(' %s = %r\n' % (name, getattr(self, name, None))) + attributes = ''.join(attributes) + + environment = [] + names = env.keys() + names.sort() + for key in names: + environment.append(' %s = %r\n' % (key, env[key])) + environment = ''.join(environment) + + data = '\nRequest Attributes\n%s\nEnvironment\n%s' % (attributes, environment) + f = open('/tmp/env.log', 'a') + try: + f.write(data) + finally: + f.close() + diff --git a/wiki-lenny/static/crans.png b/wiki-lenny/static/crans.png index 62d1d761f43ececd6c611c61176fbe1bce33cb2f..039f806e70231b07599bd0a07cdd36829df84338 100644 GIT binary patch literal 19148 zcmV*mKuN!eP)Px#24YJ`L;(K){{a7>y{D4^000SaNLh0L0Jw_)0Jw_*p0XWK00007bV*G`2iOb= z4+$k3+rC-=03ZNKL_t(|+U2&fQ9|9kf{bq1zK7#;<{+bw*v^gg^V+-E6k4SJts{K~js;L6TM{Cm;Z&Rgz{sAabc#RsvrOR; z0_n?bjSPm9bbk;tv0oM_5#eh@`k9e^9!B;xRbUE1^#wi5@D}unYfOwVj&p#drOr9v za3r#T5rN{mapFk;?CkIC?BBbt9!Xz!luIAFlOd3H+@S(dmv0BOXRr}KG<&~E@B~}{ 
z*#IoX0(N={I9}i(AMj8Js1Qgyj%WZ@5imvd2X!4-1t2EHL6AViq(#jI3$(3ZDmJ%8 z3Q&cB3W2m;-MTJBa4cn-<@rei)(Ajy$ptO|DIO#LT) zgOv=}Z6$!A;G6&;UO8k1fbmdM@lb#n3h;(tl8%tI>!qW`!e_eJ>Ag*G0K~IaDK4F{ z>={KqgJ(dWdkSQUlKsbj3ZMo65fO+8obyp5X8-s+!NoMBlS#iLgh}EMNIS^AqnB#2 zMD+(52U`Ve6w`o7q{B^VK^IGb699}n^vc_bXpH(V*tek0Y00@QLRfJT0%=DA>Xet! zu5#;{9J~=77EsOOOt^37!gK)iwbeal46T&2M7$fb#qa9r(Lb;=@ zi~2UCMQdb#Z3*#^sZ+$omUh!IDTx26ZujPK-UU{t$ zNa4}F*d2~mM)^Q$$p@0S)4PX*!V)2wWC-li4cYI^QX;yLbFQ{U+-aztclYba&uhsE zVp{+ZBqAgPP6(v!dpI0zG6lXRl>?F(z?;Hiv;xK>!5(O`>Nw7ML}iUQ=VYhb?m|8l z&m9YSnoO0JzF0b8_0Ot45j&1;1~3XcF5$w8Z8s-6Sr2wWaTMRw}^GPgf|c5dgk=&Fi=PG7BI%G3*i!?t5`9 zlbjF^LF99U3E6Ewa*Mvh#&Q=#oC6leCJ7{t1Sl1lij6^N*sM$U2CxU^2S}WV?D_@& zI5KN_Vkk@syRnkoT&T)rg+~aa9doR2!wT{HwhWe{uz63+)2KisTPWYdFr)uafSb~- zn=WjxU)m|SaZwc3^{J{Dpo!bUm?#9&_P0vYp2l+VhfQfLQCK3|FBb8bwQ<&vFX6pE-`*Q2W*$ze9cvpUMJ}1RPsqLA~O*@CYqZuf<2*$lzG-{%-yj ze1`nmHMzCiR#>W_lsFBab;)QVnj-!^=iJ`B=wJIfRwtoQaK#0Ys`SSe-j^l3KU=bv zf$l>Hr101_WO1wox~x)NJK8*ABl%QW0o_9F0s@QVm^$8dkezDzVO!nePUu@BnB>NZ z;K^u!+^OCtEyr^r-iu+a={QR6g)uWcwKM!?8|>eL~$2HR%i1O^$VV=N4^N9tlSC| zR#mv5Nl9NbiWsR_myUMu^ZcK(NCf|qlb-%=Tdqsn_}+30Vr2`K0YF0v&%!$qYboLo z(S_1hI55V}bV&wq=ZST1{^iZ=dy-u`xjMm-03@Cwh%ySdE=X>x*yr=5DLrC47(9Lp zx0nU)_YjJ??zRA51QVb6Ppl&X&VRFOMv@13)Y_XXI7&#>uRZp2031_0ul~jhWU85zo@Ng^}HEU45XX2#1(F4MiCpir>$Y@yxg&3 zMt%A{OCADA1DIhmF03XqIv5;%Kqvv{28h5Nhjqoy#5$X5U$6V<(MAt;NM2mz;bH&) zh8=X#U}BLG00t6KKhAjt5gg8WigVtzn7Cs(R_90S-n@Nj05JK*3Vg`l@b~P!btH=e z1&~D7&X!!dyzA7=yl5zZ{s3U5S`Keq{(1+x6Rau;F9I-HDl(5Ye|?s@4_d z$chz$SaygIfJm~>!2sS>yteFKI2u4<;M9|&(bE1kWJE6}qVsas>AD6dH}6vT_OAl+ zYUQ{Fz+QlscYl>?s<=>O*Uu||L`Ecj7U!I}ojHEp+}l1t;RqNxCX%bk9l@=W<0R1< z$+dq4N1y#>v(Xn{dp$FZOS0@Cby-YfY}wce-v`hS-U9tPX=Y#-Y+pBf8$>lTE~qwR zr7v*KClQe$sM&BxHDThvIp=V!lv`K{MU3ER7Dn!GhR?qI=R|ZR5k+}cqBq+i>e<@O zIq%8WrbQ9a<$;=(;7jg3Tx`p5<{noiwZ36zK_23YhvswR|mptW2$}h zt`|IjWGhIRU0Q(B+!hcGKln#Oh?xtCs3Pd@uU*Jpu3ZA%N~Nl50CzE4H8Uzl`UNxN=X*WW9)Q>c zP^O3{GQ!t_Yeg#hu(Ca-(2(QL7!V&g^qXMpP%_K|h0g(r@Nfwi%T9H?XE!&!xcSW& 
zp5FY*v-O=D*2;@mrEE$Pm`Fo&%dwaR?>v^W4{GY1m&TgO(og;9|Lo_ZOl$MsKmNh7 zbFyx-DwpLYKq-#9*`;D7vnL=LameLI0Q9UP(A=iOu~Mbk#Vk_8E`Uq`tY+p9Z(wHp z_-qW;i`VXq3fGof3sUl;op9Z~dnHGVy^IWVDiM`t7mf&&yn7>&ESz(0n(^fRMl7-a z;BTDz^Fhacw*!u|)oyLN@58f>`~$oj{K&@|i8Ghv{E-)!@_@YWqi_8hL|6KG8}262 z_|e;^EdA{zA7ou*aiRsJ`)YWQ5MO%wyDJ|w>yk+TJ=5C@xhqbwN&u8g4?b7IGW39R zE;ggd<3+l)*Q}wHiVD|Ofrt(N{-wKB&piBeBUw5fL{%;*q|`0Z<#-SpGXHDFRzFX(h1WEWK~m@Oa|rak)Rb0GSf51B#$kgBWw z&GJ8A^LXp0pIBKC$tte0RZJ_al!`q0($U`p07IvrpI}Dx!|W>P(LyfBBMI&fckEKZ zy>b_F1b|sF;(&2xqVn`&tf2h2j-6=&=N(yU+yIe5L42f)zD^9-f zfGVm2dgP&3ehOe*F`ywLIF8-!SgG1ny?_cXBKBc}wm5N4741Z7J09Q--w5=B{12&y0#O7hij|5sO#%wi{c-J>&pr z%iI@^S^mdgEI?u91NqSJy3X8!YV&;`dGqUF?4PRG(|wkl^Y(P(mfJo)`=~!5U*an& zoDt!VniJ)7g zupIeq(1gOWnT)%m^`Cg`F~u&tj;(9&s-OSFvsn@$p1o!S`BGGJuTIVy?Rm>%=O+da zn}qIUDD?yYvgHi`0HqIDJ>#%vN=A;q{*y~iyhj2Fpso_IJ%Cie&`o>v%_l+Z7~N+j zB6QZ(p11JI@BhWr2Rj1J!Sw443ban1@^sihk~e&T1T#*j&{&3QuqfR;|AYlLi$h0)QncB4B&8Cm%y}Rz=+gBpLJ5@L6col#T z06Yo+gT8UC%2Iu$qj$f2$p4+au=deAKb2x0Cgixd#>LAk zZJIjgzXuqJ`E3TtF~P@|$XN(1N|e(%f$0QPzC)su}_{63L#&RWLxR%`RG zKRo@ghlC9;3cc*HYD|nuV8zQCi55vm$ManpNW*7eb_Qd{T|wNNw))S0U;oLIZ)O2h zB3Puj!kvQxM53ZiI^JhQ>7cz&yw(rEoIB~&FJFJW0G_x6VAW2UizRly_Ku^oA2Cl3TvLCi3ML%kJ0Q{_XPJryQyK+$~E#d0_F? 
zXa7|KPYU^(3(6j&X^+f(keG423)Ji;*paH=aOC_8zV$u|XWptnB+DO1r2LSa)RqZ` z4uNP$!N1C4mvrq&U}7%`@GTpK?RJy!opP7n4y&xFx zY*_n{!V<|=kW?0M+*LOnS~h;qNjg@}ts-Cd=$+TBe`?Mra2zZ9np~07F;J+PplY;J z&4$f$YnJ@{+@}T}b<)D&r(JY~{N6a{ymZ{8?*MpQ6KjfugE%0J_I>Q_g#gC5AaWe% zv-GA7C(OU#xMh+B)qzOV5jPd=oOp_{k=)uYT1!7SZ`aQw5m8Nm7=<=1e(uif0!~Vc zSsrL5=MXX|l!eLwNX5$fj}7u6Hh%QTzcrR9CM~i&7Hh}}XO64d`+$8lBns!8Q+jjV zqQyTu`^NN^`nK%rxbBPW(lHP4sG9A_kHy>g?87Ufefr%o@Til{QN&9ab-~XMUvcX- zFF`GUpBEPmJ^8${LC1dQ9dtD~!m+IyU$}JI=0!S%% zx3mYEAY08vLxYu2F&CQQm@~NR+sh$uXaz>$m9gQ}5X4!B_t*=XH!^Nj17#6~x#i zucD!1@>gmaKc08c(%)RX0EJ@yOtQ2xh)hMiM^b*s*0}0<9)@D`^sPL{F5cMJ)m;iY z*f-HcgwDn_%?fKEX18T4lu~523p+N6e1~p1j>UcGSMlL?0!U4~{;wyoM6y!GkPEE( z7oWa<<(J{00Ybe|$reGLN4S+d7sa`@P(nq(`rtz7!# z$~%AeI=V}TOku(0O|^>;swo-squ-twA6PTqh{a1qVnljjZYE1(AK|1r z8`{^cd_S|Leq+_X2c7D^7XmQgu;b1rGjiLiJFa_60Ikuwbiw@Ou+dW$8=l{E2~{Fi z1`;V;Je?Ym*eR@PfYj0PG8JzJutwWbOwbeHP)|W+db|)3(Llcs!yUU-;Zhk2x5{12 z?E;Y66+p`ER20A>ac~CpKmZy6^!195z#^3jR!6S-m1Cn04vCg-BHgr1`4~s zW5nXj=UJ&p0~J|dC>D=Si74+qz`s311t16Bowz)J#h^$@Uj_i&aY(}*lTV&gcP%fS zJux-Gdu)+7=PQY*uUCu&fRPc6aXW27Kg24dAQ;>d5wVP3+_?avKoqg$hRN5q0A%{? 
zKfBs2s~D=t*^pypnm_&31^*BN_W}vv-G27RZ&vKN@8PPZLf4}CS#K3seCiiTVD(J- z^|Q6EsuCr!OiAA0axPo{DJP7JR}49s8IhR)DmY+*bAyN+&K(P!H(H&|@3z%1eZFzU z8!J#)U9Ow1VJSew;WO5A#37fT2+$D#rgF{`M3k3*6r|mHvOO(d{qpr+^#PDhv(Ohz zUaTE)$9^Bo*hlxQ-9->B?bj#ORGZc$KP@R&B(rtyX-OnQK$Fc?xgAGJ3}*U&?OXe( zTmjBG-}K6}7fV!d-eMen`lb7oPnvePjxHx6N^hxORQvb)9;8T=Kmx4|;v89xJIu=(>hD*K*uR5Y9q)8Bta^CU!hg<#dYH7TVNjK40svI+ab%=q;DnzL z(QyFw5Su)SsNi02OFNk3*v;s!NKwhXspjN5LBV8t`I?TE`j-(+p6mA}lhtEJrkZM( zL9I6}tJjF^oJcUpZe6g>NUm*9&SR0vcAI>-#wujiJ$lz-O)}Cb+ii07fWuEX!>^^n z$)sEBUwmqX#^)vg7M^^1KQpR-$_Binfi5f3oG%=jZoyVUL)7*@eW6n+1z51EzOL zhOSbt7}Qc|x&hRp#qtD`*E9-R`^KOC8{|Ef)l5CcFDVG`DrHqr>J)LCR0^ZHv;SAW zIgFW+gz7)GuU>YC)zrXsKpOYcKm4@F2dkD(jkVFRXo`>4L-5BVn<(HVk9L{Anf5fPXX8Cf=D@*_1fF1*=@mdG=i3=Q+g08sTF zN0y8@_?b1vq!+#0p0)7frt z%Ss0VBrkaAZhE0uAhVho7v7Yt9(7f3E?DY1xEU)syk^D)@5QTz^+91NQAFC5Sl`{g zR#7@=VjX}%6fFMjO#+9ZyDP!136*=QRK0Ru)%wAKMBwiSc=)`0*jJA5SkOFx8CChH zL1QFnC|eGI$NCK#DZd`)oO3I)Q36q(HR^Zp(I+B!n>k5~NbcK-eR$>(KdXD`AAj5W z;amS{S^CNAj^l7mZj6K-vkOd|$K6lE*^gRkM<|iOay)4nZ|~CbT`4tX9R#b{;C_kHQ>-= zPpRB%|HI10?>SQoP@V@Q_lzhEqjm_|&;x&Pg%M4T-JV{P`~Hbovfrd*pA;5oLIPB* zX2u0SV$2weou$#xWs%C!Re@6`0V;v~Lg%90J6|Bx&wqR!ckB=R+#4c-nW!2vXzD44 zYc8hCS5T0J(bL-pfU`)nO!a(YD#@Xxp4_-nx46 z&(3~AcIQM8u~{nRs@k{in1jLrSYAzUDxK?|TBF=ieb5oZR11o8j#XX7p>oBu0l3h-e-N<~bhn?lI=%N-JkCarGBnRyA zog(DJxZAv_d8UEgFe4J*y?ofT$<1rt`&3|LQ-h|Rb_@|E1HBP3`lf%Q9$Cg^X`6r#KAv)2Zp(CkeeD!Rt+3B>qnO~t$O>Jrd4l!ph*zA zr(Cuo7gKH%m`Zn>JXA$BbIvWRrHMn4v@97rq0)#(5ZSPuizXV(7^7SpUJ50}eax6e7}j1)}Dj21s0HN@>89`4ZCR9QZ>YCZttswU2EZfW+yFS^oOi`8UEmPoWX(b+?%3(hY>XsUyXcbm zCc0&xnY>vQYmxHEg64G}UQjW7-}n4(YOXNTFr!tYXB=NXbnls+TUWo;xazH!Gi_Vi zg~b+uT*;8%C>Cf#<=~!XH`Z@pKjr_+WE!AR`x)@nZ%&rMmu}d!wC;r`9|z~o`X}#S z0(D2JxIxHNyglaPYY(ZKHuLMm{Q9hT;k1b^Raog>GtnL9$?7o!%y`A_0Tw<7K#rAK z*}8tgeGQ-gcM%GO9wS+TV06W>eGZP6_S-GMv*!Se%2Cq~Xk0P(gY0f^GG_Mo12pH{ zZrix{XPXy4`=K0Pp}QDbuI~3D0pvW8lXc-lT|F5sLJ*8ly!CFrim_G*ZeL1eLL z%&wez;8&ryKzW3i^tlWoGSqnJ{_JHnd++DxiseSHwA4Sh{+)a8$}XN#1&~YtX6yR- 
z^{wmYKVCWVz=>6(XMEFf95BYbo&zr(IPuuV6?2!mufahjenRN28y5az^WuNahgwE0 z>I0%6#y~a8*AW1uDx(U-*cD}w066RC-Sgn^S(hG1Mr5xdeCO^K0y0X?M8z}{nAUqS zq)#x&%!um0Iksg(qdRAoR#fRm03pBPS{3g>+UYU?(oQCw;Z~-VnUTH$V}ShPNm8v| z$^%loa@foO&xA5<4R5S}=id8~FHR#iM;0us#+7d_%XBt9+<(GRCwWz65D}S)it!+$ z9Ng|&{6tY7i-gX`wOfV2KLNd{r>d>q@2l&USt#tcGJFUyc)N{y)=uRE=5j!*U%lx> zZfBNkTLEFOQ{RS;738*MDc@_?4zKLRA;$b-N2VZ9Xj47MY{`(ynvj$3YzyE3GSr)-dEY+iKkYMJa8WrmPM&RByLzDK;-TMS&UNEd61R>Cr@_g z%8g9|74vcHbitYhAhYhxzg*q9bR_2=z>}*|&!iotQL?p#` z8DZgd0_d>;Tc~)GPSwx8M~uxzMW3h6%iwW zn4w4xcM}rCSk&)k+3D7;@Se^=ipARlW8}i(Spo+)wG$@YahRR zxr(W=TAS;PlCu8p=3pdCs;j2W9ALG#*)5AcXi`{fpfF=DKJ4`HGk-iOQc<;6wvxau zYR6!VG&dppxdYJZj_p8Q<;*l2f~@T1xFnnZ8_rc-m<^x5vbb!>l!M5KmUyiRkyejY z4jngi|Fgz9wrz86+W@!WI5xo3cBZo_-MoHL3taPMv3ss%kf!g9t zABqAJf@E*aUnVZmpu8x~w$I%wi2fCJWaPL*mIV+$ zgS%M43^P+Cqskgr&3mP4^vt7zKHD6)48s5nLv#UIB+(~Y(r0AppovG>R=UaR*!ofH z`cGbJUi-mkQhZ}W-_9fV#i>XLu~?wTnhhN4ZdZol zz|z5cPK=ce7|tSz5@JS_m|^5znsais8T@)wxZ@D#+_uv#TkGdP`CqHE$&wa>iX?GT zo9k9aD*KL-T?vZDt992}b;otjOuFfzSt?!qM9rxElOx7HVRy7U;GCtm)OB#%mLE>O zI3h7SyEx+k>)^N))2S?yZUnZtd)X{aYgZzWJaq1z z84yIW8)HE~#w0I*Pc_wUOf}WIGh+?Om5^Uo*Vw14B2Z02kR_j-t?$0}R>|ny4=ErJ z7>kU)=$hHfZ~6IL`37{XTiMpO>a({>M~*+B;8`+eOuGJoKZsWLeKO8pf)E_#J-(S>kf7S ziOh6B5Q$YFe8c+>{HJE-MT5;~qHKG3D9ntH?lX4Q&E>=P`C#2!cU;NsOa}U4)H)45 z8|q#;-qSpij%<*nY12|*-7W=w^23k?PUOPRm~@J>N+Sw}P6oJqlkD|R%~=G_l`%=* z0}efv3{y#OuD<8Sr6Mrw%{plBc3&1CkObyrjcq z;Yu%(p(xD;9ThGU4x%g2E&!-iq8&vyA&o-aXRl#hs!4l*S}Io%;qNxpZ7Y{Pr33_# zSv~8BVT#X7txG@Iy6M$_-L2ro{Z^WT@qj7(&vSw)6XQAFo0ffWLY zJGNa^jKn!9*iEj;%M`GV;>Jj11OO6o6c!{CxXstiz3qY4jf>v_0LO899ZXqoqjb=; z)1ECGGG!R_RS+`3iq+i@zcxrNMp$ik1U5(O#q_%5ha*=0wQU3LN_<5Gh4%P}#V zU;FoqS+=Y|J!km$F1bi^(`dz?uX!WW)Ucw6MXxF!GP+19mMFr4`>KI6YO zCtBLS66)&lUYo4WiU1LyR@hV}$JzuiwMhKEXrV-r$h2gwGMZCll2l@os%hD&y5}DM z+)j0_mz&r)=iG=V`iwmHCkN^*%7Tf( z_0md^A5I$iWgk%mX&}ZvOhHY1$!>{2>yTKdG=TKl2mbiOk!M};xcC~bU)^u^(cfQn z_x0~`E2DJHJ~-{rOZIu-a69R9sa>R2Q?P!XhJ z%56_y`O(=&{$6oi0>C~z^N33(-SFVW@qt74E?_npGyCj!@F}$aY~Mt3kSu?YMPj$N 
zw!0e+FK!1Vjq7Y_7_Pivya*{R6@4s|Rk`kH$4X^3EqeA<;Mut>l1LcQ(j+q@QDUY+ z%wUny{*{TU;WhE9n%#(*(SWy!0HA!>)C(F{&b=hN9#k`T0ksNiATSw4-h4m*o-3F> zowl87*Au6^r4|0Vr|&MgbsfUrYjV-?L^JUmc-DJQ{3NWU5G$*ZZB#Opkdz2?=^k&_67h{K5W|FMkHSD^?$66 z#)j4n3s)-^wOonOp-ITv0C?qy1HMYk@T^{UtaNkpn)g0He)*9KNFx253;4NXSx%-s zWv5y?tj?wmtD~_k)l}QiwsFzQrd4mh)7ezJ)QBW(Gf_1-$bBVZvBs5e{YT-(>8zzH ztkGhi7(1zpmsFG5Qi_r)Mz zp3zJ&=cQvO&KPm_l?P~;HwFO9Zu;pm5Hf15jwE&hL>#m`+32q!NVKmgFP zbitC<_ulkz_rl=CB2*Q-I6kM>93!t7NzPZ<9&4pTilJ2_f6&>mYD?Y6kH511o%?Qg ztaO`iOhqh?Y=2btl!u1PE$x97go*`H5HQKEoP2}Y0)WXq8$AHisQ4QZL@w>FDoD(H ze9qBVIaa1lB>_xCe85+~amvVZuR0KgQ_J%L`vpJ#&K>Wac;Go3pL_gXtD~)v+qUIo zpQr>DaL#QwPR43$S-Ihv2d{Yd+xs8EGwH431JZ#?{4uv}Yt=nBJhS9yXFn&!mO-2f zUE>?zKb1kOMCUyKlLPg{bGZOj&Oz#CXgL6oZmn+U0@c-@bl>og2UnMj-u;3;`^~;p)}L^hywb4~r%(CIKL&sNgJZ8peo2jrp(LxNF>SRp zX1bEO3mO#(W4CCO13<@`WtqiSpM6o^nTJm{k|mW^V?%nwKOb6IK4I?~yQAH(S{iw} zVUwNST$h$yFS*RB7`m2wkPTko(k1GPSu1iCw%G5KD~g3$=Bz-(Q(Qkpfj)m+6C24* znU=8e2TIJE+i+8$uuNQG2Hl_862&cl^-^K=B{)W%|FhBMlcpUVt*SnQ7%R2fTJFu> z6fU_L?&}%LZu;r#6R&^Zj*<~$&yfpBbF-iN_8+zHV{iXu;6m zt}~ZhFHs}SX#H$KYwKqV-bHsgRAS5A7uF$PLRTl5TkP;)UHddVUWaevMyra$Y^bX# zEaX?Ym>yutc{R+8#Xv?h06iX2ygxY{i6Xx!+tRFO-Wx#DStNpf+L=h1GvpV(LgR~S zXRQK2#oh-b27lw!y~@T-x`_;9c-Ad)(hZxg{OFvc?@_!LCcB&qFZ=GDd*1TcfW+Wo z-%wR$yWb8v`s8z}XB~O;vKxQ$hvr2eY*yq)9O-kCw6VB#ZZ--;pIKTyc(-5N`ulwzeD&tOGY%UDwGfA@c1zp{WZ*CWsMu@& z>Z$j?bcK;DWttR$uE;Wb1|R^sT-czsWPCe;Ntd*j2`ag6(WTHWuEV}7O4>?5~9(TK$>#{BSC*Ni&% zC(Y@GO)EFO^6U#)^;Jj~N0nd8yoMXV@$B$b7@S$W5GueW}_=$sK}{`h(_Bjr%cMiReg(O6l0;Lyo6C!IUF=A?6xZf;zg zY232jYHHY=Y1-Oox3{)CnY2ZQ!OUc7!Yr?$}M{q|WW zuYdCXp9%NOP;5Yj)dvB}cBs2-#|z-SaxlY8RE|7w0)RLNH^4z4a)`(Ru{1HGlb9)< z>8_L6)YSy(K4Su48fLVDjOZX1iH~4rd?GQUD*HSFss45}tX>MC(GZ#mjS6oeP#($L zjynz%K^ZA_K_xjc`PL^#M*H?VmkjeFMdq4w&WStD+J-k@{QB}g{c@wn?xCA&qhf1} zO)o#QeACO%d}q&F9=$3)XxRRWtTuFUTeQ5gCQ?>06g8tr8Z0d4f@D%7_c}xb&X_Uu z3D0}HGMy?P@X1q_;=ou$&Yh|}6Q=loYBL5^fcQ!3} z`c;8!kYiK%t1@q>S<~AUNGd?7zR$B%efFQthJXLkab`uIrx;^nRBt%@V8}_Qo__y3 
zv(6AqzlykkX7^Q1AaJR96F};di~jF-mD6SpAA9L0f!xT zz+|OSzHlW|4enK<%K{A|h&8zu;A<=j_T=EAqbsJ+s~-8;bFX zwL9Omhu^q-%q73s5UK3DfQZH@tN_ota!%7H@6LGtJG0J)dZ?EuGo{NtsH{=};nDcs z>ucXT`Jgja-E+gQxNT*yW5h7InJlRq`GcQKgSxg*?D-O!?TWHU4z^LEcUL$#=RDKC zb>-&8|9;EUBEY;eua%>j^|>Uey`pRT!V>$|VI zbfupdx=go<73(6aXK8jR0(YqH`6oWDd;WlM> z{h9^!FFy5RR$GrLtfwk*XaOvMycda#C2cRa?u&9bcBXaxyZ7IZ;KL$%d@pu1kO%;# z{q40G6p8$lh<#1+dr8T-aGuF*uDjyn^S}967BDe|OUE=8R_uvUfyvGL=*)iZ*znAQ zpKN&M!6jySWn#pcmmg9(W{-(xW#3`U>?-M(OMQZDCfv5tnWn8P+g2`JugWtbT&NlZ?sZSTFhHWo zw>}w*^&j|;EP%L|-|KGC+_o3JchdC3vNt1+LOlmnWrUn0wH5TQuASZYrdiF6eA!Ju zc@Dq}0H|#DDSZZf{e-EdqxYC#l$Q4cV@B67F=E6Byt{JM26rsGy=`;b>Sdp7dg&kU zZhiOFIw`JGB}*MeUMWvzmx4Fj$rfoxBx84o8|Qwebj!wf*1tRFvFv&&7(t$5@4#!<63(z#W_0wtNjk<5sHjEpj4TOlWg|gH)(& z1poO$y99`ch^$oGx{V(^bZe%4OGb*7Rm`Uae>EW)Ogq&gbDXn?FAH*gt1)gbX5CYL_V(DI03p zWzUASUra^W)qCNUJD1>y?BetS5tnYZiNcYXa^69g)Z@@sLCA%ka%X9`yGm1*U6s?q9f-kfgTyrED!6=4QUk0cFCsP0l+h7;&~$UHSfay zf(z;HYBAxf_-~c=1XILqJxLWfP&Z1-ZT9YG6nlB1*b7~Dzcsy z@^u)+8a{DSMThjcEsd4haTbYNnSY}TNMiGvo7^O)v;Cj%pL)ne!UBj1IiZNg5~<4b zp$H;n;K(4d(`VvujIdkA!G9#COck$)yOjG{Q6JYcNLz?kb+k^|NBo@_bE$lO@ih#I z`nME8w_mPF)g@g}1E{cgV!s(fev_W%VHtI9u;`h{NqI=GB@3F0GH!AQu1;rB`F?gt zjP(9@W_`Txqi;Pg7Bfo^kf?lBCMt<*#40u&3KJ#($} zc{gUEP$AXSoSr>08j9Y~JbaY^iM%1C6sP#mxx%$e3T~Gu_z{zUwvd0Ld#~B*%Cv?Z31XABjL> z0tiiX>mrEkl?bG?u#nQwPpuPKMpe_MNQy)R(73;kXr3t$V7#OUfK;xW+<_}wB-IS4 zw1V^q1Z;B$fJf!adVz{*viw|=N0GZ|Gm0HsRP?l4zmIQ67*63phI1fg4P&f865V&`At({k(NFISV6s}H6 z0VGurZVS7%bI>4A!o+nBMjoW`C~xWcN~#eiC;E+t%3RB}xzK;|Xj(i1Tk z6?3SIq0^dmqZ-8Q3+|l>B>2#$8EN--6lOZ@VTn}KD_%YdZ!$u}$v!}bW9`AuqF%Cy zrjKRRO%|l`;a{ol0w^I`Hh{I&E$M(-c1#x>dr`>_yvNsb2^^$JNE#A7EW2fCkGzh| zin-UwK;1iKDM%x~&eNdN-c<~u%9>j!oD-Y&h@oxv^$98f017loL_t&(S%YZqB;cq_ zu{cr_0u3q>3o4CpU9zGcKTQPOYRV7C%H zlB|jfJG6>y@N!M$oQ~*X*^2k0YOR+y01N8*v9@Bd=_Y%NKqBecsX(Mks0Wq;4iFpZ zERvVTxGp8)Ks|zWr@MBtT$i6D6A3@p)kmcj;K?Z5p1dq{ zWCG+-g2Lid(JIN;=N`mE4!FdOk=?Li<1`GP-~G`& zXusfBRd>jxyjPGM7FTRY+~ZoZ-aAL395vH0b=zW*ypqJ+f*-K%}aq&;|3#8Z^!^+BHZ-Y&`U8 
zHGsD~mR}3F4xp+9PADuG)q%o`nN&%uiKR(4)OzpKiy#UDCKW(}bVT>K4{K+LpNQtu zlJXHcl~PBt4Dwec9X+uR=Ax?>toy|{c)Nwi&YO#^&ZXN(Q&z7`jE zWK}bwQjp!lWeJj`)D?u1!Zi(gO>G4+io}j+_9yq@P8S0fhj0n_|bfc!AgJdeqBntQbN9y$PV56(h-DVp2e> zE9MMpn!Dnk?_&>x;bs7B0*Jsl$mZC1+>2O+!s1v}k);mM%J)z;dd37%6^V0>Xj%XM z0M?+ePYF^$@0Fyx_fiYMnpwen4pWyOPabzLHl-GgXnGW>@51*YxnY-%oo%&!#WdxR5)bhAR-h*9SCk@?O zZj;yZMG;Kpjsgv~H=uWhYo~KVirGDtKW{r7(-`3}0Jg-^0YGF;3+B{yDoTx(}pq#br$b0>W;^11m%o8omR zBatxhGyt0jl&9DPz;Vzx`cXWHe7!RfNGbtAUD=LY8su&g{pF#`jG6tQ*5rfaD*9_=Q0_tR9=9CGeY8mH*p&Z>26s-P!>*9Ss*X< z%?cFyb^|2WPGs%e+>VL-`oa)EyV${Y@%JyfK9e-o2hd1}ZDK zII;dH6fX1{1Q4VqEd*8wr0rvsQWkyzfH?265M8pluzvC$zP@N{PN!QXo2AlZ@GFij zpudNF<@El_H`hfrh@H2pzNa3~E)9<`M(WigelH$KpMir7hW#84$~aijcMeYPuyeX= zs$7n4HmhzrftF}6;^liOfkc#);v?0RazE-j1XBp4?H(WHzK`C6vyd-b7**`}s=`s7 zNWH4jP1#mJ%-l;gESPf+BUT=>Q_V~PBo6=FQ3#^Ywc9DAgSZe?BI`OWZ?9f{e|aXN zy>!zyN(PJ{0=4XzpH6rPq7X=57%-^{K;`yzxpiGPD@Qlpg8V&BdG7G54h^t@BE;RpW5*m{Ce}`DTNgzrW!c^!1*$;@5(gnY zR1Jbap+s>|8LaRKfwWz~Ba0xqE~~8|AiM^cskFEKCTr_lO<8eBwtR--dS2Eq1XBp4 zodPbE)ECplgeoap-a`;^4@)O%nmYm+qN|{ALI8z8+L=~KC)q2jyh35I0B@?Bd&HJ1 z7fxghMd3*?MpDhW-KNP1;SmCWpNczgfgQ6l5U1+c!WUOpi%b- zA&~wj?x6}yI^|TZ0F!FYZV03hNdNO-3L?Y;1RD?>f+z&i|16jQ?~Yfm0l^`NLLi0r z6TycN`{lGk5QRVr?`e_uIHbBmAcaRS-rEpBA&|mj8}4%mpzsKf@Cc9a2#@dxkMIbO boqPO$niU)`Qp1_O00000NkvXXu0mjfFsx9l literal 18580 zcmV)rK$*XZP)WFU8GbZ8({Xk{QrNlj4iWF>9@03ZNKL_t(|+U%WooLp7; z|KI1_d#7*TWH-sC_g)|&Kxlzb1*Aw-L_`n~;fo>y@b<&Ml{W&gXp2^E~Gq!N{- zfRB&=?mAW<7skiOKdTcUf6K4$ojD8J$$m{An?%#Vuk2g{1$xzAB?{6xOtG5C_B zXcB?|3V+Pzq4OW(tAE{yS)im*14x>In7rr`Xa~k z)F2zS`bRO}$3P#L>GJ_R-|u-pfB037no!9X-8v@%GG?*tv2}dm@+WC5Kw|WJ)P9rG zYf5-5aaZ)vyQx`s0Q(^1R6a1PJS?vU=mFFK(x=dd{xRSO0rr7FT*&XK-`=@~>9eZ& zoPY|`VfQ2J_~kWE)0+N_A~)i$Q>k1$kvT*B9O}}j@fj?!p>1C~-yd7fPXME~gRhMF zE`J^X1Ou`>Ak+_NOa3lypfWpOW6RNg_zA|&9?qOHFEb2D@dx!hder^AHvq`{18yPb z3uzdCSA+j=0lX;idqF_%=XwE9>P0l(#?vm1W7A(^IjrB`$-Namj`0~R(IgcLG(co0 z$Wa6>C3YE7O*L(H{&_v0z36xBE?^+or}9slg!`t&OqesAi~MdrsY|Mpzy0BT7A&5? 
z8Kqu6sYxcuxTKZY2~r9|FIUU>7wd$g2*=ckXN^RtGrBtusr2Pbnmla>wBS;5HrV zr+%N?>bl|ueeEKOzk3P~_*^Wp6&&q*rl3eXfVdxyIXm5Und8a?WXSe&2(&S z#*SE-Y4YfZ%MYX6c2K{6k9L619!l8-bD24|j0IkU8WHaq3|*tzkc<$5QYq1eLO6DBFo z!e%@{51r4XBgb(3h+?kPB;|?5(bN;+&G+hf{IrKy8#Zwc64VFN7z#0y{#OFN5U}?F z{l#-}J$ezXfS#`~Y(9Ft4S2_2# zmof&PedN)UAsDxuz@%fwaq^G=$GbEpNRe{#^i79DXT;*&z3u$@_wTdp{^hiWEmYXd z$%X@&S^Kpbm3WXJevPYZO8BZsvy_BPR!sW=pWfS!Ss-YgJl~zH>z#iF=*52v*l$~c z>ubkw1|VD-Go<7cDHtyVh5)ZpCB7(iMEQ%*#1K zFubXod)DsfrS2%vQzmnTu2Et;h{kS~UG@Y|9Xp<~`8A9f8lX~2bQEG}5#uU-9OgBc zZb$+NU<(IqN6~FLG{j8Wye`HX8pC=by#2Q={P>|YY+tpHaKu7ItrRc{8SF8L@k&lS zeEUR(Uv&gm7rB}1(U}Yg$06Jt zk%u!Zo4p4*d3=8-+eeo%*Y9G40Iv{u42?3E#&AtiDFqq|p#;jbX$_lfZVvKcdx*NI zO~|w`J0kRKYof6|O!%h7+*;yg@rMn3XV#B+Y>-`kK8sBu44?C}TZ8YN#@JJ*aLl-J zuGTe5QD7pcnrQU^u~Oid0==n+UsZpH+jB6zll55*@OdE67X*0yzX9wkzRVB-rIy2r zB@eJO572W3e&zfev5=$g;J&-hVCe;Oxhfo^d0z(~zOa=ozkiP=(}oi!Q8vDu;$bC} zlz8y%ZKpHs8F?K%!-^@>b%p{wvXw6iKz>%6(#mLG4XQlsM9AU`@Ak&yW{qsOf zE($3F{XZS-0CR@(X+X0ab~pB9uy%G-V7_xA*STw-Vc!jZ0k$>E}4x$yt1A9$Q|~Y0p=?XqvEWYOnhwz`+x8TEy?G-dYe*5lwljiA+~<%PgQD_$ zFSKjVYqbJIk%u8(gXsy2Q5+~5#fioz90y0rIJn&g<$wXGPz+@|#59R3d7l(`zi=2o zvmCDZ+H>s80==Ea^5g)vGl0xegJJi7cS%!FB`*ImH9{~}DZ*zwOhe)~?YpN?@yY2N zR^exc-^E;)&I}0^Qs5FGY(?;UuW@*?lJ7fU~MhGO_X0Bz>h3tLCQpF z(eu()_T&M421pOB!8fmlLq!5;k6*~q7yq7RIe_i-WBQ{XiIXaq#S>_R|ax=jt z8KDWhyV`l=uN!&UZ%~nLfnN7`INz&z5ul?XOUquOLU4;vE{`R891<@z`=MzWNAGG&IVk z$do6VAdoD(m_?)EVoYMNZX_9gS1XaIMT=X{1iX}ZeFi0o>(b?n$O3;Z22XDw+0mPW zW_kVJ4CI4xD#?V+y&8e%p5@$;tS~?`G)iPI*d0JRis*0FaPC8EsNKAIo%SF%EZ;*{E=Ha+f_drrvmETS zL93I+^ywAud~qRvNKv99R#U?7f&KYz?UY^vVoo~KP#(;l35I2=pZ&P`1OBdpbrRkKoq;VN z@NOM#@(_GC9Lvv=2|RH!UrBxr#%NqyOE}GHkzr{WUd){I%r*9;BtUvs?i@ZE|Mf>R zr@DyBfQvwhw>Lm64fG;yz}x4r9HdgH^imDR*z=2b`1YYAnR3KvzHDfHz^?(aIY`~G z;@&#YB!=yv_O#=KVgwz96SZhEB;~q>O9)U3nFe^$Ku_9#tsfBQWAcCdLF-_Ep8Yw) zWn? 
zPFS^H`}(8``;0O#M|<>mSKZ&y8KHUYej z@d4K6CYFEdQI-Q*T_>TCNfRbbT@e}@yQ%%Ko^46MZ*HXFw5h2aNWX!n1tPYBJ6~fD zuzU6}yeChgvB(_t@69JuJQOaaLMRhJ9W;Y!sd}=CqoFw4U!CJ#&FxzNm7YGyUDY(SWiA!H^-k zgBtDK8m5DaC_A*uY+e3jw04tCG6*3B!qmX*&!6S&g`+sxrBVEwb=-BwE39qo!Aa!7 zdHD*?K5Qgs2HXsfTD0H)4!8fmrL4E&fYk4NjlFli#dRHjwD2FTZU(7E^*oVTNu$R+laC38gO=g)HsuMk4e4vp=pbO$5^DqwUnHKS!< z@#(Ib)*ps8-~PVc-jWV>DWLxUar@8!6C)Nq$NZejm+he~4FJx%>-hZK5u77H)OYdj z^dE9ddpNbkhMvRYN&(I&VcJMrVFDC_w zb!G_M!-^>d>c|ZBb(}Q77rPm5Xt;97m~5+z%weHXS<+@8mj9Qq(tKna1sIk?`}jLK?5y9g;js(3=FI8b5^yu`({s7M!pCiA z{wA(nbAcU@)sfP&qv~spy+SKF$1SxXgUYlJ!YA~~TYb}i`k~YD^dBPoUfW`KwO_M{ zabG)z^M(ePxTTR5x4+60%i`ur4R8({Me)67arT_yd{Q~6wFh|O@+bIHdpJYirrYHU zjidx1Vj*lh!SL~MukJ>=M5lLBN2doXCb41~jxCT9uJsllA5vCSx-91Eoz<4vg>E=# z8tx&1UguLf;Jah-$tY4F+&UG8hMr|P>Tgnbe=INZX!il-jsEY(=y#lq|C&Wy=XY@# zpm_~$O)zw7usX34!*4JvIkl}d$TJByz5Mj=*?8qbzIxjdt{+y+>8DQN`^&E8?nU?U zbQ;i8p#Q3O$_dl;l2eN@rVAlOK7)eoC>wzi3Mr-RGguT-iW!uie!0h0Ipf?9Iu}l` z=Lg(WKJ_7w-t;_gZEK+?S;>`tTz|Zn>qeC`FKW{D<tW`G6&1c9CrMI zg39#&W3cD5y5!U7;*9A`_Pe-N3jCJC{@M<9Wob<9rp1hQ8%#+)sIHq0UC~~N^UIaA zE#1cduKo&TL;NhNDPb1yY&xq0+Bdw@Pn~a+Un7L*W6zE#tG%wY;ni!tu={MyP_pKB zzStPV(h;&uxV@Ko#~nLQD?dY*vNx_oN`0IR95q8L#cQ-ie)$f+y816EPioT13nWZ#<(aT91+p4d#8A28D(d=kyUZ0lGFK?*?iwG zBHLFvc@|uXSV&7@w$}xC^S85wvbBj-8WV$U`FbjZ5CY|(h1&htRvS_(q=x1(aAyt1 z9Y8MM!-0$D_P+n5$qfI~2kbj-D&=QRXVN8q;N_^5!Q$EaUFFd2B8S&i4Pd$Xhco=a zPNwcJs7V6&U5u0>?hvsR`#-+o;8!3TsqS&hxV=rE4MXMm#xwFwBgtemI7yc zOX?XKAiZd=>%5asJ;W%xT8LgNcSu?FZFY3W-(qzyyka)&yLo|{?()RbHMyFShMlyV zDA}YUvG0X8y<8O)T&oqSeC7-kUsj2Lk^x>brzPfX5miJKrqRI-C)NgSM zT?*Qov>{(I%1#UzuHq3JoT)pEb}{bIdfE;YabR;L``;gS)tq2xmqEhG1O9?#js8ytd!cr8$h}af98uEi zxs00Z-qO_T00N|2N0xXvRRFK=;On3HJ@2%IQ(nB3R9QTMQe9B&DAbSNWJU5B67+BS zre8EwFS(dru&YKp^6t~I+9zI$)z>NzPke@xkDtKt$+Z0DJU)NR3q1Mc`?9`5y1nJX z*k|8vm;uU{~-#HsEadbndHfTbj!jT2_Uy5;l@o|1E;!=|o%s!EqapkV4@8dHT3 z&<^W1##XiNpESR7DeF=0&~qP!=9HfhB^p(#ueGyz_4x21art4nH5rz z5A4~5IiRf3|AAodFXn|j-cfRaJpJ=suXEW~kLJfd7bpK>B|pq&_t(G3sRE>F(|GM) 
zS=ZQ;VM9&<{;cUtD)KN^fO=soT}i-S>z%TAl2&|SdUdthk&R#OS@R;KTaZHrnV8%H&!4tQtsW}LUcD&Eof*I1r#+ z`6|G!HlEtsj;blaH@p~ksTWU~7q??cy>@H4tPznZy67qCh_Ey93~hm4QMJIRxKtA| zNySR5>^Kmd_Tfu6hnT)}ov&qKWaO-w>nc9ENoXbM*=*fcj{xBf#;p3)^DC9r6T=BcvAd%<-BIkd2h5(O_LetY$hSY^9Wrxf zN%twhu!L_S#mEOb>2ws16az2CKDL}OIhZ^f{8|o{@6W5+FWc+?V6gY+BpMLhInK+W z0cxbgSJ%lyBy7YwLY51+``pTkaZTI9RiRct+qaY~eE7_*JCB}bo-V;96vlhcPjYv) z`ee}Sp|YWMC~Aj%af^B~*=VlxOuWG@dd-fNqdS;>+Lq!|OSrb9#T}ZlV?vYRl#U}0 zdr8^XisLJZ(iEoav2Bs9UsqO#U+86tYo#uvD@(&WNzdm1<*hpH*uH%#_`!%uv!b}W zY^H7-#W}+n96`9ls~vMBpS=4FJh%R;V!0bxn)9L3u`fMLEfx|X1iA)NNHhuBMZe?s zYxmO~!x1k=8h2hTXdIIOyi3O=1-hefwlvf3sDf4|W{96E3R@RH{k${4IPf2CmK=19 zdOo>wopukevtKt{880loX}sP)LW;VcqzOqqNo*6Y25v)xx@Bb0fQUw-McK z^lX`-mmZ${%_B6QhS2PI9SJ~q(OepM@|qKs9sGLF`#0}%ff&$s=nDnGhlXrT-Jb-iN&}_l&QYuU8yfEc7!B zBc#y#TE>;rvcIUSe*G}- zflWjEs4O8BHblcF@^GVa_z3Blm%3LehR@sAT2vmXc&o1bg@=DpwCfDlkgJo=upB40 z!|q-cJAD0?(`Tr8wVgD7Wushni9PlBc}B%?$>-Rb8CkZQ4{v{kcY0!|S{baLq4#3V zk6z?P8lkxJQ2}C%<}WP~Mo#CJZ%DVfapDKR|FU$Z#nla>$c;PT4tn{U%IV^eQc+RU znXCaVih~mQ0iu+oyCpz(OMq?5N5|t1B%E(YKQqSDzWPVaQU{7gv zztFWG_~)EZF5FieLLMbRXJBb6gcO=(YHZpXc=?T;jge~U@k>Nez8hBjak_9RJ!j~K zraRPku?f%?>XozoLLZ&{wb_Ymc;BvlevaaQCR^s>Q(VK3NqHiT)X=&RJPO|#-O?GE@monRmF@_Dc%B2*d z3h$_^J(JJ1oz;8N0AJ$Od*kn~dWzL)))tH23EOt!yPbAE-`-N$vKIh+>ql6Ip-EYcge-q)|(b8YyL#s?)+i6F4Z?-y{0BPbE>@PrALK z-VW{9>jZn!!4AlP(2W3kW%_%|t3%q}VA1|EJyKlaHj5XJk%wGYg~@RDkCZ!3;iRKGW~)b5TA078d)k_capo`v2HM8;4+edESfA$onXkNmOBH9&UXV|v7 z!WFGOQ_gIi`R*h0r}eby*8W(;0$fTdL!@lxnaAu?wlQ{IL$wfG+!kX0-FNVfpq(m& zRN5wK#f#G!DXi?EnXsC;gmk9|f+@#QTkWo{T;@^%BEu{x;xb~{rH$9hM7ea-7o|p) zrg9>?La~U*{CkS=ezlUjuh`Gq41UW`3UEAqWRj-8JvRH89S_qhe1{vAPv-)=Qckqa z=>F--q8+sr;CW&_e;i)SYqmmrYZv=|wt@r4jHCSb7c+jzm)3CObKGaXjR6Q<$ec6m zrEcT$TB%Zfua320Gh+ah`ZI*^_evi})!E^OqEy)PUyU{F%qllERx28t(*x=?@dRRhi%-EYym|HbYNvT` zs$3jg<7J>?r_~kcz$d3kWF@1%#zg2>A7nK+5EGCY5;5E}Bcx!Y$)2(;PV_-{A55OExUN zz^xW(0+&x`l&(>(z$$X{i=VBa`H&i_D+A1Q!2HQF_9Y!oHPV|C95Kh~dUKb0Lqutb zSvY^$z_x@pFr%=1XnKYZNuWAZtOt;W5VA-ZyB<~*NwkZOI=>5 
z?IiLoW8%5#L-a0XcDAWlEUO&V5aAk;D^GRpviYI4YE^fPnC*b&plk=Vt(neQQad7F zrR`I}o>XOryv5i1rkwR`?7-VE#_D(XCrxK)C6)QcSs&Q#8;{lww#u`mc?G#X4B;}&?8pHq={6; z1cdI+vh1M6LxDxyWs~VZNrW8aWSU8O@uc)?mmJYCwZ}2H-2N(?hD)z+s4J6&e;wKR z5`Aav2#?fr_F4y8jkf>!Gi!2`yZm7wH!vNg94oE9JNK4*hH1W$U+_%0=zRC+&L^Y$ zmc9_H+vTKz!jG;QzqS$b2g&!p>dlp?7mwH*xk*+#f^=mdXlyfu|fe~?#sE_Lm^v-<}y=$BrA{gx4&thzq4mBR@a!< z72*E6ZdNs~sb196?*3dthGf~5ZO)%>-)t+KPfTT-Wn(9Hx|63r%rEM@h)x|!pxDE( zkV$8Kcf0{4yE_fx&gV{-i3%|ttcGqZax7WVtt>P9*2Bj-ha7b@NBIp(Vl_b;H)URM zt#|ULeiYvP#9k*9oU9dyh)HoTK>K2*iy}GRx^t5}#u?}Z;*hM0khM`#}?V!X< zpbtR1+<7Zum)%pB%gsJSdb}QF&QOE{PG~<`Z@xR5{8{BB&_N=!0Rz2kZ@6drX`5pc zCWhx~TKxS7I%&T71zvpgUG^r#o&pG&Ip?kBHgOsm;>ls z_3S+5S3F(cO@x7W-y1ZWQrO1M|4_iwPgf!U@!FM4&?S??F<$)5TDAb1g=45ZvlK!> zqrD|aZIPRiZ9(>03fp#IM+-Xu-J@en8OkA!Le+QD5sC;&JSJVt*UPK(4%E!(&K;dZ za@k?C^Nc07>GPOMk}Kx>Hljxa5Nl^%CKgc0GBse?|)M!PbD*)8ePNA}S_S zh9|!?s&>VWjXw7TEigY{dQ-xBPLPf*M?&)-5?HwxC(dn2yJy;=A zId+#aLp3AnZC%F8y{VhFU<@mnrV7oIXGH|X%U7w>q(IZ+AW}bli#1Pept01DUOT2C zrH$scak(vacc{8H%j&Fm!rd8Tn^U&-oUzV$X0-Gd%`5d*-+sDzs#2m?2I>lLQ50WE z1jAz?WF}L^EbQN|<&CdC$J*{_Dl`CcY+v)e*ZI8^I58V*<}ikyGn3Pj&v||`uif$j z%flA2s8zV$|0oXqqhj~_&!$q>nPxkLZhW3!BnOsz?cnaQ(%%AoSEW^s;QQa7{K(C% zTc1m$MNhLyYoDCPpNDGac&=Dw~+Sobs z!Qc-f*v_Mo-D^!0CQkZDID;rA55a%S0#0-5akiT@gi#mGW#qlf*_oE_zS7TSbIdqW z(VU?e(GewV0$6x-L-G8YMx<1!^)}o$@wgdxvWemJZb~dAqEnf%^>#4w+a=<=vq#)c|f?%6j54GtM`TW8yh8)51qI zv;$p49hHi!$=%BPv9|mp06!>pKYIozxHM)*OqM;ohEUR)@=#>gt|N>gtvZa(^@?$^ z$_uXdir6xB{Ta2xD7W1($a# z#0a+YR;=-l$GNI6kqNEtm^#n@XrZu5b{G3-l|5MYnqbfN(@*T#J^K{Knl|qGF81B_ zGS5chK~N%yZ34oNX846+G^t29TSm5rAO{6(U7*JKkl;}n#CM+kzoT{=uq&82;r8B?%G9R=A`|QEm zN1F?QvuF%uhmE4b6(1NoyW8kmx{bQFP>TH(@ukIMQHzLMkLRJLluAvY997U}ZRF{l z52`WzJ$CpM*cXn%6>xLB0N0;B;OqB(lF{eP;`4^2+_G(T;GMb=`__!g`7oD|MGN)H zD`se=`@>l6fOH}KexX-sJ7iHb-LsEMIVW_;Y0@y29qR|^IX47*)olD=o?gDEOmmM< z4QQi=KDBZDnRobVroC-7t$1Wz?Utq<9y@UoV| z=GT!OFErVaPh9-%&1abVq7wh8OM!hM5Uk7zV6qIrRY-N z7rmAg)zC%w-sQYx+1RFo?Wi~agE7oB*3G#{(&#cgKllU-7mj4o&|-!pS4-}S(zUmp 
z+AluIUsu=Cku2}d2;rzjC<*wX0ftJ6!pXUdU%the3IX{+nK%kHYY~IN!9` zeCZ!Ku>D3pQ(eU6$sL@lmh55A>XD`t!pLDmL>Iy+#Wl7RV{E?Hha9srq8tYhrNVIK zo2}ob%-uNvpKmKf0JX*LYP&tO;m1ENI^@3e?0JMOcrnFQ0gB() z!GV{z)0_irssAgk=F(v$RA*W9ESat%LFI01n8_7biG#LhN;FmD8|az6{xwkt;nRPDH}H;t(ud#j4L zO9pGCvK%M2+YYYpP_Zb$RK2)DrpMB&xL47{34dU=t;tb{&H#nW9w*pwS8(I?S9&I1 zQ>wYfkTvtl5Yji(C_Q0@QE~z*5nYyZAqucixfqgiUnX#Eg(EUI>J`(f2;6@bCoCGv zf#yS0$lAfU-U zn-Z%Wb>haGDm&uU+DM`Z4O6tjrg zPH)9tFq@k1oWv)_S29)#WLt=~<$G9v>J*OBWN!nn@1o%k@3S&uVc)cvbJ7{Srj)Am z9z%)@4WFDSt^_#9zDr$Pxsc;7Ux00B=(>ik2@EOY(T28yPKb#qpnnrbNw1225JQQAXq)8j(_qJt`Y(xYO?nH z$Jh}vk+q$?KP13s(os_6Wny4-H&vhgfSxut@4q^_yKqq8eQds~QR(@qU+ojg5nE}##j z@&B&Sf&t~s7Z>95yEqOo8@hNs6nE6iN7nLQa}QVh49X0hQnyBFk%#IjL%0yYT^bFp zs@it4GqbfdX-A>0m1vz3x}o?#z4dpbwA0nME_zg)?C>6sf>j(2E)av`86MsV34 zTKfUJo#%gccbT3)Ct@Lw__@07^)K$db@HOUW6h{eTeGL<**DAHUc19p7gAPqpA+iX zW%qP+Dbo%r%U1n6J`}*1@=U-t1!?Mu5d-Y0Lm6r$SbSR(TNnS5-yY~9cGclbmRaV? z1gM>Wt8e{ikLXQH+aj-)s(O`^7PmD-Ul$%8t)*O=okvpTB&vG~ZU8xyt;Y!-I2% zX5Mfthv|}8wnO;yPw;&5y8BkpcHatCC7$mBj0@&4;;{=kPm^emzsueK`wHtVl(evH zRBE>i{U>kp;bZTyBNRhLES$aV#L^k~@X?e{9Ku9^ZO!aVn6?H2yORU#Dv$IKfZEhV z*R4zW`CXsje@gw@C$h}|fpc%eUIU@iOa5Gv8D(gDA@y%VSJxz1<211u01zKx}t(X0Y<@829W^_q= z+6nJzCraOVXq7N&dE9z@Q{2z}kr#3lK)%8qNY-S(OFgiPonez7K5`*H0;mcwx+>7? 
zPuSkVhxA<*F0TVc3W!>SV>Z#&AdPSAWOH|X#uGA)iKXc6fxhr0Z$WBCP{w=akEA#U z!w2cz{*f;o-n)u&nxs*H6|o3eHqova-JM}NTSIiNuB9=tjCM}z$m&{J5BVwg{{CW4 zK4~(iYH6C@RyZHlv+UGg^JrF#jgv)0P9BDL3Y4G>FmTQL{h-Z~$BaWS_wsc|5uE%3 z9;SCvbDxU&))K~Fv5>2UfIY1l5+$SiIer2a_nyTC9vxpeM)>}Bc%&XpyX?KbgIDuNl!3r*=ds77A zF&n$SJ9B$UfHE)oA3n{e##b;|fY{&3-tWB3Q@>ipu5=lY22664htrSsLjkHKi23Rc zLV3XFPqCRDq0Todw`wE*sVjko*J<^6#@A4BBKuS-X6Yi7Ap2n9l^_#Z&j>chf$ z0e|mVEcn7{OK}x@;_@VB6Kjk2DmK8$lbJPtB!_4D>fR#H zmhII=xc1!4?Mbg@Pn6C@_i@kOcEbHwsQNY64pLI&8dN&{NEg)p1ng*~ z`Sw@XM&G?aoiiWiZ@>%mF1MY&2UNz~ODT{(BfdtxTSvGxWQiB8#Dj6nIA*yuylL?; zvF-@nfIX@dPmw1cA`r6)9q6J1umdi1m)>_^TI|Mm-&q{$)^WKtbeE3qj{mvlk7TY} z#}l=PY;5HH<9^PgUD1qk7|8pq`f>Oa0`y$h=JSgbVy($%u^~^qPrqRYgPHM^0&z_V zmjfzhvGL1K(@f?(#MDvhK@Klwj23q^P}jw}C)QJ!qcFq;b4bnTw;inGe#y_;L%l3k z?7`T6BX_uT%GcMkc5geOBnui6b>CTBoml=s9Oz)%D?9jkK2PgU&fx4>!#F#cr@9W_ zTD6b+tBded`0$nc@K^W<6uAim+ywkC0xlhoE^tYSOP9Dbi7o_MlCcv@Ul59iQ)6=plw ze|(=Ozx6V!x}tr?IVz7l$d(GuK>(crLIDPnHFiG|$1cFqHlU8z2N631z?y3rZ)nU? z3i~%};?|3N`v36J1jbizrV#P{O&hw{(;Sb))5%)+{=ro|QQ@Q5(9n0c(SD#aZk9_v z`x$6O70Ha)OaDK6IOnBr4?nVu<6ihY z$E7V}C$g#G@0wTND0?T;O#I0Dhq?PXUe~HGose$pLIDPfwr*kYxh~lNDl?%xC~(%UPgF zrr8b~ulXx2^j#lPq&Zg1o)|kzG|E5sN6vk1dnz(8eLZvmaNA3)r|q(x+o*F_u@ zizjl%;)$G*FQISkW?3GF!e=0~to3o?dl(Eh!sqWhOqfGGFjD)UjCuJQXj zzWE|6^Htba7Ll=*jf%T1l}dO3<8f#>4%m)E#I%W;Hj$`JBxVtf+C-xk(TGJXVi60+ zh=om}k@#x4sNP+u9pcY*D*cq4Fo84OY2Z}~>p)jrmbo%1!%2)iXb%%vwU2koJZy;B z*tWv9Z0yywv_>tgJWQ5LnmB_5dm${U2GGz4!}3l)jbI>?ZtuDH`kPKF~C_fxfwVsE7KBiE}l!(Ehli76bQ>9_P5PE_~-T1r$5)U5gQul&Q;@| zUx2m0gZCu}C1Blt*7P%qr?H!c&5b-Au?PiCf?J!|xwDn*6~I!G$E6`kJQ#i#`j7zL z>LPqiJ(#QZrQ}&34{dgZn>K83$*@-gsB>ph{q2+Z)YPFIlawQS+S!`b^N9k1PCu5I zLRmiB?AM>9p+I&YXaMUB@~;pn^U*Aq-PptPW8K^+K+db-ff>~-+R)JV;U`ZfP~u@` zva;Skz;CmeJz21yJ(OdUtR1yz|H)gduvKctjbwo3)UN$2Z~I)VNSt)q!>O%Ua%Eba z`XJBevA<_D?vbUqU%Z?zxpjO33|+&e3EXZS&)QnvIpsl~E1;6eBj40HGpV}wEUq3J zV5$&^m__87O+0kvlf047#$JGRWFUz$kTb^_#GrK`u=i)K?62o>^7KH?AOgn<8nh0m 
zkH>wFb)oO_gxBCxhG6{LU*x8;Z}HviXWViEM|q8SM4eKo%}wmcKI)wo_1Wpq0av-c-y24nxuH_&wkA-e|C5C?1o zgN~N0@8Z$XWdSY-(f1s$Ny>d5j(YG@)O`L48q%C1vMRtRUB*p_?O|3vw~5_ZnFSEH zp1_3ki48?=YIfYj5**L%000NzNkl!K4~&%01qU8pF0B2s8Y^OFt*hZX4|uy=*Yo_&J_#OCW-XEP^2f> zQK-bWE{;;EIhx6(FjQiYr-X=y&MJlFB=#0^RD7QyNc*@C}f(HrSMMORL2Z%qAcyKR@qIi+uU!dop2#Qyc2;xCBcu-uC9HN_G zvSg>r!}QGdynWrTs$Ta_&!!ue^rm}ycBVh?+g0_d>M?_6463e=oBN=Pf8y$llgVPs zn>O{pZ?ECpeRpEn?RaMwKRo^+&h6|j^Zg}*hZA5nMKNZXF?cQT67z?RKd{1X1|ER> zZyV=NOz8$x=q^?hQCSvjPaIgx(ZP0&l$l_kOZuKb*R z6nDS-G@gI(G|o5z?ai?J%Vm6X=3{(vb#J8|h6aq%FsY;|Y?sWgA_IJFQW^{2KUn}` z2L9D2aO#~WanU=RE*bkj{)JZ_`v6}CkjFp|fp1%^Slb)?&=)=RpxX-2)X$4hrUfwd zaEnU(69t?j&}4qx7Y?rQaGpS(;GKi_4sPN=j^_K0K;FT52P)?KR5;M2x^Cfs!o%IW zg{@~E!P&d+z{3D&Zy*0&_!;M4`W#=kG&@2fs$rcul|bH>O9KV)EPy{Xuq8sJZp>r_ zo&fki-oSJBpThgq;t_0sQ}~^? z@X_rPJXOC2&y|g8e~qZucTLF`U{UN7(JE6*F^ya_S;ZBLe{#^if)X_X6nh#}yVq~x z^4nkG&G){=rFsto+4QgaueTWt8I0U;^vS~$Lo zq_J5|=|j+)>O>t57fZ)fm^xDT6@N4#01$#*8X_-R_$?DR(z?(b_;b(eq)&W-_G)_| zzFdq1a>C)<_WH3>2s*WZ-Uc}tlwIq>hEFRfpl`~5K}X7-TeXxEeza`4gQ+uxk0nVp zeX-P{?z09PJq0?oD7qF_X)BVpW0Gt@jX7hTL`vD|O(slJT)S&~JxA%gem`?ql7q*5 zwB1HT(d#A+N6Ztcjo#*DOUxRLxkHK@(&m6lZ1$$s6IK&O(`g_Vaz3I#&+0beZsbC_eQ+aUpu2>6H<>XK&@J{n8E#N2=i8DVpqW5QIiyX0uM~0~BD^`kvp~@( zh+H(;=P@&;ZmS~*NtOatkC`x5v!)B!CBSncRGEs*0QpH?eLy}~n5`yFO69{xAk|bo z>=6>eUIr@PW~)O0`w-M!YDQ>c?y3FM6C`D_Sk_6}XwY-QlCj8yi$GuR;1#}Jk_<)y zm^F*mb^(8Crce^&mWC!JpvQg(bj2D+5rG-ZMv4$!)9foC&wO-3$28@DN`UVIdX3sk zGXP70tuKk=OY>g%R)^dQJkUPaxIyRz2>4EN8nSAU@C4dDcsm(MYljNb!?$f^tQ;$ z2)fZ}J+$_11Tt=Uoo1#~tK@Wq0w9Hj75`dG-BO_Q2)OMAr{HUYSep*eb+lz2CXL3n z2V~`JlJiH