Usuari:TronaBot/Python/Arxivador.py

There is an updated version on wmf Labs.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#$ -i /data/project/cobain/public_html/pywikilab/temp
#$ -e /data/project/cobain/public_html/pywikilab/temp
#$ -o /data/project/cobain/public_html/pywikilab/temp

"""
archivebot.py - discussion page archiving bot.

usage:

	python pwb.py archivebot [OPTIONS] TEMPLATE_PAGE

The bot examines backlinks (Special:WhatLinksHere) to TEMPLATE_PAGE.
It then goes through all pages (unless a specific page is specified using
options) and archives old discussions. This is done by breaking a page into
threads, then scanning each thread for timestamps. Threads older than a
specified threshold are then moved to another page (the archive), which can
be named either based on the thread's name, or the name can contain a counter
which is incremented when the archive reaches a certain size.

The transcluded template may contain the following parameters:

{{TEMPLATE_PAGE
|archive             =
|algo                =
|counter             =
|maxarchivesize      =
|minthreadsleft      =
|minthreadstoarchive =
|archiveheader       =
|key                 =
}}

Meanings of parameters are:

archive              Name of the page to which archived threads will be put.
					 Must be a subpage of the current page. Variables are
					 supported.
algo                 specifies the maximum age of a thread. Must be in the form
					 old(<delay>) where <delay> specifies the age in hours or
					 days like 24h or 5d.
					 Default is old(24h)
counter              The current value of a counter which can be used as a
					 variable. It is updated by the bot. Initial value is 1.
maxarchivesize       The maximum archive size before incrementing the counter.
					 The value may have a letter like K or M appended,
					 indicating kilobytes or megabytes. Default value is 1000M.
minthreadsleft       Minimum number of threads that should be left on a page.
					 Default value is 5.
minthreadstoarchive  The minimum number of threads to archive at once. Default
					 value is 2.
archiveheader        Content that will be put on new archive pages as the
					 header. This parameter supports the use of variables.
					 Default value is {{talkarchive}}
key                  A secret key that (if valid) allows archives to not be
					 subpages of the page being archived.

Variables below can be used in the value for "archive" in the template above:

%(counter)d          the current value of the counter
%(year)d             year of the thread being archived
%(isoyear)d          ISO year of the thread being archived
%(isoweek)d          ISO week number of the thread being archived
%(quarter)d          quarter of the year of the thread being archived
%(month)d            month (as a number 1-12) of the thread being archived
%(monthname)s        name of the month above in the wiki's language
%(monthnameshort)s   abbreviated form of the name above
%(week)d             week number of the thread being archived
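
For example, a hypothetical configuration value like

	|archive = Discussió:Exemple/Arxiu %(counter)d

creates numbered archive subpages, starting a new one whenever maxarchivesize
is exceeded, while

	|archive = Discussió:Exemple/Arxiu %(year)d

files each thread into the subpage for the year of its newest timestamp.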

The ISO calendar starts with the Monday of the week that has at least four
days in the new Gregorian year. If January 1st falls on Monday through
Thursday (inclusive), the first week of the year starts on the Monday of that
week, which lies in the previous year unless January 1st is itself a Monday.
If January 1st falls on Friday through Sunday (inclusive), the following week
is the first week of the year, so up to three days at the start of January
are still counted as belonging to the previous ISO year.
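
For example, using Python's datetime module:

	>>> import datetime
	>>> datetime.date(2014, 12, 29).isocalendar()  # a Monday
	(2015, 1, 1)
	>>> datetime.date(2016, 1, 1).isocalendar()    # a Friday
	(2015, 53, 5)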

See also:
 - http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
 - https://docs.python.org/3.4/library/datetime.html#datetime.date.isocalendar

Options (may be omitted):
  -help           show this help message and exit
  -calc:PAGE      calculate key for PAGE and exit
  -file:FILE      load list of pages from FILE
  -force          override security options
  -locale:LOCALE  switch to locale LOCALE
  -namespace:NS   only archive pages from a given namespace
  -page:PAGE      archive a single PAGE, default ns is a user talk page
  -salt:SALT      specify salt
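
A typical invocation of this local copy (also shown by its -help output):

	archivebot.py -t Usuari:ArxivaBot/Arxivador Usuari:VriuBot/Arxivador -E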
"""
#
# (C) Misza13, 2006-2010
# (C) xqt, 2009-2014
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals

__version__ = '$Id$'
#
import datetime, locale, os, re, sys, time
from hashlib import md5
from math import ceil

sys.path.append("/shared/pywikipedia/core")
import pywikibot
from pywikibot import i18n
from pywikibot import pagegenerators as pagegen  # used by get_current_index()
from pywikibot.textlib import TimeStripper
from pywikibot.textlib import to_local_digits

#pyuserlib
sys.path.append("/data/project/cobain/public_html/pyuserlib")
from util import ArgParser, yellow, blue

ZERO = datetime.timedelta(0)


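# Per-language settings: locale names for Windows and Unix (presumably for
# the -locale option) and a regex of local "done/closed" template names used
# by the done(<delay>) rule in DiscussionThread.should_be_archived().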
strings = {
	"ca": {
		"win": "Catalan",
		"unix": "ca_ES.utf8",
		"done": "[Ff]et|[Nn]o fet|[Tt]ancat"
	},
}

class ArchiveBotSiteConfigError(pywikibot.Error):

	"""There is an error originated by archivebot's on-site configuration."""


class MalformedConfigError(ArchiveBotSiteConfigError):

	"""There is an error in the configuration template."""


class MissingConfigError(ArchiveBotSiteConfigError):

	"""
	The config is missing in the header.

	It's in one of the threads or transcluded from another page.
	"""


class AlgorithmError(MalformedConfigError):

	"""Invalid specification of archiving algorithm."""


class ArchiveSecurityError(ArchiveBotSiteConfigError):

	"""
	Page title is not a valid archive of page being archived.

	The page title is neither a subpage of the page being archived,
	nor does it match the key specified in the archive configuration template.
	"""


def str2localized_duration(site, string):
	"""
	Localise a shorthand duration.

	Translates a duration written in shorthand notation (e.g. "24h", "7d")
	into an expression in the local language of the wiki ("24 hours", "7 days").
	"""
	template = None
	if string[-1] == 'd':
		template = site.mediawiki_message('Days')
	elif string[-1] == 'h':
		template = site.mediawiki_message('Hours')
	if template:
		exp = i18n.translate(site.code, template, int(string[:-1]))
		return to_local_digits(exp.replace('$1', string[:-1]), site.code)
	else:
		return to_local_digits(string, site.code)

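# For example, on a Catalan wiki str2localized_duration(site, "7d") should
# return something like u"7 dies" (hypothetical output; the exact wording
# comes from the wiki's 'Days' MediaWiki message).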

def str2time(string):
	"""
	Return a timedelta for a shorthand duration.

	Accepts a string defining a time period:
	7d - 7 days
	36h - 36 hours
	Returns the corresponding timedelta object.
	"""
	if string.endswith('d'):
		return datetime.timedelta(days=int(string[:-1]))
	elif string.endswith('h'):
		return datetime.timedelta(hours=int(string[:-1]))
	else:
		return datetime.timedelta(seconds=int(string))


def str2size(string):
	"""
	Return a size for a shorthand size.

	Accepts a string defining a size:
	1337 - 1337 bytes
	150K - 150 kilobytes
	2M - 2 megabytes
	Returns a tuple (size,unit), where size is an integer and unit is
	'B' (bytes) or 'T' (threads).

	"""
	r = re.search(r'(\d+) *([BkKMT]?)', string)
	val, unit = (int(r.group(1)), r.group(2))
	if unit == 'M':
		val *= 1024
		unit = 'K'
	if unit in ('K', 'k'):
		val *= 1024
	if unit != 'T':
		unit = 'B'
	return val, unit

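# Note that in str2size above "M" is converted to bytes in two steps
# (M -> K -> B, i.e. multiplied by 1024 twice), while unit 'T' keeps the
# value as a thread count rather than a byte count.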

def generate_transclusions(site, template, namespaces=[]):
	"""Generate pages that transclude the given template."""
	pywikibot.output(u'Fetching template transclusions...')
	transclusion_page = pywikibot.Page(site, template, ns=10)
	return transclusion_page.getReferences(onlyTemplateInclusion=True,
										   follow_redirects=False,
										   namespaces=namespaces)


def template_title_regex(tpl_page):
	"""
	Return a regex that matches variations of the template title.

	It supports the transcluding variant as well as localized namespaces and
	case-insensitivity depending on the namespace.

	@param tpl_page: The template page
	@type tpl_page: Page
	"""
	ns = tpl_page.site.namespaces[tpl_page.namespace()]
	marker = '?' if ns.id == 10 else ''
	title = tpl_page.title(withNamespace=False)
	if ns.case != 'case-sensitive':
		title = '[%s%s]%s' % (re.escape(title[0].upper()),
							  re.escape(title[0].lower()),
							  re.escape(title[1:]))
	else:
		title = re.escape(title)

	return re.compile(r'(?:(?:%s):)%s%s' % (u'|'.join(ns), marker, title))


class TZoneUTC(datetime.tzinfo):

	"""Class building a UTC tzinfo object."""

	def utcoffset(self, dt):  # pylint: disable=unused-argument
		return ZERO

	def tzname(self, dt):  # pylint: disable=unused-argument
		return 'UTC'

	def dst(self, dt):  # pylint: disable=unused-argument
		return ZERO

	def __repr__(self):
		return "%s()" % self.__class__.__name__


class DiscussionThread(object):

	"""
	An object representing a discussion thread on a page.

	It represents something that is of the form:

	== Title of thread ==

	Thread content here. ~~~~
	:Reply, etc. ~~~~
	"""

	def __init__(self, title, now, timestripper):
		self.title = title
		self.now = now
		self.ts = timestripper
		self.code = self.ts.site.code
		self.content = ""
		self.timestamp = None

	def __repr__(self):
		return '%s("%s",%d bytes)' \
			   % (self.__class__.__name__, self.title,
				  len(self.content.encode('utf-8')))

	def feed_line(self, line):
		if not self.content and not line:
			return

		self.content += line + '\n'
		timestamp = self.ts.timestripper(line)

		if not self.timestamp:  # first time
			self.timestamp = timestamp

		if timestamp:
			self.timestamp = max(self.timestamp, timestamp)

	def size(self):
		return len(self.title.encode('utf-8')) + len(
			self.content.encode('utf-8')) + 12

	def to_text(self):
		return u"== %s ==\n\n%s" % (self.title, self.content)

	def should_be_archived(self, archiver):
		algo = archiver.get_attr('algo')
		re_t = re.search(r'^old\((.*)\)$', algo)
		pywikibot.output(u"{} {} {}".format(algo, re_t.group(1) if re_t else "<no old found>", self.timestamp))
		#old
		if re_t:
			if not self.timestamp:
				return ''
			# TODO: handle this:
			# return 'unsigned'
			maxage = str2time(re_t.group(1))
			if self.now - self.timestamp > maxage:
				duration = str2localized_duration(archiver.site, re_t.group(1))
				return i18n.twtranslate(self.code,
										'archivebot-older-than',
										{'duration': duration})
		# done(<delay>) rule: threads closed with a local "done" template
		lang = archiver.site.language()
		if lang in strings and strings[lang]['done']:
			done = re.search(ur'\{\{ *(%s)[^}]*\}\}' % strings[lang]['done'],
							 self.content, re.U)
			re_t = re.search(r'^done\((.*)\)$', algo)
			pywikibot.output(
				u"{} {} {} {}".format(
					algo,
					re_t.group(1) if re_t else "<no done found>",
					done.group(1) if done else "<no done algo found>",
					self.timestamp
				)
			)
			if done and re_t:
				if not self.timestamp:
					return ''
				maxage = str2time(re_t.group(1))
				if self.now - self.timestamp > maxage:
					duration = str2localized_duration(archiver.site,
													  re_t.group(1))
					return i18n.twtranslate(self.code,
											'archivebot-older-than',
											{'duration': duration})
		return ''
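
# In addition to the upstream old(<delay>) rule, this copy also understands a
# done(<delay>) rule: a thread is archived once it both carries one of the
# local "done" templates (see the `strings` table above) and its newest
# timestamp is older than <delay>, e.g. (hypothetical) "algo = done(7d)".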

class DiscussionPage(pywikibot.Page):

	"""
	A class that represents a single page of discussion threads.

	Feed threads to it and run an update() afterwards.
	"""

	def __init__(self, source, archiver, params=None):
		super(DiscussionPage, self).__init__(source)
		self.threads = []
		self.full = False
		self.archiver = archiver
		# For testing purposes archiver may be None, in which case a
		# DiscussionPage can be created like this:
		# >>> import pwb, pywikibot as py
		# >>> from scripts.archivebot import DiscussionPage
		# >>> d = DiscussionPage(py.Page(py.Site(), <talk page name>), None)
		if archiver is None:
			self.timestripper = TimeStripper(self.site)
		else:
			self.timestripper = self.archiver.timestripper
		self.params = params
		self.now = datetime.datetime.utcnow().replace(tzinfo=TZoneUTC())
		try:
			self.load_page()
		except pywikibot.NoPage:
			self.header = archiver.get_attr('archiveheader',
											i18n.twtranslate(
												self.site.code,
												'archivebot-archiveheader'))
			if self.params:
				self.header = self.header % self.params

	def load_page(self):
		"""Load the page to be archived and break it up into threads."""
		self.header = ''
		self.threads = []
		self.archives = {}
		self.archived_threads = 0
		lines = self.get().split('\n')
		found = False  # Reading header
		cur_thread = None
		for line in lines:
			thread_header = re.search('^== *([^=].*?) *== *$', line)
			if thread_header:
				found = True  # Reading threads now
				if cur_thread:
					self.threads.append(cur_thread)
				cur_thread = DiscussionThread(thread_header.group(1), self.now,
											  self.timestripper)
			else:
				if found:
					cur_thread.feed_line(line)
				else:
					self.header += line + '\n'
		if cur_thread:
			self.threads.append(cur_thread)
		# This extra info is not desirable when run under the unittest
		# framework, which may be run either directly or via setup.py
		if pywikibot.calledModuleName() not in ['archivebot_tests', 'setup']:
			pywikibot.output(u'%d Threads found on %s'
							 % (len(self.threads), self))

	def feed_thread(self, thread, max_archive_size=(250 * 1024, 'B')):
		self.threads.append(thread)
		self.archived_threads += 1
		if max_archive_size[1] == 'B':
			if self.size() >= max_archive_size[0]:
				self.full = True
		elif max_archive_size[1] == 'T':
			if len(self.threads) >= max_archive_size[0]:
				self.full = True
		return self.full

	def size(self):
		return len(self.header.encode('utf-8')) + sum(t.size()
													  for t in self.threads)

	def update(self, summary, sort_threads=False):
		global changes
		if sort_threads:
			pywikibot.output(u'Sorting threads...')
			self.threads.sort(key=lambda t: t.timestamp)
		newtext = re.sub('\n*$', '\n\n', self.header)  # Fix trailing newlines
		for t in self.threads:
			newtext += t.to_text()
		if self.full:
			summary += ' ' + i18n.twtranslate(self.site.code,
											  'archivebot-archive-full')
		if args.edit:
			self.text = newtext
			self.save(summary)
			changes += 1
		else:
			pywikibot.showDiff(self.text, newtext)

class PageArchiver(object):

	"""A class that encapsulates all archiving methods.

	__init__ expects a pywikibot.Page object.
	Execute by running the .run() method.
	"""

	algo = 'none'

	def __init__(self, page, tpl, salt, force=False, args_=None):
		self.attributes = {
			'algo': ['old(24h)', False],
			'archive': ['', False],
			'maxarchivesize': ['1000M', False],
			'counter': ['1', False],
			'key': ['', False],
		}
		self.salt = salt
		self.force = force
		self.site = page.site
		self.tpl = pywikibot.Page(self.site, tpl) if tpl else None
		self.args = args_
		self.timestripper = TimeStripper(site=self.site)
		self.page = DiscussionPage(page, self)
		self.load_config()
		self.comment_params = {
			'from': self.page.title(),
		}
		self.archives = {}
		self.archived_threads = 0
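		# Map 1..12 to the wiki's long and short month names, used for the
		# %(monthname)s and %(monthnameshort)s archive-name variables.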
		self.month_num2orig_names = {}
		for n, (_long, _short) in enumerate(self.site.months_names):
			self.month_num2orig_names[n + 1] = {"long": _long, "short": _short}

	def get_attr(self, attr, default=''):
		return self.attributes.get(attr, [default])[0]

	def set_attr(self, attr, value, out=True):
		if attr == 'archive':
			value = value.replace('_', ' ')
		self.attributes[attr] = [value, out]

	def saveables(self):
		return [a for a in self.attributes if self.attributes[a][1] and
				a != 'maxage']

	def attr2text(self):
		if not self.tpl:
			return ''
		return '{{%s\n%s\n}}' \
			   % (self.tpl.title(withNamespace=(self.tpl.namespace() != 10)),
				  '\n'.join('|%s = %s' % (a, self.get_attr(a))
							for a in self.saveables()))

	def key_ok(self):
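		# A valid key is the hex MD5 digest of salt + '\n' + page title +
		# '\n'; it lets archives live outside the page's own subpages
		# (compute it offline with the -calc and -salt options).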
		s = md5()
		s.update(self.salt + '\n')
		s.update(self.page.title().encode('utf8') + '\n')
		return self.get_attr('key') == s.hexdigest()

	def load_config(self):
		if self.tpl:
			pywikibot.output(u'Looking for: {{%s}} in %s' % (self.tpl.title(), self.page))
		else:
			pywikibot.output(
				u'Looking for %s config in table %s' % (
					self.page,
					self.args['table'].title()
				)
			)
		if not self.args:
			for tpl in self.page.templatesWithParams():
				if tpl[0] == pywikibot.Page(self.site, self.tpl.title(), ns=10):
					for param in tpl[1]:
						item, value = param.split('=', 1)
						self.set_attr(item.strip(), value.strip())
					break
			else:
				raise MissingConfigError(u'Missing or malformed template')
		else:
			for arg in self.args:
				if self.args[arg]:
					self.set_attr(arg, self.args[arg])

		if not self.get_attr('algo', ''):
			raise MissingConfigError('Missing argument "algo" in template')

	def feed_archive(self, archive, thread, max_archive_size, params=None):
		"""Feed the thread to one of the archives.

		If it doesn't exist yet, create it.
		If archive name is an empty string (or None),
		discard the thread.
		Also checks for security violations.

		"""
		title = archive.title()
		if not title:
			return
		if not self.force \
		   and not self.page.title() + '/' == title[:len(self.page.title()) + 1] \
		   and not self.key_ok():
			raise ArchiveSecurityError(
				u"Archive page %s does not start with page title (%s)!"
				% (archive, self.page.title()))
		if title not in self.archives:
			self.archives[title] = DiscussionPage(archive, self, params)
		return self.archives[title].feed_thread(thread, max_archive_size)

	def get_current_index(self):
		"""Count existing archive subpages to initialise %(counter)d."""
		prefix = self.get_attr('archive').split("%(counter)d")[0]
		subpages = pagegen.PrefixingPageGenerator(prefix, includeredirects=False)
		i = 0
		for subpage in subpages:
			if subpage.title().count("/") == self.get_attr('archive').count("/"):
				i += 1
		return i or 1

	def analyze_page(self):
		max_arch_size = str2size(self.get_attr('maxarchivesize'))
		# arch_counter = int(self.get_attr('counter', '1'))
		arch_counter = self.get_current_index() \
			if "counter" in self.get_attr('archive') else 1
		oldthreads = self.page.threads
		self.page.threads = []
		whys = []
		pywikibot.output(u'Processing %d threads' % len(oldthreads))
		for t in oldthreads:
			if len(oldthreads) - self.archived_threads \
			   <= int(self.get_attr('minthreadsleft', 5)):
				self.page.threads.append(t)
				continue  # because too few threads would be left otherwise
			# TODO: Make an option so that unstamped (unsigned) posts get
			# archived.
			why = t.should_be_archived(self)
			if why:
				archive = self.get_attr('archive')
				lang = self.site.lang
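				# Build the %(...)s archive-name variables documented in the
				# module docstring from the thread's newest timestamp.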
				params = {
					'counter': to_local_digits(arch_counter, lang),
					'year': to_local_digits(t.timestamp.year, lang),
					'isoyear': to_local_digits(t.timestamp.isocalendar()[0], lang),
					'isoweek': to_local_digits(t.timestamp.isocalendar()[1], lang),
					'quarter': to_local_digits(
						int(ceil(float(t.timestamp.month) / 3)), lang),
					'month': to_local_digits(t.timestamp.month, lang),
					'monthname': self.month_num2orig_names[t.timestamp.month]['long'],
					'monthnameshort': self.month_num2orig_names[t.timestamp.month]['short'],
					'week': to_local_digits(
						int(time.strftime('%W', t.timestamp.timetuple())), lang),
				}
				archive = pywikibot.Page(self.site, archive % params)
				if self.feed_archive(archive, t, max_arch_size, params):
					arch_counter += 1
					self.set_attr('counter', str(arch_counter))
				whys.append(why)
				self.archived_threads += 1
			else:
				self.page.threads.append(t)
		return set(whys)

	def run(self):
		if not self.page.botMayEdit():
			return
		whys = self.analyze_page()
		mintoarchive = int(self.get_attr('minthreadstoarchive', 2))
		if self.archived_threads < mintoarchive:
			# We might not want to archive a measly few threads
			# (lowers edit frequency)
			pywikibot.output(u'Only %d (< %d) threads are old enough. Skipping'
							 % (self.archived_threads, mintoarchive))
			return
		if whys:
			# Search for the marker template
			if self.tpl:
				rx = re.compile(r'\{\{%s\s*?\n.*?\n\}\}'
								% (template_title_regex(self.tpl).pattern), re.DOTALL)
				if not rx.search(self.page.header):
					raise MalformedConfigError(
						"Couldn't find the template in the header"
					)
			else:
				rx = re.compile('')

			pywikibot.output(u'Archiving %d thread(s).' % self.archived_threads)
			# Save the archives first (so that bugs don't cause a loss of data)
			for a in sorted(self.archives.keys()):
				self.comment_params['count'] = self.archives[a].archived_threads
				comment = i18n.twntranslate(self.site.code,
											'archivebot-archive-summary',
											self.comment_params)
				self.archives[a].update(comment)

			# Save the page itself
			self.page.header = rx.sub(self.attr2text(), self.page.header)
			self.comment_params['count'] = self.archived_threads
			comma = self.site.mediawiki_message('comma-separator')
			self.comment_params['archives'] \
				= comma.join(a.title(asLink=True)
							 for a in self.archives.values())
			self.comment_params['why'] = comma.join(whys)
			comment = i18n.twntranslate(self.site.code,
										'archivebot-page-summary',
										self.comment_params)
			self.page.update(comment)


def multipage_archiver(title, salt, force, ns_list=[]):
	"""Archive every page listed in the on-wiki configuration table."""
	global visited
	table = pywikibot.Page(site, title)
	params = re.compile(
		ur"\| *(?P<page>.+?) *\|\| *(?P<archive>.+?) *\|\| *(?P<algo>.+?) *\|\| *(?P<maxarchivesize>.+?) *\|\| *"
		ur"(?P<minthreadsleft>.+?) *\|\| *(?P<minthreadstoarchive>.+?) *\|\| *(?:<nowiki>)?(?P<archiveheader>.+?)(?:</nowiki>)?? *\|\|(?P<key>.*?)\n"
	)
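	# A hypothetical row matched by the regex above:
	# | Discussió:Exemple || ~/Arxiu %(counter)d || old(30d) || 150K || 5 || 2 || {{talkarchive}} ||
	# "~" is replaced below by the page title; "-" (or an en/em dash) in a
	# cell means "use the default".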
	keys = ["archive", "algo", "maxarchivesize", "minthreadsleft", "minthreadstoarchive", "archiveheader", "key"]
	titles = {}
	sorted_titles = []
	for v in params.findall(table.text):
		title = v[0]
		if ns_list and pywikibot.Page(site, title).namespace() not in ns_list:
			continue
		titles[title] = dict(zip(keys, [x.replace("~", title)
										if x and x.strip() not in u"-—–"
										else None for x in v[1:]]))
		titles[title]['table'] = table
		sorted_titles.append(title)

	for title in sorted_titles:
		if title.startswith(u"Viquipèdia:La taverna/"):
			visited.append(title)
		if title in visited:
			continue
		pywikibot.output(u"[%s] Parsing threads from [[%s]]" % (
			yellow(time.strftime("%c")),
			blue(title)))
		page = pywikibot.Page(site, title)
		a = PageArchiver(page, "", salt, force, titles[title])
		a.run()
		time.sleep(args.sleep)
		visited.append(title)
		if args.debug and changes >= args.debug:
			break

def main():
	"""
	Process command line arguments and invoke bot.

	If args is an empty list, sys.argv is used.

	@param args: command line arguments
	@type args: list of unicode
	"""

	if args.calc:
		if not args.salt:
			pywikibot.bot.suggest_help(missing_parameters=['--salt'])
			return False
		page = pywikibot.Page(site, args.calc)
		if page.exists():
			args.calc = page.title()
		else:
			pywikibot.output(u'NOTE: the specified page "%s" does not (yet) exist.' % args.calc)
		s = md5()
		s.update(args.salt + '\n')
		s.update(args.calc + '\n')
		pywikibot.output(u'key = ' + s.hexdigest())
		return

	if args.namespace is not None:
		ns = [str(args.namespace)]
	else:
		ns = []

	if args.fromtable:
		multipage_archiver(args.fromtable, args.salt, args.force, ns)

	for a in args.positional:
		pagelist = []
		a = pywikibot.Page(site, a, ns=10).title()
		if not args.filename and not args.pagename:
			for pg in generate_transclusions(site, a, ns):
				pagelist.append(pg)
		if args.filename:
			for pg in open(args.filename, 'r').readlines():
				pagelist.append(pywikibot.Page(site, pg.strip(), ns=10))
		if args.pagename:
			pagelist.append(pywikibot.Page(site, args.pagename, ns=3))
		pagelist.sort()
		for pg in pagelist:
			pywikibot.output(u'Processing %s' % pg)
			# Catching exceptions, so that errors in one page do not bail out
			# the entire process
			try:
				archiver = PageArchiver(pg, a, args.salt, args.force)
				archiver.run()
			except ArchiveBotSiteConfigError as e:
				# no stack trace for errors originated by pages on-site
				pywikibot.error('Missing or malformed template in page %s: %s'
								% (pg, e))
			except Exception:
				pywikibot.error(u'Error occurred while processing page %s' % pg)
				pywikibot.exception(tb=True)
			finally:
				time.sleep(args.sleep)

def get_months():
	# 2014-04-14 Coet. Keep the month names used on the wiki to map them
	# later to the month names used by the system locale.
	months = range(1, 13)
	try:
		# Linux: ask the locale directly
		sys_months = [locale.nl_langinfo(getattr(locale, "ABMON_%i" % i))
					  for i in months]
	except AttributeError:
		# Windows: locale.nl_langinfo is unavailable there
		sys_months = [time.strftime("%b", time.strptime("01-%02i-2014" % i,
														"%d-%m-%Y"))
					  for i in months]
	params = {
		'action': 'query',
		'meta': 'allmessages',
		'ammessages': sys_months,
		'amlang': site.lang,
	}
	q = pywikibot.data.api.Request(site=site, **params)
	data = q.submit()
	wiki_months = [x['*'] for x in data['query']['allmessages']]
	return dict(zip(wiki_months, sys_months))

def test():
	month_num2orig_names = {}
	for n, (_long, _short) in enumerate(site.months_names):
		month_num2orig_names[n + 1] = {"long": _long, "short": _short}
	origNames2monthNum = {}
	for n, (_long, _short) in enumerate(site.months_names, start=1):
		origNames2monthNum.update({_long: n, _short: n})
		# in some cases month in ~~~~ might end without dot even if
		# site.months_names do not.
		if _short.endswith('.'):
			origNames2monthNum[_short.rstrip(".")] = n
	print dict(enumerate(site.months_names, 1))
	ts = TimeStripper(site)
	timestamp = ts.timestripper(u"Disculpes pel retard, ho tenia oblidat. --V.Riullop (parlem-ne) 19:20, 24 juny 2015 (CEST)")
	maxage = str2time("30d")
	now = datetime.datetime.utcnow().replace(tzinfo=TZoneUTC())
	print "\nNOW %s\nTIMESTAMP %s\nMAXAGE %s\nSUB %s\nRES %s" % (
		now, timestamp, maxage,now - timestamp, now - timestamp > maxage
	)


if __name__ == '__main__':
	#archivebot.py -t Usuari:ArxivaBot/Arxivador Usuari:VriuBot/Arxivador
	p = ArgParser()
	p.set_argument("f", "--file")
	p.set_argument("L", "--locale", default="ca")
	p.set_argument("T", "tz", "--timezone")
	p.set_argument("c", "--calc")
	p.set_argument("s", "--salt")
	p.set_argument("F", "--force", type="bool", default=False)
	p.set_argument("n", "--filename")
	p.set_argument("P", "page", "--pagename")
	p.set_argument("N", "ns", "--namespace", type="int")
	p.set_argument("S", "--sleep", type="int", default=10)
	p.set_argument("t", "ft", "--fromtable")
	p.set_argument("D", "--debug", type="int", default=0)
	p.set_argument("E", "--edit")
	p.set_argument("h", "--help")
	p.set_argument("--test")
	args = p.parse()

	changes = 0
	visited = []
	site = pywikibot.Site(user="ArxivaBot")
	site.login()

	if args.help:
		print "archivebot.py -t Usuari:ArxivaBot/Arxivador Usuari:VriuBot/Arxivador -E"

	if args.test:
		test()
	else:
		print "BEGIN"
		main()