diff --git a/lib/README.md b/lib/README.md
index cf73cb0219caa5f5ed7d9878c9e2689ce3dcf772..bd4daba3106129b4a0d9dd2cbba5bead838a9117 100644
--- a/lib/README.md
+++ b/lib/README.md
@@ -17,14 +17,24 @@ Add the output to the list below to the appropriate location (based on the top-l
 Packages List
 =========
 ```
+babelfish==0.5.5
 beautifulsoup4==4.5.3
 bencode==1.0
 # certgen.py==d52975c # source: https://github.com/pyca/pyopenssl/blob/d52975cef3a36e18552aeb23de7c06aa73d76454/examples/certgen.py
+git+https://github.com/kurtmckee/feedparser.git@f1dd1bb923ebfe6482fc2521c1f150b4032289ec#egg=feedparser
 html5lib==0.999999999
   - six [required: Any, installed: 1.10.0]
   - webencodings [required: Any, installed: 0.5.1]
+IMDbPY==5.1.1
 Mako==1.0.6
   - MarkupSafe [required: >=0.9.2, installed: 1.0]
+markdown2==2.3.4
+PyGithub==1.34
+  - pyjwt [required: Any, installed: 1.5.0]
+PySocks==1.6.7
+  - win-inet-pton==1.0.1
+python-dateutil==2.6.0
+  - six [required: >=1.5, installed: 1.10.0]
 python-twitter==3.3
   - future [required: Any, installed: 0.16.0] # <-- Not really needed, so not installed
   - requests [required: Any, installed: 2.18.1]
@@ -43,4 +53,11 @@ tornado==4.5.1
   - certifi [required: Any, installed: 2017.4.17]
   - singledispatch [required: Any, installed: 3.4.0.3]
     - six [required: Any, installed: 1.10.0]
+tzlocal==1.4
+  - pytz [required: Any, installed: 2016.4]
+Unidecode==0.04.20
+validators==0.10
+  - decorator [required: >=3.4.0, installed: 4.0.10]
+  - six [required: >=1.4.0, installed: 1.10.0]
+xmltodict==0.11.0
 ```
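Not part of the patch: a minimal sketch of how the pins above could be cross-checked against the vendored copies under `lib/`. It assumes the listed packages are actually present in `lib/`, that each exposes a `__version__` attribute (not all of them do), and that the check is run from the repository root.

```python
import importlib
import sys

sys.path.insert(0, 'lib')  # prefer the vendored copies over site-packages

# Hypothetical subset of the pins above; extend as needed.
PINS = {
    'dateutil': '2.6.0',
    'markdown2': '2.3.4',
    'xmltodict': '0.11.0',
}

for name, pinned in sorted(PINS.items()):
    module = importlib.import_module(name)
    reported = getattr(module, '__version__', 'unknown')
    status = 'OK' if reported == pinned else 'MISMATCH'
    print('%-10s pinned=%-8s reported=%-8s %s' % (name, pinned, reported, status))
```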
diff --git a/lib/babelfish/data/get_files.py b/lib/babelfish/data/get_files.py
deleted file mode 100644
index aaa090cccc0ee6aa898fae793811e65b0fa4e501..0000000000000000000000000000000000000000
--- a/lib/babelfish/data/get_files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2013 the BabelFish authors. All rights reserved.
-# Use of this source code is governed by the 3-clause BSD license
-# that can be found in the LICENSE file.
-#
-from __future__ import unicode_literals
-import os.path
-import tempfile
-import zipfile
-import requests
-
-
-DATA_DIR = os.path.dirname(__file__)
-
-# iso-3166-1.txt
-print('Downloading ISO-3166-1 standard (ISO country codes)...')
-with open(os.path.join(DATA_DIR, 'iso-3166-1.txt'), 'w') as f:
-    r = requests.get('http://www.iso.org/iso/home/standards/country_codes/country_names_and_code_elements_txt.htm')
-    f.write(r.content.strip())
-
-# iso-639-3.tab
-print('Downloading ISO-639-3 standard (ISO language codes)...')
-with tempfile.TemporaryFile() as f:
-    r = requests.get('http://www-01.sil.org/iso639-3/iso-639-3_Code_Tables_20130531.zip')
-    f.write(r.content)
-    with zipfile.ZipFile(f) as z:
-        z.extract('iso-639-3.tab', DATA_DIR)
-
-# iso-15924
-print('Downloading ISO-15924 standard (ISO script codes)...')
-with tempfile.TemporaryFile() as f:
-    r = requests.get('http://www.unicode.org/iso15924/iso15924.txt.zip')
-    f.write(r.content)
-    with zipfile.ZipFile(f) as z:
-        z.extract('iso15924-utf8-20131012.txt', DATA_DIR)
-
-# opensubtitles supported languages
-print('Downloading OpenSubtitles supported languages...')
-with open(os.path.join(DATA_DIR, 'opensubtitles_languages.txt'), 'w') as f:
-    r = requests.get('http://www.opensubtitles.org/addons/export_languages.php')
-    f.write(r.content)
-
-print('Done!')
diff --git a/lib/dateutil/__init__.py b/lib/dateutil/__init__.py
index 1f160ea5ac51320449913d7f01e5fb8e3d40d31c..ba89aa70bd7ae634e020296db4becdeab9830533 100644
--- a/lib/dateutil/__init__.py
+++ b/lib/dateutil/__init__.py
@@ -1,2 +1,2 @@
 # -*- coding: utf-8 -*-
-__version__ = "2.5.0"
+__version__ = "2.6.0"
diff --git a/lib/dateutil/_common.py b/lib/dateutil/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd2a338608830d65ba75bfd25bd1f7e285f6547c
--- /dev/null
+++ b/lib/dateutil/_common.py
@@ -0,0 +1,33 @@
+"""
+Common code used in multiple modules.
+"""
+
+class weekday(object):
+    __slots__ = ["weekday", "n"]
+
+    def __init__(self, weekday, n=None):
+        self.weekday = weekday
+        self.n = n
+
+    def __call__(self, n):
+        if n == self.n:
+            return self
+        else:
+            return self.__class__(self.weekday, n)
+
+    def __eq__(self, other):
+        try:
+            if self.weekday != other.weekday or self.n != other.n:
+                return False
+        except AttributeError:
+            return False
+        return True
+
+    __hash__ = None
+
+    def __repr__(self):
+        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
+        if not self.n:
+            return s
+        else:
+            return "%s(%+d)" % (s, self.n)
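For readers of the patch: a short illustration of how the shared `weekday` base class behaves. The results follow directly from the `__call__`, `__eq__` and `__repr__` definitions above; it assumes the vendored `dateutil` in `lib/` is importable.

```python
from dateutil._common import weekday

MO = weekday(0)                # plain weekday, no ordinal
print(repr(MO))                # "MO"      (n is None, so only the name)
print(repr(MO(+2)))            # "MO(+2)"  (__call__ returns a new instance with n=2)
print(MO(None) is MO)          # True      (same n, so __call__ returns self)
print(weekday(0) == MO)        # True      (equality compares both weekday and n)
print(weekday(0, 1) == MO)     # False     (different n)
# Note: __hash__ = None, so instances are deliberately unhashable.
```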
diff --git a/lib/dateutil/easter.py b/lib/dateutil/easter.py
index 8d30c4ebdab098c9f35d934e6e91d5e7a49e4764..e4def97f966c83d08878da16ff105eb857b5fa4b 100644
--- a/lib/dateutil/easter.py
+++ b/lib/dateutil/easter.py
@@ -33,9 +33,9 @@ def easter(year, method=EASTER_WESTERN):
 
     These methods are represented by the constants:
 
-    EASTER_JULIAN   = 1
-    EASTER_ORTHODOX = 2
-    EASTER_WESTERN  = 3
+    * ``EASTER_JULIAN   = 1``
+    * ``EASTER_ORTHODOX = 2``
+    * ``EASTER_WESTERN  = 3``
 
     The default method is method 3.
 
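A small usage sketch of the constants referenced in the reworked docstring; the expected dates are taken from the easter test data removed later in this patch, and the snippet assumes the vendored `dateutil` is importable.

```python
from datetime import date
from dateutil.easter import easter, EASTER_WESTERN, EASTER_ORTHODOX

# Method 3 (EASTER_WESTERN) is the default.
assert easter(2017) == date(2017, 4, 16)
assert easter(2017, EASTER_WESTERN) == date(2017, 4, 16)

# In 2017 the Orthodox date happens to coincide with the Western one.
assert easter(2017, EASTER_ORTHODOX) == date(2017, 4, 16)

# Methods outside 1-3 are rejected.
try:
    easter(2017, 4)
except ValueError:
    print('invalid method')
```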
diff --git a/lib/dateutil/parser.py b/lib/dateutil/parser.py
index 759094089d9752b9aa5bf902ca325a90391a1242..147b3f2ca27082c2b4c51036b3130b396d1dc827 100644
--- a/lib/dateutil/parser.py
+++ b/lib/dateutil/parser.py
@@ -56,6 +56,10 @@ class _timelex(object):
         if isinstance(instream, text_type):
             instream = StringIO(instream)
 
+        if getattr(instream, 'read', None) is None:
+            raise TypeError('Parser must be a string or character stream, not '
+                            '{itype}'.format(itype=instream.__class__.__name__))
+
         self.instream = instream
         self.charstack = []
         self.tokenstack = []
@@ -464,7 +468,10 @@ class _ymd(list):
                     self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \
                    (yearfirst and self[1] <= 12 and self[2] <= 31):
                     # 99-01-01
-                    year, month, day = self
+                    if dayfirst and self[2] <= 12:
+                        year, day, month = self
+                    else:
+                        year, month, day = self
                 elif self[0] > 12 or (dayfirst and self[1] <= 12):
                     # 13-01-01
                     day, month, year = self
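A brief sketch, not part of the diff, of the two parser changes above: the early type check and the dayfirst handling when a two-digit year leads the string. The expected values follow from the added branches; it assumes the vendored `dateutil` is importable.

```python
from dateutil.parser import parse

# Non-string, non-stream input is now rejected up front with a TypeError
# instead of failing later inside the lexer.
try:
    parse(20030925)
except TypeError as e:
    print(e)  # Parser must be a string or character stream, not int

# With a leading two-digit year, dayfirst now decides how the remaining
# two fields are ordered (year-day-month rather than year-month-day).
print(parse("99-01-02"))                 # 1999-01-02 00:00:00
print(parse("99-01-02", dayfirst=True))  # 1999-02-01 00:00:00
```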
diff --git a/lib/dateutil/relativedelta.py b/lib/dateutil/relativedelta.py
index 0217d57f5f87c38d1b5a580891c04222904498e0..7e3bd12ac84a4069b7ebdb0d0973a20987ea4f8e 100644
--- a/lib/dateutil/relativedelta.py
+++ b/lib/dateutil/relativedelta.py
@@ -8,39 +8,12 @@ from math import copysign
 from six import integer_types
 from warnings import warn
 
-__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
-
-
-class weekday(object):
-    __slots__ = ["weekday", "n"]
-
-    def __init__(self, weekday, n=None):
-        self.weekday = weekday
-        self.n = n
-
-    def __call__(self, n):
-        if n == self.n:
-            return self
-        else:
-            return self.__class__(self.weekday, n)
-
-    def __eq__(self, other):
-        try:
-            if self.weekday != other.weekday or self.n != other.n:
-                return False
-        except AttributeError:
-            return False
-        return True
-
-    def __repr__(self):
-        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
-        if not self.n:
-            return s
-        else:
-            return "%s(%+d)" % (s, self.n)
+from ._common import weekday
 
 MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
 
+__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
 
 class relativedelta(object):
     """
@@ -69,7 +42,7 @@ class relativedelta(object):
             Relative information, may be negative (argument is plural); adding
             or subtracting a relativedelta with relative information performs
             the corresponding aritmetic operation on the original datetime value
-            with the information in the relativedelta.  
+            with the information in the relativedelta.
 
         weekday:
             One of the weekday instances (MO, TU, etc). These instances may
@@ -299,16 +272,16 @@ class relativedelta(object):
 
         >>> relativedelta(days=1.5, hours=2).normalized()
         relativedelta(days=1, hours=14)
-        
+
         :return:
             Returns a :class:`dateutil.relativedelta.relativedelta` object.
         """
         # Cascade remainders down (rounding each to roughly nearest microsecond)
         days = int(self.days)
-        
+
         hours_f = round(self.hours + 24 * (self.days - days), 11)
         hours = int(hours_f)
-        
+
         minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
         minutes = int(minutes_f)
 
@@ -347,8 +320,25 @@ class relativedelta(object):
                                  second=other.second or self.second,
                                  microsecond=(other.microsecond or
                                               self.microsecond))
+        if isinstance(other, datetime.timedelta):
+            return self.__class__(years=self.years,
+                                  months=self.months,
+                                  days=self.days + other.days,
+                                  hours=self.hours,
+                                  minutes=self.minutes,
+                                  seconds=self.seconds + other.seconds,
+                                  microseconds=self.microseconds + other.microseconds,
+                                  leapdays=self.leapdays,
+                                  year=self.year,
+                                  month=self.month,
+                                  day=self.day,
+                                  weekday=self.weekday,
+                                  hour=self.hour,
+                                  minute=self.minute,
+                                  second=self.second,
+                                  microsecond=self.microsecond)
         if not isinstance(other, datetime.date):
-            raise TypeError("unsupported type for add operation")
+            return NotImplemented
         elif self._has_time and not isinstance(other, datetime.datetime):
             other = datetime.datetime.fromordinal(other.toordinal())
         year = (self.year or other.year)+self.years
@@ -397,7 +387,7 @@ class relativedelta(object):
 
     def __sub__(self, other):
         if not isinstance(other, relativedelta):
-            raise TypeError("unsupported type for sub operation")
+            return NotImplemented   # In case the other object defines __rsub__
         return self.__class__(years=self.years - other.years,
                              months=self.months - other.months,
                              days=self.days - other.days,
@@ -454,7 +444,11 @@ class relativedelta(object):
     __nonzero__ = __bool__
 
     def __mul__(self, other):
-        f = float(other)
+        try:
+            f = float(other)
+        except TypeError:
+            return NotImplemented
+
         return self.__class__(years=int(self.years * f),
                              months=int(self.months * f),
                              days=int(self.days * f),
@@ -476,7 +470,7 @@ class relativedelta(object):
 
     def __eq__(self, other):
         if not isinstance(other, relativedelta):
-            return False
+            return NotImplemented
         if self.weekday or other.weekday:
             if not self.weekday or not other.weekday:
                 return False
@@ -501,11 +495,18 @@ class relativedelta(object):
                 self.second == other.second and
                 self.microsecond == other.microsecond)
 
+    __hash__ = None
+
     def __ne__(self, other):
         return not self.__eq__(other)
 
     def __div__(self, other):
-        return self.__mul__(1/float(other))
+        try:
+            reciprocal = 1 / float(other)
+        except TypeError:
+            return NotImplemented
+
+        return self.__mul__(reciprocal)
 
     __truediv__ = __div__
 
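Outside the patch itself, a sketch of what the relativedelta changes above enable: timedelta operands are folded into the relative delta, and unsupported operands now defer via NotImplemented so the other operand gets a chance (with the eventual TypeError raised by the interpreter). It assumes the vendored `dateutil` is importable.

```python
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta

# Adding a timedelta no longer raises; its absolute components are merged in.
rd = relativedelta(months=1) + timedelta(days=2, seconds=30)
print(rd)                      # relativedelta(months=+1, days=+2, seconds=+30)
print(date(2017, 1, 31) + rd)  # 2017-03-02 00:00:30 (clamped to 28 Feb, then +2 days, +30 s)

# Unsupported operands return NotImplemented instead of raising directly.
try:
    relativedelta(days=1) + "not a date"
except TypeError:
    print('still a TypeError, but raised by Python itself')

# relativedelta is now explicitly unhashable (__hash__ = None), matching
# the fact that it defines __eq__.
```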
diff --git a/lib/dateutil/rrule.py b/lib/dateutil/rrule.py
index 22d6dfca1f3ac0d7a7c8b0cddac93580ec494520..da94351b9eae1ac229581a674cecad21ad517b1e 100644
--- a/lib/dateutil/rrule.py
+++ b/lib/dateutil/rrule.py
@@ -16,9 +16,11 @@ except ImportError:
     from fractions import gcd
 
 from six import advance_iterator, integer_types
-from six.moves import _thread
+from six.moves import _thread, range
 import heapq
 
+from ._common import weekday as weekdaybase
+
 # For warning about deprecation of until and count
 from warnings import warn
 
@@ -58,37 +60,15 @@ FREQNAMES = ['YEARLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY','SECONDLY']
 easter = None
 parser = None
 
-
-class weekday(object):
-    __slots__ = ["weekday", "n"]
-
-    def __init__(self, weekday, n=None):
+class weekday(weekdaybase):
+    """
+    This version of weekday does not allow n = 0.
+    """
+    def __init__(self, wkday, n=None):
         if n == 0:
-            raise ValueError("Can't create weekday with n == 0")
-
-        self.weekday = weekday
-        self.n = n
+            raise ValueError("Can't create weekday with n==0")
 
-    def __call__(self, n):
-        if n == self.n:
-            return self
-        else:
-            return self.__class__(self.weekday, n)
-
-    def __eq__(self, other):
-        try:
-            if self.weekday != other.weekday or self.n != other.n:
-                return False
-        except AttributeError:
-            return False
-        return True
-
-    def __repr__(self):
-        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
-        if not self.n:
-            return s
-        else:
-            return "%s(%+d)" % (s, self.n)
+        super(weekday, self).__init__(wkday, n)
 
 MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
 
@@ -707,7 +687,7 @@ class rrule(rrulebase):
             parts.append('INTERVAL=' + str(self._interval))
 
         if self._wkst:
-            parts.append('WKST=' + str(self._wkst))
+            parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
 
         if self._count:
             parts.append('COUNT=' + str(self._count))
@@ -751,6 +731,21 @@ class rrule(rrulebase):
         output.append(';'.join(parts))
         return '\n'.join(output)
 
+    def replace(self, **kwargs):
+        """Return new rrule with same attributes except for those attributes given new
+           values by whichever keyword arguments are specified."""
+        new_kwargs = {"interval": self._interval,
+                      "count": self._count,
+                      "dtstart": self._dtstart,
+                      "freq": self._freq,
+                      "until": self._until,
+                      "wkst": self._wkst,
+                      "cache": False if self._cache is None else True }
+        new_kwargs.update(self._original_rule)
+        new_kwargs.update(kwargs)
+        return rrule(**new_kwargs)
+
+
     def _iter(self):
         year, month, day, hour, minute, second, weekday, yearday, _ = \
             self._dtstart.timetuple()
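An illustration, not part of the patch, of the new `replace()` helper added above; it assumes the vendored `dateutil` is importable.

```python
from datetime import datetime
from dateutil.rrule import rrule, DAILY, WEEKLY

base = rrule(DAILY, dtstart=datetime(2017, 1, 1), count=3)
print([dt.date().isoformat() for dt in base])
# ['2017-01-01', '2017-01-02', '2017-01-03']

# replace() rebuilds the rule from the stored settings plus the overrides;
# attributes that are not overridden (dtstart, count, interval, ...) carry over.
weekly = base.replace(freq=WEEKLY)
print([dt.date().isoformat() for dt in weekly])
# ['2017-01-01', '2017-01-08', '2017-01-15']
```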
diff --git a/lib/dateutil/test/_common.py b/lib/dateutil/test/_common.py
deleted file mode 100644
index 775738c69307a515e2d665f7dffd36dccb8e78f6..0000000000000000000000000000000000000000
--- a/lib/dateutil/test/_common.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from __future__ import unicode_literals
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
-
-import os
-import subprocess
-import warnings
-
-
-class WarningTestMixin(object):
-    # Based on https://stackoverflow.com/a/12935176/467366
-    class _AssertWarnsContext(warnings.catch_warnings):
-        def __init__(self, expected_warnings, parent, **kwargs):
-            super(WarningTestMixin._AssertWarnsContext, self).__init__(**kwargs)
-
-            self.parent = parent
-            try:
-                self.expected_warnings = list(expected_warnings)
-            except TypeError:
-                self.expected_warnings = [expected_warnings]
-
-            self._warning_log = []
-
-        def __enter__(self, *args, **kwargs):
-            rv = super(WarningTestMixin._AssertWarnsContext, self).__enter__(*args, **kwargs)
-
-            if self._showwarning is not self._module.showwarning:
-                super_showwarning = self._module.showwarning
-            else:
-                super_showwarning = None
-
-            def showwarning(*args, **kwargs):
-                if super_showwarning is not None:
-                    super_showwarning(*args, **kwargs)
-
-                self._warning_log.append(warnings.WarningMessage(*args, **kwargs))
-
-            self._module.showwarning = showwarning
-            return rv
-
-        def __exit__(self, *args, **kwargs):
-            super(WarningTestMixin._AssertWarnsContext, self).__exit__(self, *args, **kwargs)
-
-            self.parent.assertTrue(any(issubclass(item.category, warning)
-                                       for warning in self.expected_warnings
-                                       for item in self._warning_log))
-
-    def assertWarns(self, warning, callable=None, *args, **kwargs):
-        warnings.simplefilter('always')
-        context = self.__class__._AssertWarnsContext(warning, self)
-        if callable is None:
-            return context
-        else:
-            with context:
-                callable(*args, **kwargs)
-
-
-class TZWinContext(object):
-    """ Context manager for changing local time zone on Windows """
-    @classmethod
-    def tz_change_allowed(cls):
-        # Allowing dateutil to change the local TZ is set as a local environment
-        # flag.
-        return bool(os.environ.get('DATEUTIL_MAY_CHANGE_TZ', False))
-
-    def __init__(self, tzname):
-        self.tzname = tzname
-        self._old_tz = None
-
-    def __enter__(self):
-        if not self.tz_change_allowed():
-            raise ValueError('Environment variable DATEUTIL_MAY_CHANGE_TZ ' + 
-                             'must be true.')
-
-        self._old_tz = self.get_current_tz()
-        self.set_current_tz(self.tzname)
-
-    def __exit__(self, type, value, traceback):
-        if self._old_tz is not None:
-            self.set_current_tz(self._old_tz)
-
-    def get_current_tz(self):
-        p = subprocess.Popen(['tzutil', '/g'], stdout=subprocess.PIPE)
-
-        ctzname, err = p.communicate()
-        ctzname = ctzname.decode()     # Popen returns 
-
-        if p.returncode:
-            raise OSError('Failed to get current time zone: ' + err)
-
-        return ctzname
-
-    def set_current_tz(self, tzname):
-        p = subprocess.Popen('tzutil /s "' + tzname + '"')
-
-        out, err = p.communicate()
-
-        if p.returncode:
-            raise OSError('Failed to set current time zone: ' +
-                          (err or 'Unknown error.'))
\ No newline at end of file
diff --git a/lib/dateutil/test/test_easter.py b/lib/dateutil/test/test_easter.py
deleted file mode 100644
index 6897e88bef07ef3dc5acbd7da5ea24593ddae13c..0000000000000000000000000000000000000000
--- a/lib/dateutil/test/test_easter.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from dateutil.easter import easter
-from dateutil.easter import EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN
-
-from datetime import date
-
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
-
-# List of easters between 1990 and 2050
-western_easter_dates = [
-    date(1990, 4, 15), date(1991, 3, 31), date(1992, 4, 19), date(1993, 4, 11),
-    date(1994, 4,  3), date(1995, 4, 16), date(1996, 4,  7), date(1997, 3, 30),
-    date(1998, 4, 12), date(1999, 4,  4),
-
-    date(2000, 4, 23), date(2001, 4, 15), date(2002, 3, 31), date(2003, 4, 20),
-    date(2004, 4, 11), date(2005, 3, 27), date(2006, 4, 16), date(2007, 4,  8),
-    date(2008, 3, 23), date(2009, 4, 12),
-
-    date(2010, 4,  4), date(2011, 4, 24), date(2012, 4,  8), date(2013, 3, 31), 
-    date(2014, 4, 20), date(2015, 4,  5), date(2016, 3, 27), date(2017, 4, 16),
-    date(2018, 4,  1), date(2019, 4, 21),
-
-    date(2020, 4, 12), date(2021, 4,  4), date(2022, 4, 17), date(2023, 4,  9),
-    date(2024, 3, 31), date(2025, 4, 20), date(2026, 4,  5), date(2027, 3, 28),
-    date(2028, 4, 16), date(2029, 4,  1),
-
-    date(2030, 4, 21), date(2031, 4, 13), date(2032, 3, 28), date(2033, 4, 17),
-    date(2034, 4,  9), date(2035, 3, 25), date(2036, 4, 13), date(2037, 4,  5),
-    date(2038, 4, 25), date(2039, 4, 10),
-
-    date(2040, 4,  1), date(2041, 4, 21), date(2042, 4,  6), date(2043, 3, 29),
-    date(2044, 4, 17), date(2045, 4,  9), date(2046, 3, 25), date(2047, 4, 14),
-    date(2048, 4,  5), date(2049, 4, 18), date(2050, 4, 10)
-    ]
-
-orthodox_easter_dates = [
-    date(1990, 4, 15), date(1991, 4,  7), date(1992, 4, 26), date(1993, 4, 18),
-    date(1994, 5,  1), date(1995, 4, 23), date(1996, 4, 14), date(1997, 4, 27),
-    date(1998, 4, 19), date(1999, 4, 11),
-
-    date(2000, 4, 30), date(2001, 4, 15), date(2002, 5,  5), date(2003, 4, 27),
-    date(2004, 4, 11), date(2005, 5,  1), date(2006, 4, 23), date(2007, 4,  8),
-    date(2008, 4, 27), date(2009, 4, 19),
-
-    date(2010, 4,  4), date(2011, 4, 24), date(2012, 4, 15), date(2013, 5,  5),
-    date(2014, 4, 20), date(2015, 4, 12), date(2016, 5,  1), date(2017, 4, 16),
-    date(2018, 4,  8), date(2019, 4, 28),
-
-    date(2020, 4, 19), date(2021, 5,  2), date(2022, 4, 24), date(2023, 4, 16),
-    date(2024, 5,  5), date(2025, 4, 20), date(2026, 4, 12), date(2027, 5,  2),
-    date(2028, 4, 16), date(2029, 4,  8),
-
-    date(2030, 4, 28), date(2031, 4, 13), date(2032, 5,  2), date(2033, 4, 24),
-    date(2034, 4,  9), date(2035, 4, 29), date(2036, 4, 20), date(2037, 4,  5),
-    date(2038, 4, 25), date(2039, 4, 17),
-
-    date(2040, 5,  6), date(2041, 4, 21), date(2042, 4, 13), date(2043, 5,  3),
-    date(2044, 4, 24), date(2045, 4,  9), date(2046, 4, 29), date(2047, 4, 21),
-    date(2048, 4,  5), date(2049, 4, 25), date(2050, 4, 17)
-]
-
-# A random smattering of Julian dates.
-# Pulled values from http://www.kevinlaughery.com/east4099.html
-julian_easter_dates = [
-    date( 326, 4,  3), date( 375, 4,  5), date( 492, 4,  5), date( 552, 3, 31),
-    date( 562, 4,  9), date( 569, 4, 21), date( 597, 4, 14), date( 621, 4, 19),
-    date( 636, 3, 31), date( 655, 3, 29), date( 700, 4, 11), date( 725, 4,  8),
-    date( 750, 3, 29), date( 782, 4,  7), date( 835, 4, 18), date( 849, 4, 14),
-    date( 867, 3, 30), date( 890, 4, 12), date( 922, 4, 21), date( 934, 4,  6),
-    date(1049, 3, 26), date(1058, 4, 19), date(1113, 4,  6), date(1119, 3, 30),
-    date(1242, 4, 20), date(1255, 3, 28), date(1257, 4,  8), date(1258, 3, 24),
-    date(1261, 4, 24), date(1278, 4, 17), date(1333, 4,  4), date(1351, 4, 17),
-    date(1371, 4,  6), date(1391, 3, 26), date(1402, 3, 26), date(1412, 4,  3),
-    date(1439, 4,  5), date(1445, 3, 28), date(1531, 4,  9), date(1555, 4, 14)
-]
-
-
-class EasterTest(unittest.TestCase):
-    def testEasterWestern(self):
-        for easter_date in western_easter_dates:
-            self.assertEqual(easter_date, 
-                             easter(easter_date.year, EASTER_WESTERN))
-
-    def testEasterOrthodox(self):
-        for easter_date in orthodox_easter_dates:
-            self.assertEqual(easter_date,
-                             easter(easter_date.year, EASTER_ORTHODOX))
-
-    def testEasterJulian(self):
-        for easter_date in julian_easter_dates:
-            self.assertEqual(easter_date,
-                             easter(easter_date.year, EASTER_JULIAN))
-
-    def testEasterBadMethod(self):
-        # Invalid methods raise ValueError
-        with self.assertRaises(ValueError):
-            easter(1975, 4)
diff --git a/lib/dateutil/test/test_imports.py b/lib/dateutil/test/test_imports.py
deleted file mode 100644
index 1d8ac171e03ef6d996298ee048fa8b368c3771c7..0000000000000000000000000000000000000000
--- a/lib/dateutil/test/test_imports.py
+++ /dev/null
@@ -1,149 +0,0 @@
-import sys
-
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
-
-
-class ImportEasterTest(unittest.TestCase):
-    """ Test that dateutil.easter-related imports work properly """
-
-    def testEasterDirect(self):
-        import dateutil.easter
-
-    def testEasterFrom(self):
-        from dateutil import easter
-
-    def testEasterStar(self):
-        from dateutil.easter import easter
-
-
-class ImportParserTest(unittest.TestCase):
-    """ Test that dateutil.parser-related imports work properly """
-    def testParserDirect(self):
-        import dateutil.parser
-
-    def testParserFrom(self):
-        from dateutil import parser
-
-    def testParserAll(self):
-        # All interface
-        from dateutil.parser import parse
-        from dateutil.parser import parserinfo
-
-        # Other public classes
-        from dateutil.parser import parser
-
-        for var in (parse, parserinfo, parser):
-            self.assertIsNot(var, None)
-
-
-class ImportRelativeDeltaTest(unittest.TestCase):
-    """ Test that dateutil.relativedelta-related imports work properly """
-    def testRelativeDeltaDirect(self):
-        import dateutil.relativedelta
-
-    def testRelativeDeltaFrom(self):
-        from dateutil import relativedelta
-
-    def testRelativeDeltaAll(self):
-        from dateutil.relativedelta import relativedelta
-        from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
-
-        for var in (relativedelta, MO, TU, WE, TH, FR, SA, SU):
-            self.assertIsNot(var, None)
-
-        # In the public interface but not in all
-        from dateutil.relativedelta import weekday
-        self.assertIsNot(weekday, None)
-
-
-class ImportRRuleTest(unittest.TestCase):
-    """ Test that dateutil.rrule related imports work properly """
-    def testRRuleDirect(self):
-        import dateutil.rrule
-
-    def testRRuleFrom(self):
-        from dateutil import rrule
-
-    def testRRuleAll(self):
-        from dateutil.rrule import rrule
-        from dateutil.rrule import rruleset
-        from dateutil.rrule import rrulestr
-        from dateutil.rrule import YEARLY, MONTHLY, WEEKLY, DAILY
-        from dateutil.rrule import HOURLY, MINUTELY, SECONDLY
-        from dateutil.rrule import MO, TU, WE, TH, FR, SA, SU
-
-        rr_all = (rrule, rruleset, rrulestr,
-                  YEARLY, MONTHLY, WEEKLY, DAILY,
-                  HOURLY, MINUTELY, SECONDLY,
-                  MO, TU, WE, TH, FR, SA, SU)
-
-        for var in rr_all:
-            self.assertIsNot(var, None)
-
-        # In the public interface but not in all
-        from dateutil.rrule import weekday
-        self.assertIsNot(weekday, None)
-
-
-class ImportTZTest(unittest.TestCase):
-    """ Test that dateutil.tz related imports work properly """
-    def testTzDirect(self):
-        import dateutil.tz
-
-    def testTzFrom(self):
-        from dateutil import tz
-
-    def testTzAll(self):
-        from dateutil.tz import tzutc
-        from dateutil.tz import tzoffset
-        from dateutil.tz import tzlocal
-        from dateutil.tz import tzfile
-        from dateutil.tz import tzrange
-        from dateutil.tz import tzstr
-        from dateutil.tz import tzical
-        from dateutil.tz import gettz
-        from dateutil.tz import tzwin
-        from dateutil.tz import tzwinlocal
-
-        tz_all = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
-                  "tzstr", "tzical", "gettz"]
-
-        tz_all += ["tzwin", "tzwinlocal"] if sys.platform.startswith("win") else []
-        lvars = locals()
-
-        for var in tz_all:
-            self.assertIsNot(lvars[var], None)
-
-
-@unittest.skipUnless(sys.platform.startswith('win'), "Requires Windows")
-class ImportTZWinTest(unittest.TestCase):
-    """ Test that dateutil.tzwin related imports work properly """
-    def testTzwinDirect(self):
-        import dateutil.tzwin
-
-    def testTzwinFrom(self):
-        from dateutil import tzwin
-
-    def testTzwinStar(self):
-        tzwin_all = ["tzwin", "tzwinlocal"]
-
-
-class ImportZoneInfoTest(unittest.TestCase):
-    def testZoneinfoDirect(self):
-        import dateutil.zoneinfo
-
-    def testZoneinfoFrom(self):
-        from dateutil import zoneinfo
-
-    def testZoneinfoStar(self):
-        from dateutil.zoneinfo import gettz
-        from dateutil.zoneinfo import gettz_db_metadata
-        from dateutil.zoneinfo import rebuild
-
-        zi_all = (gettz, gettz_db_metadata, rebuild)
-
-        for var in zi_all:
-            self.assertIsNot(var, None)
diff --git a/lib/dateutil/test/test_parser.py b/lib/dateutil/test/test_parser.py
deleted file mode 100644
index 572e58905fc7dd1eeebebe0e4902c3f5c594015e..0000000000000000000000000000000000000000
--- a/lib/dateutil/test/test_parser.py
+++ /dev/null
@@ -1,779 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-from ._common import unittest
-
-from datetime import datetime, timedelta, date
-
-from dateutil.tz import tzoffset
-from dateutil.parser import *
-
-from six import assertRaisesRegex, PY3
-
-class ParserTest(unittest.TestCase):
-
-    def setUp(self):
-        self.tzinfos = {"BRST": -10800}
-        self.brsttz = tzoffset("BRST", -10800)
-        self.default = datetime(2003, 9, 25)
-
-        # Parser should be able to handle bytestring and unicode
-        base_str = '2014-05-01 08:00:00'
-        try:
-            # Python 2.x
-            self.uni_str = unicode(base_str)
-            self.str_str = str(base_str)
-        except NameError:
-            self.uni_str = str(base_str)
-            self.str_str = bytes(base_str.encode())
-
-    def testEmptyString(self):
-        with self.assertRaises(ValueError):
-            parse('')
-
-    def testDateCommandFormat(self):
-        self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
-                               tzinfos=self.tzinfos),
-                         datetime(2003, 9, 25, 10, 36, 28,
-                                  tzinfo=self.brsttz))
-
-    def testDateCommandFormatUnicode(self):
-        self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
-                               tzinfos=self.tzinfos),
-                         datetime(2003, 9, 25, 10, 36, 28,
-                                  tzinfo=self.brsttz))
-
-
-    def testDateCommandFormatReversed(self):
-        self.assertEqual(parse("2003 10:36:28 BRST 25 Sep Thu",
-                               tzinfos=self.tzinfos),
-                         datetime(2003, 9, 25, 10, 36, 28,
-                                  tzinfo=self.brsttz))
-
-    def testDateCommandFormatWithLong(self):
-        if not PY3:
-            self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
-                                   tzinfos={"BRST": long(-10800)}),
-                             datetime(2003, 9, 25, 10, 36, 28,
-                                      tzinfo=self.brsttz))
-    def testDateCommandFormatIgnoreTz(self):
-        self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
-                               ignoretz=True),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testDateCommandFormatStrip1(self):
-        self.assertEqual(parse("Thu Sep 25 10:36:28 2003"),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testDateCommandFormatStrip2(self):
-        self.assertEqual(parse("Thu Sep 25 10:36:28", default=self.default),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testDateCommandFormatStrip3(self):
-        self.assertEqual(parse("Thu Sep 10:36:28", default=self.default),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testDateCommandFormatStrip4(self):
-        self.assertEqual(parse("Thu 10:36:28", default=self.default),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testDateCommandFormatStrip5(self):
-        self.assertEqual(parse("Sep 10:36:28", default=self.default),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testDateCommandFormatStrip6(self):
-        self.assertEqual(parse("10:36:28", default=self.default),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testDateCommandFormatStrip7(self):
-        self.assertEqual(parse("10:36", default=self.default),
-                         datetime(2003, 9, 25, 10, 36))
-
-    def testDateCommandFormatStrip8(self):
-        self.assertEqual(parse("Thu Sep 25 2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateCommandFormatStrip9(self):
-        self.assertEqual(parse("Sep 25 2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateCommandFormatStrip10(self):
-        self.assertEqual(parse("Sep 2003", default=self.default),
-                         datetime(2003, 9, 25))
-
-    def testDateCommandFormatStrip11(self):
-        self.assertEqual(parse("Sep", default=self.default),
-                         datetime(2003, 9, 25))
-
-    def testDateCommandFormatStrip12(self):
-        self.assertEqual(parse("2003", default=self.default),
-                         datetime(2003, 9, 25))
-
-    def testDateRCommandFormat(self):
-        self.assertEqual(parse("Thu, 25 Sep 2003 10:49:41 -0300"),
-                         datetime(2003, 9, 25, 10, 49, 41,
-                                  tzinfo=self.brsttz))
-
-    def testISOFormat(self):
-        self.assertEqual(parse("2003-09-25T10:49:41.5-03:00"),
-                         datetime(2003, 9, 25, 10, 49, 41, 500000,
-                                  tzinfo=self.brsttz))
-
-    def testISOFormatStrip1(self):
-        self.assertEqual(parse("2003-09-25T10:49:41-03:00"),
-                         datetime(2003, 9, 25, 10, 49, 41,
-                                  tzinfo=self.brsttz))
-
-    def testISOFormatStrip2(self):
-        self.assertEqual(parse("2003-09-25T10:49:41"),
-                         datetime(2003, 9, 25, 10, 49, 41))
-
-    def testISOFormatStrip3(self):
-        self.assertEqual(parse("2003-09-25T10:49"),
-                         datetime(2003, 9, 25, 10, 49))
-
-    def testISOFormatStrip4(self):
-        self.assertEqual(parse("2003-09-25T10"),
-                         datetime(2003, 9, 25, 10))
-
-    def testISOFormatStrip5(self):
-        self.assertEqual(parse("2003-09-25"),
-                         datetime(2003, 9, 25))
-
-    def testISOStrippedFormat(self):
-        self.assertEqual(parse("20030925T104941.5-0300"),
-                         datetime(2003, 9, 25, 10, 49, 41, 500000,
-                                  tzinfo=self.brsttz))
-
-    def testISOStrippedFormatStrip1(self):
-        self.assertEqual(parse("20030925T104941-0300"),
-                         datetime(2003, 9, 25, 10, 49, 41,
-                                  tzinfo=self.brsttz))
-
-    def testISOStrippedFormatStrip2(self):
-        self.assertEqual(parse("20030925T104941"),
-                         datetime(2003, 9, 25, 10, 49, 41))
-
-    def testISOStrippedFormatStrip3(self):
-        self.assertEqual(parse("20030925T1049"),
-                         datetime(2003, 9, 25, 10, 49, 0))
-
-    def testISOStrippedFormatStrip4(self):
-        self.assertEqual(parse("20030925T10"),
-                         datetime(2003, 9, 25, 10))
-
-    def testISOStrippedFormatStrip5(self):
-        self.assertEqual(parse("20030925"),
-                         datetime(2003, 9, 25))
-
-    def testPythonLoggerFormat(self):
-        self.assertEqual(parse("2003-09-25 10:49:41,502"),
-                         datetime(2003, 9, 25, 10, 49, 41, 502000))
-
-    def testNoSeparator1(self):
-        self.assertEqual(parse("199709020908"),
-                         datetime(1997, 9, 2, 9, 8))
-
-    def testNoSeparator2(self):
-        self.assertEqual(parse("19970902090807"),
-                         datetime(1997, 9, 2, 9, 8, 7))
-
-    def testDateWithDash1(self):
-        self.assertEqual(parse("2003-09-25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDash2(self):
-        self.assertEqual(parse("2003-Sep-25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDash3(self):
-        self.assertEqual(parse("25-Sep-2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDash4(self):
-        self.assertEqual(parse("25-Sep-2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDash5(self):
-        self.assertEqual(parse("Sep-25-2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDash6(self):
-        self.assertEqual(parse("09-25-2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDash7(self):
-        self.assertEqual(parse("25-09-2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDash8(self):
-        self.assertEqual(parse("10-09-2003", dayfirst=True),
-                         datetime(2003, 9, 10))
-
-    def testDateWithDash9(self):
-        self.assertEqual(parse("10-09-2003"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithDash10(self):
-        self.assertEqual(parse("10-09-03"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithDash11(self):
-        self.assertEqual(parse("10-09-03", yearfirst=True),
-                         datetime(2010, 9, 3))
-
-    def testDateWithDot1(self):
-        self.assertEqual(parse("2003.09.25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDot2(self):
-        self.assertEqual(parse("2003.Sep.25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDot3(self):
-        self.assertEqual(parse("25.Sep.2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDot4(self):
-        self.assertEqual(parse("25.Sep.2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDot5(self):
-        self.assertEqual(parse("Sep.25.2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDot6(self):
-        self.assertEqual(parse("09.25.2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDot7(self):
-        self.assertEqual(parse("25.09.2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithDot8(self):
-        self.assertEqual(parse("10.09.2003", dayfirst=True),
-                         datetime(2003, 9, 10))
-
-    def testDateWithDot9(self):
-        self.assertEqual(parse("10.09.2003"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithDot10(self):
-        self.assertEqual(parse("10.09.03"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithDot11(self):
-        self.assertEqual(parse("10.09.03", yearfirst=True),
-                         datetime(2010, 9, 3))
-
-    def testDateWithSlash1(self):
-        self.assertEqual(parse("2003/09/25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSlash2(self):
-        self.assertEqual(parse("2003/Sep/25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSlash3(self):
-        self.assertEqual(parse("25/Sep/2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSlash4(self):
-        self.assertEqual(parse("25/Sep/2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSlash5(self):
-        self.assertEqual(parse("Sep/25/2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSlash6(self):
-        self.assertEqual(parse("09/25/2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSlash7(self):
-        self.assertEqual(parse("25/09/2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSlash8(self):
-        self.assertEqual(parse("10/09/2003", dayfirst=True),
-                         datetime(2003, 9, 10))
-
-    def testDateWithSlash9(self):
-        self.assertEqual(parse("10/09/2003"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithSlash10(self):
-        self.assertEqual(parse("10/09/03"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithSlash11(self):
-        self.assertEqual(parse("10/09/03", yearfirst=True),
-                         datetime(2010, 9, 3))
-
-    def testDateWithSpace1(self):
-        self.assertEqual(parse("2003 09 25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSpace2(self):
-        self.assertEqual(parse("2003 Sep 25"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSpace3(self):
-        self.assertEqual(parse("25 Sep 2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSpace4(self):
-        self.assertEqual(parse("25 Sep 2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSpace5(self):
-        self.assertEqual(parse("Sep 25 2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSpace6(self):
-        self.assertEqual(parse("09 25 2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSpace7(self):
-        self.assertEqual(parse("25 09 2003"),
-                         datetime(2003, 9, 25))
-
-    def testDateWithSpace8(self):
-        self.assertEqual(parse("10 09 2003", dayfirst=True),
-                         datetime(2003, 9, 10))
-
-    def testDateWithSpace9(self):
-        self.assertEqual(parse("10 09 2003"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithSpace10(self):
-        self.assertEqual(parse("10 09 03"),
-                         datetime(2003, 10, 9))
-
-    def testDateWithSpace11(self):
-        self.assertEqual(parse("10 09 03", yearfirst=True),
-                         datetime(2010, 9, 3))
-
-    def testDateWithSpace12(self):
-        self.assertEqual(parse("25 09 03"),
-                         datetime(2003, 9, 25))
-
-    def testStrangelyOrderedDate1(self):
-        self.assertEqual(parse("03 25 Sep"),
-                         datetime(2003, 9, 25))
-
-    def testStrangelyOrderedDate2(self):
-        self.assertEqual(parse("2003 25 Sep"),
-                         datetime(2003, 9, 25))
-
-    def testStrangelyOrderedDate3(self):
-        self.assertEqual(parse("25 03 Sep"),
-                         datetime(2025, 9, 3))
-
-    def testHourWithLetters(self):
-        self.assertEqual(parse("10h36m28.5s", default=self.default),
-                         datetime(2003, 9, 25, 10, 36, 28, 500000))
-
-    def testHourWithLettersStrip1(self):
-        self.assertEqual(parse("10h36m28s", default=self.default),
-                         datetime(2003, 9, 25, 10, 36, 28))
-
-    def testHourWithLettersStrip2(self):
-        self.assertEqual(parse("10h36m", default=self.default),
-                         datetime(2003, 9, 25, 10, 36))
-
-    def testHourWithLettersStrip3(self):
-        self.assertEqual(parse("10h", default=self.default),
-                         datetime(2003, 9, 25, 10))
-
-    def testHourWithLettersStrip4(self):
-        self.assertEqual(parse("10 h 36", default=self.default),
-                         datetime(2003, 9, 25, 10, 36))
-
-    def testAMPMNoHour(self):
-        with self.assertRaises(ValueError):
-            parse("AM")
-
-        with self.assertRaises(ValueError):
-            parse("Jan 20, 2015 PM")
-
-    def testHourAmPm1(self):
-        self.assertEqual(parse("10h am", default=self.default),
-                         datetime(2003, 9, 25, 10))
-
-    def testHourAmPm2(self):
-        self.assertEqual(parse("10h pm", default=self.default),
-                         datetime(2003, 9, 25, 22))
-
-    def testHourAmPm3(self):
-        self.assertEqual(parse("10am", default=self.default),
-                         datetime(2003, 9, 25, 10))
-
-    def testHourAmPm4(self):
-        self.assertEqual(parse("10pm", default=self.default),
-                         datetime(2003, 9, 25, 22))
-
-    def testHourAmPm5(self):
-        self.assertEqual(parse("10:00 am", default=self.default),
-                         datetime(2003, 9, 25, 10))
-
-    def testHourAmPm6(self):
-        self.assertEqual(parse("10:00 pm", default=self.default),
-                         datetime(2003, 9, 25, 22))
-
-    def testHourAmPm7(self):
-        self.assertEqual(parse("10:00am", default=self.default),
-                         datetime(2003, 9, 25, 10))
-
-    def testHourAmPm8(self):
-        self.assertEqual(parse("10:00pm", default=self.default),
-                         datetime(2003, 9, 25, 22))
-
-    def testHourAmPm9(self):
-        self.assertEqual(parse("10:00a.m", default=self.default),
-                         datetime(2003, 9, 25, 10))
-
-    def testHourAmPm10(self):
-        self.assertEqual(parse("10:00p.m", default=self.default),
-                         datetime(2003, 9, 25, 22))
-
-    def testHourAmPm11(self):
-        self.assertEqual(parse("10:00a.m.", default=self.default),
-                         datetime(2003, 9, 25, 10))
-
-    def testHourAmPm12(self):
-        self.assertEqual(parse("10:00p.m.", default=self.default),
-                         datetime(2003, 9, 25, 22))
-
-    def testAMPMRange(self):
-        with self.assertRaises(ValueError):
-            parse("13:44 AM")
-
-        with self.assertRaises(ValueError):
-            parse("January 25, 1921 23:13 PM")
-
-    def testPertain(self):
-        self.assertEqual(parse("Sep 03", default=self.default),
-                         datetime(2003, 9, 3))
-        self.assertEqual(parse("Sep of 03", default=self.default),
-                         datetime(2003, 9, 25))
-
-    def testWeekdayAlone(self):
-        self.assertEqual(parse("Wed", default=self.default),
-                         datetime(2003, 10, 1))
-
-    def testLongWeekday(self):
-        self.assertEqual(parse("Wednesday", default=self.default),
-                         datetime(2003, 10, 1))
-
-    def testLongMonth(self):
-        self.assertEqual(parse("October", default=self.default),
-                         datetime(2003, 10, 25))
-
-    def testZeroYear(self):
-        self.assertEqual(parse("31-Dec-00", default=self.default),
-                         datetime(2000, 12, 31))
-
-    def testFuzzy(self):
-        s = "Today is 25 of September of 2003, exactly " \
-            "at 10:49:41 with timezone -03:00."
-        self.assertEqual(parse(s, fuzzy=True),
-                         datetime(2003, 9, 25, 10, 49, 41,
-                                  tzinfo=self.brsttz))
-
-    def testFuzzyWithTokens(self):
-        s = "Today is 25 of September of 2003, exactly " \
-            "at 10:49:41 with timezone -03:00."
-        self.assertEqual(parse(s, fuzzy_with_tokens=True),
-                         (datetime(2003, 9, 25, 10, 49, 41,
-                                   tzinfo=self.brsttz),
-                         ('Today is ', 'of ', ', exactly at ',
-                          ' with timezone ', '.')))
-
-    def testFuzzyAMPMProblem(self):
-        # Sometimes fuzzy parsing results in AM/PM flag being set without
-        # hours - if it's fuzzy it should ignore that.
-        s1 = "I have a meeting on March 1, 1974."
-        s2 = "On June 8th, 2020, I am going to be the first man on Mars"
-
-        # Also don't want any erroneous AM or PMs changing the parsed time
-        s3 = "Meet me at the AM/PM on Sunset at 3:00 AM on December 3rd, 2003"
-        s4 = "Meet me at 3:00AM on December 3rd, 2003 at the AM/PM on Sunset"
-
-        self.assertEqual(parse(s1, fuzzy=True), datetime(1974, 3, 1))
-        self.assertEqual(parse(s2, fuzzy=True), datetime(2020, 6, 8))
-        self.assertEqual(parse(s3, fuzzy=True), datetime(2003, 12, 3, 3))
-        self.assertEqual(parse(s4, fuzzy=True), datetime(2003, 12, 3, 3))
-
-    def testFuzzyIgnoreAMPM(self):
-        s1 = "Jan 29, 1945 14:45 AM I going to see you there?"
-
-        self.assertEqual(parse(s1, fuzzy=True), datetime(1945, 1, 29, 14, 45))
-
-    def testExtraSpace(self):
-        self.assertEqual(parse("  July   4 ,  1976   12:01:02   am  "),
-                         datetime(1976, 7, 4, 0, 1, 2))
-
-    def testRandomFormat1(self):
-        self.assertEqual(parse("Wed, July 10, '96"),
-                         datetime(1996, 7, 10, 0, 0))
-
-    def testRandomFormat2(self):
-        self.assertEqual(parse("1996.07.10 AD at 15:08:56 PDT",
-                               ignoretz=True),
-                         datetime(1996, 7, 10, 15, 8, 56))
-
-    def testRandomFormat3(self):
-        self.assertEqual(parse("1996.July.10 AD 12:08 PM"),
-                         datetime(1996, 7, 10, 12, 8))
-
-    def testRandomFormat4(self):
-        self.assertEqual(parse("Tuesday, April 12, 1952 AD 3:30:42pm PST",
-                               ignoretz=True),
-                         datetime(1952, 4, 12, 15, 30, 42))
-
-    def testRandomFormat5(self):
-        self.assertEqual(parse("November 5, 1994, 8:15:30 am EST",
-                               ignoretz=True),
-                         datetime(1994, 11, 5, 8, 15, 30))
-
-    def testRandomFormat6(self):
-        self.assertEqual(parse("1994-11-05T08:15:30-05:00",
-                               ignoretz=True),
-                         datetime(1994, 11, 5, 8, 15, 30))
-
-    def testRandomFormat7(self):
-        self.assertEqual(parse("1994-11-05T08:15:30Z",
-                               ignoretz=True),
-                         datetime(1994, 11, 5, 8, 15, 30))
-
-    def testRandomFormat8(self):
-        self.assertEqual(parse("July 4, 1976"), datetime(1976, 7, 4))
-
-    def testRandomFormat9(self):
-        self.assertEqual(parse("7 4 1976"), datetime(1976, 7, 4))
-
-    def testRandomFormat10(self):
-        self.assertEqual(parse("4 jul 1976"), datetime(1976, 7, 4))
-
-    def testRandomFormat11(self):
-        self.assertEqual(parse("7-4-76"), datetime(1976, 7, 4))
-
-    def testRandomFormat12(self):
-        self.assertEqual(parse("19760704"), datetime(1976, 7, 4))
-
-    def testRandomFormat13(self):
-        self.assertEqual(parse("0:01:02", default=self.default),
-                         datetime(2003, 9, 25, 0, 1, 2))
-
-    def testRandomFormat14(self):
-        self.assertEqual(parse("12h 01m02s am", default=self.default),
-                         datetime(2003, 9, 25, 0, 1, 2))
-
-    def testRandomFormat15(self):
-        self.assertEqual(parse("0:01:02 on July 4, 1976"),
-                         datetime(1976, 7, 4, 0, 1, 2))
-
-    def testRandomFormat16(self):
-        self.assertEqual(parse("0:01:02 on July 4, 1976"),
-                         datetime(1976, 7, 4, 0, 1, 2))
-
-    def testRandomFormat17(self):
-        self.assertEqual(parse("1976-07-04T00:01:02Z", ignoretz=True),
-                         datetime(1976, 7, 4, 0, 1, 2))
-
-    def testRandomFormat18(self):
-        self.assertEqual(parse("July 4, 1976 12:01:02 am"),
-                         datetime(1976, 7, 4, 0, 1, 2))
-
-    def testRandomFormat19(self):
-        self.assertEqual(parse("Mon Jan  2 04:24:27 1995"),
-                         datetime(1995, 1, 2, 4, 24, 27))
-
-    def testRandomFormat20(self):
-        self.assertEqual(parse("Tue Apr 4 00:22:12 PDT 1995", ignoretz=True),
-                         datetime(1995, 4, 4, 0, 22, 12))
-
-    def testRandomFormat21(self):
-        self.assertEqual(parse("04.04.95 00:22"),
-                         datetime(1995, 4, 4, 0, 22))
-
-    def testRandomFormat22(self):
-        self.assertEqual(parse("Jan 1 1999 11:23:34.578"),
-                         datetime(1999, 1, 1, 11, 23, 34, 578000))
-
-    def testRandomFormat23(self):
-        self.assertEqual(parse("950404 122212"),
-                         datetime(1995, 4, 4, 12, 22, 12))
-
-    def testRandomFormat24(self):
-        self.assertEqual(parse("0:00 PM, PST", default=self.default,
-                               ignoretz=True),
-                         datetime(2003, 9, 25, 12, 0))
-
-    def testRandomFormat25(self):
-        self.assertEqual(parse("12:08 PM", default=self.default),
-                         datetime(2003, 9, 25, 12, 8))
-
-    def testRandomFormat26(self):
-        self.assertEqual(parse("5:50 A.M. on June 13, 1990"),
-                         datetime(1990, 6, 13, 5, 50))
-
-    def testRandomFormat27(self):
-        self.assertEqual(parse("3rd of May 2001"), datetime(2001, 5, 3))
-
-    def testRandomFormat28(self):
-        self.assertEqual(parse("5th of March 2001"), datetime(2001, 3, 5))
-
-    def testRandomFormat29(self):
-        self.assertEqual(parse("1st of May 2003"), datetime(2003, 5, 1))
-
-    def testRandomFormat30(self):
-        self.assertEqual(parse("01h02m03", default=self.default),
-                         datetime(2003, 9, 25, 1, 2, 3))
-
-    def testRandomFormat31(self):
-        self.assertEqual(parse("01h02", default=self.default),
-                         datetime(2003, 9, 25, 1, 2))
-
-    def testRandomFormat32(self):
-        self.assertEqual(parse("01h02s", default=self.default),
-                         datetime(2003, 9, 25, 1, 0, 2))
-
-    def testRandomFormat33(self):
-        self.assertEqual(parse("01m02", default=self.default),
-                         datetime(2003, 9, 25, 0, 1, 2))
-
-    def testRandomFormat34(self):
-        self.assertEqual(parse("01m02h", default=self.default),
-                         datetime(2003, 9, 25, 2, 1))
-
-    def testRandomFormat35(self):
-        self.assertEqual(parse("2004 10 Apr 11h30m", default=self.default),
-                         datetime(2004, 4, 10, 11, 30))
-
-    def test_99_ad(self):
-        self.assertEqual(parse('0099-01-01T00:00:00'),
-                         datetime(99, 1, 1, 0, 0))
-
-    def test_31_ad(self):
-        self.assertEqual(parse('0031-01-01T00:00:00'),
-                         datetime(31, 1, 1, 0, 0))
-
-    def testInvalidDay(self):
-        with self.assertRaises(ValueError):
-            parse("Feb 30, 2007")
-
-    def testUnspecifiedDayFallback(self):
-        # Test that for an unspecified day, the fallback behavior is correct.
-        self.assertEqual(parse("April 2009", default=datetime(2010, 1, 31)),
-                         datetime(2009, 4, 30))
-
-    def testUnspecifiedDayFallbackFebNoLeapYear(self):        
-        self.assertEqual(parse("Feb 2007", default=datetime(2010, 1, 31)),
-                         datetime(2007, 2, 28))
-
-    def testUnspecifiedDayFallbackFebLeapYear(self):        
-        self.assertEqual(parse("Feb 2008", default=datetime(2010, 1, 31)),
-                         datetime(2008, 2, 29))
-
-    def testErrorType01(self):
-        self.assertRaises(ValueError,
-                          parse, 'shouldfail')
-
-    def testCorrectErrorOnFuzzyWithTokens(self):
-        assertRaisesRegex(self, ValueError, 'Unknown string format',
-                          parse, '04/04/32/423', fuzzy_with_tokens=True)
-        assertRaisesRegex(self, ValueError, 'Unknown string format',
-                          parse, '04/04/04 +32423', fuzzy_with_tokens=True)
-        assertRaisesRegex(self, ValueError, 'Unknown string format',
-                          parse, '04/04/0d4', fuzzy_with_tokens=True)
-
-    def testIncreasingCTime(self):
-        # This test will check 200 different years, every month, every day,
-        # every hour, every minute, every second, and every weekday, using
-        # a delta of more or less 1 year, 1 month, 1 day, 1 minute and
-        # 1 second.
-        delta = timedelta(days=365+31+1, seconds=1+60+60*60)
-        dt = datetime(1900, 1, 1, 0, 0, 0, 0)
-        for i in range(200):
-            self.assertEqual(parse(dt.ctime()), dt)
-            dt += delta
-
-    def testIncreasingISOFormat(self):
-        delta = timedelta(days=365+31+1, seconds=1+60+60*60)
-        dt = datetime(1900, 1, 1, 0, 0, 0, 0)
-        for i in range(200):
-            self.assertEqual(parse(dt.isoformat()), dt)
-            dt += delta
-
-    def testMicrosecondsPrecisionError(self):
-        # Skip found out that sad precision problem. :-(
-        dt1 = parse("00:11:25.01")
-        dt2 = parse("00:12:10.01")
-        self.assertEqual(dt1.microsecond, 10000)
-        self.assertEqual(dt2.microsecond, 10000)
-
-    def testMicrosecondPrecisionErrorReturns(self):
-        # One more precision issue, discovered by Eric Brown.  This should
-        # be the last one, as we're no longer using floating points.
-        for ms in [100001, 100000, 99999, 99998,
-                    10001,  10000,  9999,  9998,
-                     1001,   1000,   999,   998,
-                      101,    100,    99,    98]:
-            dt = datetime(2008, 2, 27, 21, 26, 1, ms)
-            self.assertEqual(parse(dt.isoformat()), dt)
-
-    def testHighPrecisionSeconds(self):
-        self.assertEqual(parse("20080227T21:26:01.123456789"),
-                          datetime(2008, 2, 27, 21, 26, 1, 123456))
-
-    def testCustomParserInfo(self):
-        # Custom parser info wasn't working, as Michael Elsdörfer discovered.
-        from dateutil.parser import parserinfo, parser
-
-        class myparserinfo(parserinfo):
-            MONTHS = parserinfo.MONTHS[:]
-            MONTHS[0] = ("Foo", "Foo")
-        myparser = parser(myparserinfo())
-        dt = myparser.parse("01/Foo/2007")
-        self.assertEqual(dt, datetime(2007, 1, 1))
-
-    def testParseStr(self):
-        self.assertEqual(parse(self.str_str),
-                         parse(self.uni_str))
-
-    def testParserParseStr(self):
-        from dateutil.parser import parser
-
-        self.assertEqual(parser().parse(self.str_str),
-                         parser().parse(self.uni_str))
-
-    def testParseUnicodeWords(self):
-
-        class rus_parserinfo(parserinfo):
-            MONTHS = [("янв", "Январь"),
-                      ("фев", "Февраль"),
-                      ("мар", "Март"),
-                      ("апр", "Апрель"),
-                      ("май", "Май"),
-                      ("июн", "Июнь"),
-                      ("июл", "Июль"),
-                      ("авг", "Август"),
-                      ("сен", "Сентябрь"),
-                      ("окт", "Октябрь"),
-                      ("ноя", "Ноябрь"),
-                      ("дек", "Декабрь")]
-
-        self.assertEqual(parse('10 Сентябрь 2015 10:20',
-                               parserinfo=rus_parserinfo()),
-                         datetime(2015, 9, 10, 10, 20))
-
-    def testParseWithNulls(self):
-        # This relies on the from __future__ import unicode_literals, because
-        # explicitly specifying a unicode literal is a syntax error in Py 3.2
-        # May want to switch to u'...' if we ever drop Python 3.2 support.
-        pstring = '\x00\x00August 29, 1924'
-
-        self.assertEqual(parse(pstring),
-                         datetime(1924, 8, 29))
-
diff --git a/lib/dateutil/test/test_relativedelta.py b/lib/dateutil/test/test_relativedelta.py
deleted file mode 100644
index 122776ac616fd1213baa1d75ad57894023a0d5e3..0000000000000000000000000000000000000000
--- a/lib/dateutil/test/test_relativedelta.py
+++ /dev/null
@@ -1,484 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-from ._common import unittest, WarningTestMixin
-
-import calendar
-from datetime import datetime, date
-
-from dateutil.relativedelta import *
-
-class RelativeDeltaTest(WarningTestMixin, unittest.TestCase):
-    now = datetime(2003, 9, 17, 20, 54, 47, 282310)
-    today = date(2003, 9, 17)
-
-    def testInheritance(self):
-        # Ensure that relativedelta is inheritance-friendly.
-        class rdChildClass(relativedelta):
-            pass
-
-        ccRD = rdChildClass(years=1, months=1, days=1, leapdays=1, weeks=1,
-                            hours=1, minutes=1, seconds=1, microseconds=1)
-
-        rd = relativedelta(years=1, months=1, days=1, leapdays=1, weeks=1,
-                           hours=1, minutes=1, seconds=1, microseconds=1)
-
-        self.assertEqual(type(ccRD + rd), type(ccRD),
-                         msg='Addition does not inherit type.')
-
-        self.assertEqual(type(ccRD - rd), type(ccRD),
-                         msg='Subtraction does not inherit type.')
-
-        self.assertEqual(type(-ccRD), type(ccRD),
-                         msg='Negation does not inherit type.')
-
-        self.assertEqual(type(ccRD * 5.0), type(ccRD),
-                         msg='Multiplication does not inherit type.')
-        
-        self.assertEqual(type(ccRD / 5.0), type(ccRD),
-                         msg='Division does not inherit type.')
-
-    def testMonthEndMonthBeginning(self):
-        self.assertEqual(relativedelta(datetime(2003, 1, 31, 23, 59, 59),
-                                       datetime(2003, 3, 1, 0, 0, 0)),
-                         relativedelta(months=-1, seconds=-1))
-
-        self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
-                                       datetime(2003, 1, 31, 23, 59, 59)),
-                         relativedelta(months=1, seconds=1))
-
-    def testMonthEndMonthBeginningLeapYear(self):
-        self.assertEqual(relativedelta(datetime(2012, 1, 31, 23, 59, 59),
-                                       datetime(2012, 3, 1, 0, 0, 0)),
-                         relativedelta(months=-1, seconds=-1))
-
-        self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
-                                       datetime(2003, 1, 31, 23, 59, 59)),
-                         relativedelta(months=1, seconds=1))
-
-    def testNextMonth(self):
-        self.assertEqual(self.now+relativedelta(months=+1),
-                         datetime(2003, 10, 17, 20, 54, 47, 282310))
-
-    def testNextMonthPlusOneWeek(self):
-        self.assertEqual(self.now+relativedelta(months=+1, weeks=+1),
-                         datetime(2003, 10, 24, 20, 54, 47, 282310))
-
-    def testNextMonthPlusOneWeek10am(self):
-        self.assertEqual(self.today +
-                         relativedelta(months=+1, weeks=+1, hour=10),
-                         datetime(2003, 10, 24, 10, 0))
-
-    def testNextMonthPlusOneWeek10amDiff(self):
-        self.assertEqual(relativedelta(datetime(2003, 10, 24, 10, 0),
-                                       self.today),
-                         relativedelta(months=+1, days=+7, hours=+10))
-
-    def testOneMonthBeforeOneYear(self):
-        self.assertEqual(self.now+relativedelta(years=+1, months=-1),
-                         datetime(2004, 8, 17, 20, 54, 47, 282310))
-
-    def testMonthsOfDiffNumOfDays(self):
-        self.assertEqual(date(2003, 1, 27)+relativedelta(months=+1),
-                         date(2003, 2, 27))
-        self.assertEqual(date(2003, 1, 31)+relativedelta(months=+1),
-                         date(2003, 2, 28))
-        self.assertEqual(date(2003, 1, 31)+relativedelta(months=+2),
-                         date(2003, 3, 31))
-
-    def testMonthsOfDiffNumOfDaysWithYears(self):
-        self.assertEqual(date(2000, 2, 28)+relativedelta(years=+1),
-                         date(2001, 2, 28))
-        self.assertEqual(date(2000, 2, 29)+relativedelta(years=+1),
-                         date(2001, 2, 28))
-
-        self.assertEqual(date(1999, 2, 28)+relativedelta(years=+1),
-                         date(2000, 2, 28))
-        self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1),
-                         date(2000, 3, 1))
-        self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1),
-                         date(2000, 3, 1))
-
-        self.assertEqual(date(2001, 2, 28)+relativedelta(years=-1),
-                         date(2000, 2, 28))
-        self.assertEqual(date(2001, 3, 1)+relativedelta(years=-1),
-                         date(2000, 3, 1))
-
-    def testNextFriday(self):
-        self.assertEqual(self.today+relativedelta(weekday=FR),
-                         date(2003, 9, 19))
-
-    def testNextFridayInt(self):
-        self.assertEqual(self.today+relativedelta(weekday=calendar.FRIDAY),
-                         date(2003, 9, 19))
-
-    def testLastFridayInThisMonth(self):
-        self.assertEqual(self.today+relativedelta(day=31, weekday=FR(-1)),
-                         date(2003, 9, 26))
-
-    def testNextWednesdayIsToday(self):
-        self.assertEqual(self.today+relativedelta(weekday=WE),
-                         date(2003, 9, 17))
-
-    def testNextWednesdayNotToday(self):
-        self.assertEqual(self.today+relativedelta(days=+1, weekday=WE),
-                         date(2003, 9, 24))
-
-    def test15thISOYearWeek(self):
-        self.assertEqual(date(2003, 1, 1) +
-                         relativedelta(day=4, weeks=+14, weekday=MO(-1)),
-                         date(2003, 4, 7))
-
-    def testMillenniumAge(self):
-        self.assertEqual(relativedelta(self.now, date(2001, 1, 1)),
-                         relativedelta(years=+2, months=+8, days=+16,
-                                       hours=+20, minutes=+54, seconds=+47,
-                                       microseconds=+282310))
-
-    def testJohnAge(self):
-        self.assertEqual(relativedelta(self.now,
-                                       datetime(1978, 4, 5, 12, 0)),
-                         relativedelta(years=+25, months=+5, days=+12,
-                                       hours=+8, minutes=+54, seconds=+47,
-                                       microseconds=+282310))
-
-    def testJohnAgeWithDate(self):
-        self.assertEqual(relativedelta(self.today,
-                                       datetime(1978, 4, 5, 12, 0)),
-                         relativedelta(years=+25, months=+5, days=+11,
-                                       hours=+12))
-
-    def testYearDay(self):
-        self.assertEqual(date(2003, 1, 1)+relativedelta(yearday=260),
-                         date(2003, 9, 17))
-        self.assertEqual(date(2002, 1, 1)+relativedelta(yearday=260),
-                         date(2002, 9, 17))
-        self.assertEqual(date(2000, 1, 1)+relativedelta(yearday=260),
-                         date(2000, 9, 16))
-        self.assertEqual(self.today+relativedelta(yearday=261),
-                         date(2003, 9, 18))
-
-    def testYearDayBug(self):
-        # Tests a problem reported by Adam Ryan.
-        self.assertEqual(date(2010, 1, 1)+relativedelta(yearday=15),
-                         date(2010, 1, 15))
-
-    def testNonLeapYearDay(self):
-        self.assertEqual(date(2003, 1, 1)+relativedelta(nlyearday=260),
-                         date(2003, 9, 17))
-        self.assertEqual(date(2002, 1, 1)+relativedelta(nlyearday=260),
-                         date(2002, 9, 17))
-        self.assertEqual(date(2000, 1, 1)+relativedelta(nlyearday=260),
-                         date(2000, 9, 17))
-        self.assertEqual(self.today+relativedelta(nlyearday=261),
-                         date(2003, 9, 18))
-
-    def testAddition(self):
-        self.assertEqual(relativedelta(days=10) +
-                         relativedelta(years=1, months=2, days=3, hours=4,
-                                       minutes=5, microseconds=6),
-                         relativedelta(years=1, months=2, days=13, hours=4,
-                                       minutes=5, microseconds=6))
-
-    def testAdditionToDatetime(self):
-        self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1),
-                         datetime(2000, 1, 2))
-
-    def testRightAdditionToDatetime(self):
-        self.assertEqual(relativedelta(days=1) + datetime(2000, 1, 1),
-                         datetime(2000, 1, 2))
-
-    def testAdditionInvalidType(self):
-        with self.assertRaises(TypeError):
-            relativedelta(days=3) + 9
-
-    def testSubtraction(self):
-        self.assertEqual(relativedelta(days=10) -
-                         relativedelta(years=1, months=2, days=3, hours=4,
-                                       minutes=5, microseconds=6),
-                         relativedelta(years=-1, months=-2, days=7, hours=-4,
-                                       minutes=-5, microseconds=-6))
-
-    def testRightSubtractionFromDatetime(self):
-        self.assertEqual(datetime(2000, 1, 2) - relativedelta(days=1),
-                         datetime(2000, 1, 1))
-
-    def testSubtractionWithDatetime(self):
-        self.assertRaises(TypeError, lambda x, y: x - y,
-                          relativedelta(days=1), datetime(2000, 1, 1))
-
-    def testSubtractionInvalidType(self):
-        with self.assertRaises(TypeError):
-            relativedelta(hours=12) - 14
-
-    def testMultiplication(self):
-        self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1) * 28,
-                         datetime(2000, 1, 29))
-        self.assertEqual(datetime(2000, 1, 1) + 28 * relativedelta(days=1),
-                         datetime(2000, 1, 29))
-
-    def testDivision(self):
-        self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=28) / 28,
-                         datetime(2000, 1, 2))
-
-    def testBoolean(self):
-        self.assertFalse(relativedelta(days=0))
-        self.assertTrue(relativedelta(days=1))
-
-    def testComparison(self):
-        d1 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1, 
-                           minutes=1, seconds=1, microseconds=1)
-        d2 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1, 
-                           minutes=1, seconds=1, microseconds=1)
-        d3 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1, 
-                           minutes=1, seconds=1, microseconds=2)
-
-        self.assertEqual(d1, d2)
-        self.assertNotEqual(d1, d3)
-
-    def testInequalityTypeMismatch(self):
-        # Different type
-        self.assertFalse(relativedelta(year=1) == 19)
-
-    def testInequalityWeekdays(self):
-        # Different weekdays
-        no_wday = relativedelta(year=1997, month=4)
-        wday_mo_1 = relativedelta(year=1997, month=4, weekday=MO(+1))
-        wday_mo_2 = relativedelta(year=1997, month=4, weekday=MO(+2))
-        wday_tu = relativedelta(year=1997, month=4, weekday=TU)
-        
-        self.assertTrue(wday_mo_1 == wday_mo_1)
-        
-        self.assertFalse(no_wday == wday_mo_1)
-        self.assertFalse(wday_mo_1 == no_wday)
-
-        self.assertFalse(wday_mo_1 == wday_mo_2)
-        self.assertFalse(wday_mo_2 == wday_mo_1)
-       
-        self.assertFalse(wday_mo_1 == wday_tu)
-        self.assertFalse(wday_tu == wday_mo_1)
-
-    def testMonthOverflow(self):
-        self.assertEqual(relativedelta(months=273),
-                         relativedelta(years=22, months=9))
-
-    def testWeeks(self):
-        # Test that the weeks property is working properly.
-        rd = relativedelta(years=4, months=2, weeks=8, days=6)
-        self.assertEqual((rd.weeks, rd.days), (8, 8 * 7 + 6))
-        
-        rd.weeks = 3
-        self.assertEqual((rd.weeks, rd.days), (3, 3 * 7 + 6))
-
-    def testRelativeDeltaRepr(self):
-        self.assertEqual(repr(relativedelta(years=1, months=-1, days=15)),
-                         'relativedelta(years=+1, months=-1, days=+15)')
-
-        self.assertEqual(repr(relativedelta(months=14, seconds=-25)),
-                         'relativedelta(years=+1, months=+2, seconds=-25)')
-
-        self.assertEqual(repr(relativedelta(month=3, hour=3, weekday=SU(3))),
-                         'relativedelta(month=3, weekday=SU(+3), hour=3)')
-
-    def testRelativeDeltaFractionalYear(self):
-        with self.assertRaises(ValueError):
-            relativedelta(years=1.5)
-
-    def testRelativeDeltaFractionalMonth(self):
-        with self.assertRaises(ValueError):
-            relativedelta(months=1.5)
-
-    def testRelativeDeltaFractionalAbsolutes(self):
-        # Fractional absolute values will soon be unsupported;
-        # check for the deprecation warning.
-        with self.assertWarns(DeprecationWarning):
-            relativedelta(year=2.86)
-        
-        with self.assertWarns(DeprecationWarning):
-            relativedelta(month=1.29)
-
-        with self.assertWarns(DeprecationWarning):
-            relativedelta(day=0.44)
-
-        with self.assertWarns(DeprecationWarning):
-            relativedelta(hour=23.98)
-
-        with self.assertWarns(DeprecationWarning):
-            relativedelta(minute=45.21)
-
-        with self.assertWarns(DeprecationWarning):
-            relativedelta(second=13.2)
-
-        with self.assertWarns(DeprecationWarning):
-            relativedelta(microsecond=157221.93)
-
-    def testRelativeDeltaFractionalRepr(self):
-        rd = relativedelta(years=3, months=-2, days=1.25)
-
-        self.assertEqual(repr(rd),
-                         'relativedelta(years=+3, months=-2, days=+1.25)')
-
-        rd = relativedelta(hours=0.5, seconds=9.22)
-        self.assertEqual(repr(rd),
-                         'relativedelta(hours=+0.5, seconds=+9.22)')
-
-    def testRelativeDeltaFractionalWeeks(self):
-        # Equivalent to days=8, hours=18
-        rd = relativedelta(weeks=1.25)
-        d1 = datetime(2009, 9, 3, 0, 0)
-        self.assertEqual(d1 + rd,
-                         datetime(2009, 9, 11, 18))
-
-    def testRelativeDeltaFractionalDays(self):
-        rd1 = relativedelta(days=1.48)
-
-        d1 = datetime(2009, 9, 3, 0, 0)
-        self.assertEqual(d1 + rd1,
-                         datetime(2009, 9, 4, 11, 31, 12))
-
-        rd2 = relativedelta(days=1.5)
-        self.assertEqual(d1 + rd2,
-                         datetime(2009, 9, 4, 12, 0, 0))
-
-    def testRelativeDeltaFractionalHours(self):
-        rd = relativedelta(days=1, hours=12.5)
-        d1 = datetime(2009, 9, 3, 0, 0)
-        self.assertEqual(d1 + rd,
-                         datetime(2009, 9, 4, 12, 30, 0))
-
-    def testRelativeDeltaFractionalMinutes(self):
-        rd = relativedelta(hours=1, minutes=30.5)
-        d1 = datetime(2009, 9, 3, 0, 0)
-        self.assertEqual(d1 + rd,
-                         datetime(2009, 9, 3, 1, 30, 30))
-
-    def testRelativeDeltaFractionalSeconds(self):
-        rd = relativedelta(hours=5, minutes=30, seconds=30.5)
-        d1 = datetime(2009, 9, 3, 0, 0)
-        self.assertEqual(d1 + rd,
-                         datetime(2009, 9, 3, 5, 30, 30, 500000))
-
-    def testRelativeDeltaFractionalPositiveOverflow(self):
-        # Equivalent to (days=1, hours=14)
-        rd1 = relativedelta(days=1.5, hours=2)
-        d1 = datetime(2009, 9, 3, 0, 0)
-        self.assertEqual(d1 + rd1,
-                         datetime(2009, 9, 4, 14, 0, 0))
-
-        # Equivalent to (days=1, hours=14, minutes=45)
-        rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
-        d1 = datetime(2009, 9, 3, 0, 0)
-        self.assertEqual(d1 + rd2,
-                         datetime(2009, 9, 4, 14, 45))
-
-        # Carry back up - equivalent to (days=2, hours=2, minutes=0, seconds=1)
-        rd3 = relativedelta(days=1.5, hours=13, minutes=59.5, seconds=31)
-        self.assertEqual(d1 + rd3,
-                         datetime(2009, 9, 5, 2, 0, 1))
-
-    def testRelativeDeltaFractionalNegativeDays(self):
-        # Equivalent to (days=-1, hours=-1)
-        rd1 = relativedelta(days=-1.5, hours=11)
-        d1 = datetime(2009, 9, 3, 12, 0)
-        self.assertEqual(d1 + rd1,
-                         datetime(2009, 9, 2, 11, 0, 0))
-
-        # Equivalent to (days=-1, hours=-9)
-        rd2 = relativedelta(days=-1.25, hours=-3)
-        self.assertEqual(d1 + rd2,
-            datetime(2009, 9, 2, 3))
-
-    def testRelativeDeltaNormalizeFractionalDays(self):
-        # Equivalent to (days=2, hours=18)
-        rd1 = relativedelta(days=2.75)
-
-        self.assertEqual(rd1.normalized(), relativedelta(days=2, hours=18))
-
-        # Equivalent to (days=1, hours=11, minutes=31, seconds=12)
-        rd2 = relativedelta(days=1.48)
-
-        self.assertEqual(rd2.normalized(),
-            relativedelta(days=1, hours=11, minutes=31, seconds=12))
-
-    def testRelativeDeltaNormalizeFractionalHours(self):
-        # Equivalent to (hours=1, minutes=30)
-        rd1 = relativedelta(hours=1.5)
-
-        self.assertEqual(rd1.normalized(), relativedelta(hours=1, minutes=30))
-
-        # Equivalent to (hours=3, minutes=17, seconds=5, microseconds=100)
-        rd2 = relativedelta(hours=3.28472225)
-
-        self.assertEqual(rd2.normalized(),
-            relativedelta(hours=3, minutes=17, seconds=5, microseconds=100))
-
-    def testRelativeDeltaNormalizeFractionalMinutes(self):
-        # Equivalent to (minutes=15, seconds=36)
-        rd1 = relativedelta(minutes=15.6)
-
-        self.assertEqual(rd1.normalized(),
-            relativedelta(minutes=15, seconds=36))
-
-        # Equivalent to (minutes=25, seconds=20, microseconds=25000)
-        rd2 = relativedelta(minutes=25.33375)
-
-        self.assertEqual(rd2.normalized(),
-            relativedelta(minutes=25, seconds=20, microseconds=25000))
-
-    def testRelativeDeltaNormalizeFractionalSeconds(self):
-        # Equivalent to (seconds=45, microseconds=25000)
-        rd1 = relativedelta(seconds=45.025)
-        self.assertEqual(rd1.normalized(),
-            relativedelta(seconds=45, microseconds=25000))
-
-    def testRelativeDeltaNormalizeFractionalPositiveOverflow(self):
-        # Equivalent to (days=1, hours=14)
-        rd1 = relativedelta(days=1.5, hours=2)
-        self.assertEqual(rd1.normalized(),
-            relativedelta(days=1, hours=14))
-
-        # Equivalent to (days=1, hours=14, minutes=45)
-        rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
-        self.assertEqual(rd2.normalized(),
-            relativedelta(days=1, hours=14, minutes=45))
-
-        # Carry back up - equivalent to:
-        # (days=2, hours=2, minutes=0, seconds=2, microseconds=3)
-        rd3 = relativedelta(days=1.5, hours=13, minutes=59.50045,
-                            seconds=31.473, microseconds=500003)
-        self.assertEqual(rd3.normalized(),
-            relativedelta(days=2, hours=2, minutes=0,
-                          seconds=2, microseconds=3))
-
-    def testRelativeDeltaFractionalNegativeOverflow(self):
-        # Equivalent to (days=-1)
-        rd1 = relativedelta(days=-0.5, hours=-12)
-        self.assertEqual(rd1.normalized(),
-            relativedelta(days=-1))
-
-        # Equivalent to (days=-1)
-        rd2 = relativedelta(days=-1.5, hours=12)
-        self.assertEqual(rd2.normalized(),
-            relativedelta(days=-1))
-
-        # Equivalent to (days=-1, hours=-14, minutes=-45)
-        rd3 = relativedelta(days=-1.5, hours=-2.5, minutes=-15)
-        self.assertEqual(rd3.normalized(),
-            relativedelta(days=-1, hours=-14, minutes=-45))
-
-        # Equivalent to (days=-1, hours=-14, minutes=+15)
-        rd4 = relativedelta(days=-1.5, hours=-2.5, minutes=45)
-        self.assertEqual(rd4.normalized(),
-            relativedelta(days=-1, hours=-14, minutes=+15))
-
-        # Carry back up - equivalent to:
-        # (days=-2, hours=-2, minutes=0, seconds=-2, microseconds=-3)
-        rd5 = relativedelta(days=-1.5, hours=-13, minutes=-59.50045,
-                            seconds=-31.473, microseconds=-500003)
-        self.assertEqual(rd5.normalized(),
-            relativedelta(days=-2, hours=-2, minutes=0,
-                          seconds=-2, microseconds=-3))
-
-    def testInvalidYearDay(self):
-        with self.assertRaises(ValueError):
-            relativedelta(yearday=367)
-
diff --git a/lib/dateutil/test/test_rrule.py b/lib/dateutil/test/test_rrule.py
deleted file mode 100644
index acaa6e972028451f982584b25991076d97e95fa5..0000000000000000000000000000000000000000
--- a/lib/dateutil/test/test_rrule.py
+++ /dev/null
@@ -1,4676 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-from ._common import WarningTestMixin, unittest
-
-import calendar
-from datetime import datetime, date
-from six import PY3
-
-from dateutil.rrule import *
-
-
-class RRuleTest(WarningTestMixin, unittest.TestCase):
-    def _rrulestr_reverse_test(self, rule):
-        """
-        Call with an `rrule` and it will test that `str(rrule)` generates a
-        string which generates the same `rrule` as the input when passed to
-        `rrulestr()`
-        """
-        rr_str = str(rule)
-        rrulestr_rrule = rrulestr(rr_str)
-
-        self.assertEqual(list(rule), list(rrulestr_rrule))
-
-    def testYearly(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testYearlyInterval(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0),
-                          datetime(2001, 9, 2, 9, 0)])
-
-    def testYearlyIntervalLarge(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              interval=100,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(2097, 9, 2, 9, 0),
-                          datetime(2197, 9, 2, 9, 0)])
-
-    def testYearlyByMonth(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 2, 9, 0),
-                          datetime(1998, 3, 2, 9, 0),
-                          datetime(1999, 1, 2, 9, 0)])
-
-    def testYearlyByMonthDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 10, 1, 9, 0),
-                          datetime(1997, 10, 3, 9, 0)])
-
-    def testYearlyByMonthAndMonthDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 5, 9, 0),
-                          datetime(1998, 1, 7, 9, 0),
-                          datetime(1998, 3, 5, 9, 0)])
-
-    def testYearlyByWeekDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testYearlyByNWeekDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 25, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 12, 31, 9, 0)])
-
-    def testYearlyByNWeekDayLarge(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 11, 9, 0),
-                          datetime(1998, 1, 20, 9, 0),
-                          datetime(1998, 12, 17, 9, 0)])
-
-    def testYearlyByMonthAndWeekDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 8, 9, 0)])
-
-    def testYearlyByMonthAndNWeekDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 29, 9, 0),
-                          datetime(1998, 3, 3, 9, 0)])
-
-    def testYearlyByMonthAndNWeekDayLarge(self):
-        # This is interesting because the TH(-3) ends up before
-        # the TU(3).
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 15, 9, 0),
-                          datetime(1998, 1, 20, 9, 0),
-                          datetime(1998, 3, 12, 9, 0)])
-
-    def testYearlyByMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 2, 3, 9, 0),
-                          datetime(1998, 3, 3, 9, 0)])
-
-    def testYearlyByMonthAndMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 3, 3, 9, 0),
-                          datetime(2001, 3, 1, 9, 0)])
-
-    def testYearlyByYearDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testYearlyByYearDayNeg(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testYearlyByMonthAndYearDay(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 4, 10, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testYearlyByMonthAndYearDayNeg(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 4, 10, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testYearlyByWeekNo(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 5, 11, 9, 0),
-                          datetime(1998, 5, 12, 9, 0),
-                          datetime(1998, 5, 13, 9, 0)])
-
-    def testYearlyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may fall in the previous year.
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 29, 9, 0),
-                          datetime(1999, 1, 4, 9, 0),
-                          datetime(2000, 1, 3, 9, 0)])
-
-    def testYearlyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1998, 12, 27, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testYearlyByWeekNoAndWeekDayLast(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1999, 1, 3, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testYearlyByEaster(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 12, 9, 0),
-                          datetime(1999, 4, 4, 9, 0),
-                          datetime(2000, 4, 23, 9, 0)])
-
-    def testYearlyByEasterPos(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 13, 9, 0),
-                          datetime(1999, 4, 5, 9, 0),
-                          datetime(2000, 4, 24, 9, 0)])
-
-    def testYearlyByEasterNeg(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 11, 9, 0),
-                          datetime(1999, 4, 3, 9, 0),
-                          datetime(2000, 4, 22, 9, 0)])
-
-    def testYearlyByWeekNoAndWeekDay53(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 12, 28, 9, 0),
-                          datetime(2004, 12, 27, 9, 0),
-                          datetime(2009, 12, 28, 9, 0)])
-
-    def testYearlyByHour(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0),
-                          datetime(1998, 9, 2, 6, 0),
-                          datetime(1998, 9, 2, 18, 0)])
-
-    def testYearlyByMinute(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6),
-                          datetime(1997, 9, 2, 9, 18),
-                          datetime(1998, 9, 2, 9, 6)])
-
-    def testYearlyBySecond(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 6),
-                          datetime(1997, 9, 2, 9, 0, 18),
-                          datetime(1998, 9, 2, 9, 0, 6)])
-
-    def testYearlyByHourAndMinute(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6),
-                          datetime(1997, 9, 2, 18, 18),
-                          datetime(1998, 9, 2, 6, 6)])
-
-    def testYearlyByHourAndSecond(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 6),
-                          datetime(1997, 9, 2, 18, 0, 18),
-                          datetime(1998, 9, 2, 6, 0, 6)])
-
-    def testYearlyByMinuteAndSecond(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 6),
-                          datetime(1997, 9, 2, 9, 6, 18),
-                          datetime(1997, 9, 2, 9, 18, 6)])
-
-    def testYearlyByHourAndMinuteAndSecond(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 6),
-                          datetime(1997, 9, 2, 18, 6, 18),
-                          datetime(1997, 9, 2, 18, 18, 6)])
-
-    def testYearlyBySetPos(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonthday=15,
-                              byhour=(6, 18),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 11, 15, 18, 0),
-                          datetime(1998, 2, 15, 6, 0),
-                          datetime(1998, 11, 15, 18, 0)])
-
-    def testMonthly(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 10, 2, 9, 0),
-                          datetime(1997, 11, 2, 9, 0)])
-
-    def testMonthlyInterval(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 11, 2, 9, 0),
-                          datetime(1998, 1, 2, 9, 0)])
-
-    def testMonthlyIntervalLarge(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              interval=18,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1999, 3, 2, 9, 0),
-                          datetime(2000, 9, 2, 9, 0)])
-
-    def testMonthlyByMonth(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 2, 9, 0),
-                          datetime(1998, 3, 2, 9, 0),
-                          datetime(1999, 1, 2, 9, 0)])
-
-    def testMonthlyByMonthDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 10, 1, 9, 0),
-                          datetime(1997, 10, 3, 9, 0)])
-
-    def testMonthlyByMonthAndMonthDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 5, 9, 0),
-                          datetime(1998, 1, 7, 9, 0),
-                          datetime(1998, 3, 5, 9, 0)])
-
-    def testMonthlyByWeekDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-        # Third Monday of the month
-        self.assertEqual(rrule(MONTHLY,
-                         byweekday=(MO(+3)),
-                         dtstart=datetime(1997, 9, 1)).between(datetime(1997, 9, 1),
-                                                               datetime(1997, 12, 1)),
-                         [datetime(1997, 9, 15, 0, 0),
-                          datetime(1997, 10, 20, 0, 0),
-                          datetime(1997, 11, 17, 0, 0)])
-
-    def testMonthlyByNWeekDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 25, 9, 0),
-                          datetime(1997, 10, 7, 9, 0)])
-
-    def testMonthlyByNWeekDayLarge(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 11, 9, 0),
-                          datetime(1997, 9, 16, 9, 0),
-                          datetime(1997, 10, 16, 9, 0)])
-
-    def testMonthlyByMonthAndWeekDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 8, 9, 0)])
-
-    def testMonthlyByMonthAndNWeekDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 29, 9, 0),
-                          datetime(1998, 3, 3, 9, 0)])
-
-    def testMonthlyByMonthAndNWeekDayLarge(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 15, 9, 0),
-                          datetime(1998, 1, 20, 9, 0),
-                          datetime(1998, 3, 12, 9, 0)])
-
-    def testMonthlyByMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 2, 3, 9, 0),
-                          datetime(1998, 3, 3, 9, 0)])
-
-    def testMonthlyByMonthAndMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 3, 3, 9, 0),
-                          datetime(2001, 3, 1, 9, 0)])
-
-    def testMonthlyByYearDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testMonthlyByYearDayNeg(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testMonthlyByMonthAndYearDay(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 4, 10, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testMonthlyByMonthAndYearDayNeg(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 4, 10, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testMonthlyByWeekNo(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 5, 11, 9, 0),
-                          datetime(1998, 5, 12, 9, 0),
-                          datetime(1998, 5, 13, 9, 0)])
-
-    def testMonthlyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may fall in the previous year.
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 29, 9, 0),
-                          datetime(1999, 1, 4, 9, 0),
-                          datetime(2000, 1, 3, 9, 0)])
-
-    def testMonthlyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1998, 12, 27, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testMonthlyByWeekNoAndWeekDayLast(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1999, 1, 3, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testMonthlyByWeekNoAndWeekDay53(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 12, 28, 9, 0),
-                          datetime(2004, 12, 27, 9, 0),
-                          datetime(2009, 12, 28, 9, 0)])
-
-    def testMonthlyByEaster(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 12, 9, 0),
-                          datetime(1999, 4, 4, 9, 0),
-                          datetime(2000, 4, 23, 9, 0)])
-
-    def testMonthlyByEasterPos(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 13, 9, 0),
-                          datetime(1999, 4, 5, 9, 0),
-                          datetime(2000, 4, 24, 9, 0)])
-
-    def testMonthlyByEasterNeg(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 11, 9, 0),
-                          datetime(1999, 4, 3, 9, 0),
-                          datetime(2000, 4, 22, 9, 0)])
-
-    def testMonthlyByHour(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0),
-                          datetime(1997, 10, 2, 6, 0),
-                          datetime(1997, 10, 2, 18, 0)])
-
-    def testMonthlyByMinute(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6),
-                          datetime(1997, 9, 2, 9, 18),
-                          datetime(1997, 10, 2, 9, 6)])
-
-    def testMonthlyBySecond(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 6),
-                          datetime(1997, 9, 2, 9, 0, 18),
-                          datetime(1997, 10, 2, 9, 0, 6)])
-
-    def testMonthlyByHourAndMinute(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6),
-                          datetime(1997, 9, 2, 18, 18),
-                          datetime(1997, 10, 2, 6, 6)])
-
-    def testMonthlyByHourAndSecond(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 6),
-                          datetime(1997, 9, 2, 18, 0, 18),
-                          datetime(1997, 10, 2, 6, 0, 6)])
-
-    def testMonthlyByMinuteAndSecond(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 6),
-                          datetime(1997, 9, 2, 9, 6, 18),
-                          datetime(1997, 9, 2, 9, 18, 6)])
-
-    def testMonthlyByHourAndMinuteAndSecond(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 6),
-                          datetime(1997, 9, 2, 18, 6, 18),
-                          datetime(1997, 9, 2, 18, 18, 6)])
-
-    def testMonthlyBySetPos(self):
-        self.assertEqual(list(rrule(MONTHLY,
-                              count=3,
-                              bymonthday=(13, 17),
-                              byhour=(6, 18),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 13, 18, 0),
-                          datetime(1997, 9, 17, 6, 0),
-                          datetime(1997, 10, 13, 18, 0)])
-
-    def testWeekly(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testWeeklyInterval(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 16, 9, 0),
-                          datetime(1997, 9, 30, 9, 0)])
-
-    def testWeeklyIntervalLarge(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              interval=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 1, 20, 9, 0),
-                          datetime(1998, 6, 9, 9, 0)])
-
-    def testWeeklyByMonth(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 13, 9, 0),
-                          datetime(1998, 1, 20, 9, 0)])
-
-    def testWeeklyByMonthDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 10, 1, 9, 0),
-                          datetime(1997, 10, 3, 9, 0)])
-
-    def testWeeklyByMonthAndMonthDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 5, 9, 0),
-                          datetime(1998, 1, 7, 9, 0),
-                          datetime(1998, 3, 5, 9, 0)])
-
-    def testWeeklyByWeekDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testWeeklyByNWeekDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testWeeklyByMonthAndWeekDay(self):
-        # This test is interesting, because it crosses the year
-        # boundary in a weekly period to find day '1' as a
-        # valid recurrence.
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 8, 9, 0)])
-
-    def testWeeklyByMonthAndNWeekDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 8, 9, 0)])
-
-    def testWeeklyByMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 2, 3, 9, 0),
-                          datetime(1998, 3, 3, 9, 0)])
-
-    def testWeeklyByMonthAndMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 3, 3, 9, 0),
-                          datetime(2001, 3, 1, 9, 0)])
-
-    def testWeeklyByYearDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testWeeklyByYearDayNeg(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testWeeklyByMonthAndYearDay(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 1, 1, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testWeeklyByMonthAndYearDayNeg(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 1, 1, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testWeeklyByWeekNo(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 5, 11, 9, 0),
-                          datetime(1998, 5, 12, 9, 0),
-                          datetime(1998, 5, 13, 9, 0)])
-
-    def testWeeklyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may be in the last year.
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 29, 9, 0),
-                          datetime(1999, 1, 4, 9, 0),
-                          datetime(2000, 1, 3, 9, 0)])
-
-    def testWeeklyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1998, 12, 27, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testWeeklyByWeekNoAndWeekDayLast(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1999, 1, 3, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testWeeklyByWeekNoAndWeekDay53(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 12, 28, 9, 0),
-                          datetime(2004, 12, 27, 9, 0),
-                          datetime(2009, 12, 28, 9, 0)])
-
-    def testWeeklyByEaster(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 12, 9, 0),
-                          datetime(1999, 4, 4, 9, 0),
-                          datetime(2000, 4, 23, 9, 0)])
-
-    def testWeeklyByEasterPos(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 13, 9, 0),
-                          datetime(1999, 4, 5, 9, 0),
-                          datetime(2000, 4, 24, 9, 0)])
-
-    def testWeeklyByEasterNeg(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 11, 9, 0),
-                          datetime(1999, 4, 3, 9, 0),
-                          datetime(2000, 4, 22, 9, 0)])
-
-    def testWeeklyByHour(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0),
-                          datetime(1997, 9, 9, 6, 0),
-                          datetime(1997, 9, 9, 18, 0)])
-
-    def testWeeklyByMinute(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6),
-                          datetime(1997, 9, 2, 9, 18),
-                          datetime(1997, 9, 9, 9, 6)])
-
-    def testWeeklyBySecond(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 6),
-                          datetime(1997, 9, 2, 9, 0, 18),
-                          datetime(1997, 9, 9, 9, 0, 6)])
-
-    def testWeeklyByHourAndMinute(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6),
-                          datetime(1997, 9, 2, 18, 18),
-                          datetime(1997, 9, 9, 6, 6)])
-
-    def testWeeklyByHourAndSecond(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 6),
-                          datetime(1997, 9, 2, 18, 0, 18),
-                          datetime(1997, 9, 9, 6, 0, 6)])
-
-    def testWeeklyByMinuteAndSecond(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 6),
-                          datetime(1997, 9, 2, 9, 6, 18),
-                          datetime(1997, 9, 2, 9, 18, 6)])
-
-    def testWeeklyByHourAndMinuteAndSecond(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 6),
-                          datetime(1997, 9, 2, 18, 6, 18),
-                          datetime(1997, 9, 2, 18, 18, 6)])
-
-    def testWeeklyBySetPos(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              byhour=(6, 18),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0),
-                          datetime(1997, 9, 4, 6, 0),
-                          datetime(1997, 9, 9, 18, 0)])
-
-    def testDaily(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0)])
-
-    def testDailyInterval(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 6, 9, 0)])
-
-    def testDailyIntervalLarge(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              interval=92,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 12, 3, 9, 0),
-                          datetime(1998, 3, 5, 9, 0)])
-
-    def testDailyByMonth(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 1, 2, 9, 0),
-                          datetime(1998, 1, 3, 9, 0)])
-
-    def testDailyByMonthDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 10, 1, 9, 0),
-                          datetime(1997, 10, 3, 9, 0)])
-
-    def testDailyByMonthAndMonthDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 5, 9, 0),
-                          datetime(1998, 1, 7, 9, 0),
-                          datetime(1998, 3, 5, 9, 0)])
-
-    def testDailyByWeekDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testDailyByNWeekDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testDailyByMonthAndWeekDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 8, 9, 0)])
-
-    def testDailyByMonthAndNWeekDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 1, 8, 9, 0)])
-
-    def testDailyByMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 2, 3, 9, 0),
-                          datetime(1998, 3, 3, 9, 0)])
-
-    def testDailyByMonthAndMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 3, 3, 9, 0),
-                          datetime(2001, 3, 1, 9, 0)])
-
-    def testDailyByYearDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testDailyByYearDayNeg(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 9, 0),
-                          datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 4, 10, 9, 0),
-                          datetime(1998, 7, 19, 9, 0)])
-
-    def testDailyByMonthAndYearDay(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 1, 1, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testDailyByMonthAndYearDayNeg(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 9, 0),
-                          datetime(1998, 7, 19, 9, 0),
-                          datetime(1999, 1, 1, 9, 0),
-                          datetime(1999, 7, 19, 9, 0)])
-
-    def testDailyByWeekNo(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 5, 11, 9, 0),
-                          datetime(1998, 5, 12, 9, 0),
-                          datetime(1998, 5, 13, 9, 0)])
-
-    def testDailyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may be in the last year.
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 29, 9, 0),
-                          datetime(1999, 1, 4, 9, 0),
-                          datetime(2000, 1, 3, 9, 0)])
-
-    def testDailyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1998, 12, 27, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testDailyByWeekNoAndWeekDayLast(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 9, 0),
-                          datetime(1999, 1, 3, 9, 0),
-                          datetime(2000, 1, 2, 9, 0)])
-
-    def testDailyByWeekNoAndWeekDay53(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 12, 28, 9, 0),
-                          datetime(2004, 12, 27, 9, 0),
-                          datetime(2009, 12, 28, 9, 0)])
-
-    def testDailyByEaster(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 12, 9, 0),
-                          datetime(1999, 4, 4, 9, 0),
-                          datetime(2000, 4, 23, 9, 0)])
-
-    def testDailyByEasterPos(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 13, 9, 0),
-                          datetime(1999, 4, 5, 9, 0),
-                          datetime(2000, 4, 24, 9, 0)])
-
-    def testDailyByEasterNeg(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 11, 9, 0),
-                          datetime(1999, 4, 3, 9, 0),
-                          datetime(2000, 4, 22, 9, 0)])
-
-    def testDailyByHour(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0),
-                          datetime(1997, 9, 3, 6, 0),
-                          datetime(1997, 9, 3, 18, 0)])
-
-    def testDailyByMinute(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6),
-                          datetime(1997, 9, 2, 9, 18),
-                          datetime(1997, 9, 3, 9, 6)])
-
-    def testDailyBySecond(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 6),
-                          datetime(1997, 9, 2, 9, 0, 18),
-                          datetime(1997, 9, 3, 9, 0, 6)])
-
-    def testDailyByHourAndMinute(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6),
-                          datetime(1997, 9, 2, 18, 18),
-                          datetime(1997, 9, 3, 6, 6)])
-
-    def testDailyByHourAndSecond(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 6),
-                          datetime(1997, 9, 2, 18, 0, 18),
-                          datetime(1997, 9, 3, 6, 0, 6)])
-
-    def testDailyByMinuteAndSecond(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 6),
-                          datetime(1997, 9, 2, 9, 6, 18),
-                          datetime(1997, 9, 2, 9, 18, 6)])
-
-    def testDailyByHourAndMinuteAndSecond(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 6),
-                          datetime(1997, 9, 2, 18, 6, 18),
-                          datetime(1997, 9, 2, 18, 18, 6)])
-
-    def testDailyBySetPos(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(15, 45),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 15),
-                          datetime(1997, 9, 3, 6, 45),
-                          datetime(1997, 9, 3, 18, 15)])
-
-    def testHourly(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 10, 0),
-                          datetime(1997, 9, 2, 11, 0)])
-
-    def testHourlyInterval(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 11, 0),
-                          datetime(1997, 9, 2, 13, 0)])
-
-    def testHourlyIntervalLarge(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              interval=769,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 10, 4, 10, 0),
-                          datetime(1997, 11, 5, 11, 0)])
-
-    def testHourlyByMonth(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 1, 0),
-                          datetime(1998, 1, 1, 2, 0)])
-
-    def testHourlyByMonthDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 3, 0, 0),
-                          datetime(1997, 9, 3, 1, 0),
-                          datetime(1997, 9, 3, 2, 0)])
-
-    def testHourlyByMonthAndMonthDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 5, 0, 0),
-                          datetime(1998, 1, 5, 1, 0),
-                          datetime(1998, 1, 5, 2, 0)])
-
-    def testHourlyByWeekDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 10, 0),
-                          datetime(1997, 9, 2, 11, 0)])
-
-    def testHourlyByNWeekDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 10, 0),
-                          datetime(1997, 9, 2, 11, 0)])
-
-    def testHourlyByMonthAndWeekDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 1, 0),
-                          datetime(1998, 1, 1, 2, 0)])
-
-    def testHourlyByMonthAndNWeekDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 1, 0),
-                          datetime(1998, 1, 1, 2, 0)])
-
-    def testHourlyByMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 1, 0),
-                          datetime(1998, 1, 1, 2, 0)])
-
-    def testHourlyByMonthAndMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 1, 0),
-                          datetime(1998, 1, 1, 2, 0)])
-
-    def testHourlyByYearDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 0, 0),
-                          datetime(1997, 12, 31, 1, 0),
-                          datetime(1997, 12, 31, 2, 0),
-                          datetime(1997, 12, 31, 3, 0)])
-
-    def testHourlyByYearDayNeg(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 0, 0),
-                          datetime(1997, 12, 31, 1, 0),
-                          datetime(1997, 12, 31, 2, 0),
-                          datetime(1997, 12, 31, 3, 0)])
-
-    def testHourlyByMonthAndYearDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 0, 0),
-                          datetime(1998, 4, 10, 1, 0),
-                          datetime(1998, 4, 10, 2, 0),
-                          datetime(1998, 4, 10, 3, 0)])
-
-    def testHourlyByMonthAndYearDayNeg(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 0, 0),
-                          datetime(1998, 4, 10, 1, 0),
-                          datetime(1998, 4, 10, 2, 0),
-                          datetime(1998, 4, 10, 3, 0)])
-
-    def testHourlyByWeekNo(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 5, 11, 0, 0),
-                          datetime(1998, 5, 11, 1, 0),
-                          datetime(1998, 5, 11, 2, 0)])
-
-    def testHourlyByWeekNoAndWeekDay(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 29, 0, 0),
-                          datetime(1997, 12, 29, 1, 0),
-                          datetime(1997, 12, 29, 2, 0)])
-
-    def testHourlyByWeekNoAndWeekDayLarge(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 0, 0),
-                          datetime(1997, 12, 28, 1, 0),
-                          datetime(1997, 12, 28, 2, 0)])
-
-    def testHourlyByWeekNoAndWeekDayLast(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 0, 0),
-                          datetime(1997, 12, 28, 1, 0),
-                          datetime(1997, 12, 28, 2, 0)])
-
-    def testHourlyByWeekNoAndWeekDay53(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 12, 28, 0, 0),
-                          datetime(1998, 12, 28, 1, 0),
-                          datetime(1998, 12, 28, 2, 0)])
-
-    def testHourlyByEaster(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 12, 0, 0),
-                          datetime(1998, 4, 12, 1, 0),
-                          datetime(1998, 4, 12, 2, 0)])
-
-    def testHourlyByEasterPos(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 13, 0, 0),
-                          datetime(1998, 4, 13, 1, 0),
-                          datetime(1998, 4, 13, 2, 0)])
-
-    def testHourlyByEasterNeg(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 11, 0, 0),
-                          datetime(1998, 4, 11, 1, 0),
-                          datetime(1998, 4, 11, 2, 0)])
-
-    def testHourlyByHour(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0),
-                          datetime(1997, 9, 3, 6, 0),
-                          datetime(1997, 9, 3, 18, 0)])
-
-    def testHourlyByMinute(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6),
-                          datetime(1997, 9, 2, 9, 18),
-                          datetime(1997, 9, 2, 10, 6)])
-
-    def testHourlyBySecond(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 6),
-                          datetime(1997, 9, 2, 9, 0, 18),
-                          datetime(1997, 9, 2, 10, 0, 6)])
-
-    def testHourlyByHourAndMinute(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6),
-                          datetime(1997, 9, 2, 18, 18),
-                          datetime(1997, 9, 3, 6, 6)])
-
-    def testHourlyByHourAndSecond(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 6),
-                          datetime(1997, 9, 2, 18, 0, 18),
-                          datetime(1997, 9, 3, 6, 0, 6)])
-
-    def testHourlyByMinuteAndSecond(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 6),
-                          datetime(1997, 9, 2, 9, 6, 18),
-                          datetime(1997, 9, 2, 9, 18, 6)])
-
-    def testHourlyByHourAndMinuteAndSecond(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 6),
-                          datetime(1997, 9, 2, 18, 6, 18),
-                          datetime(1997, 9, 2, 18, 18, 6)])
-
-    def testHourlyBySetPos(self):
-        self.assertEqual(list(rrule(HOURLY,
-                              count=3,
-                              byminute=(15, 45),
-                              bysecond=(15, 45),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 15, 45),
-                          datetime(1997, 9, 2, 9, 45, 15),
-                          datetime(1997, 9, 2, 10, 15, 45)])
-
-    def testMinutely(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 9, 1),
-                          datetime(1997, 9, 2, 9, 2)])
-
-    def testMinutelyInterval(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 9, 2),
-                          datetime(1997, 9, 2, 9, 4)])
-
-    def testMinutelyIntervalLarge(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              interval=1501,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 10, 1),
-                          datetime(1997, 9, 4, 11, 2)])
-
-    def testMinutelyByMonth(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 0, 1),
-                          datetime(1998, 1, 1, 0, 2)])
-
-    def testMinutelyByMonthDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 3, 0, 0),
-                          datetime(1997, 9, 3, 0, 1),
-                          datetime(1997, 9, 3, 0, 2)])
-
-    def testMinutelyByMonthAndMonthDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 5, 0, 0),
-                          datetime(1998, 1, 5, 0, 1),
-                          datetime(1998, 1, 5, 0, 2)])
-
-    def testMinutelyByWeekDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 9, 1),
-                          datetime(1997, 9, 2, 9, 2)])
-
-    def testMinutelyByNWeekDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 2, 9, 1),
-                          datetime(1997, 9, 2, 9, 2)])
-
-    def testMinutelyByMonthAndWeekDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 0, 1),
-                          datetime(1998, 1, 1, 0, 2)])
-
-    def testMinutelyByMonthAndNWeekDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 0, 1),
-                          datetime(1998, 1, 1, 0, 2)])
-
-    def testMinutelyByMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 0, 1),
-                          datetime(1998, 1, 1, 0, 2)])
-
-    def testMinutelyByMonthAndMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0),
-                          datetime(1998, 1, 1, 0, 1),
-                          datetime(1998, 1, 1, 0, 2)])
-
-    def testMinutelyByYearDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 0, 0),
-                          datetime(1997, 12, 31, 0, 1),
-                          datetime(1997, 12, 31, 0, 2),
-                          datetime(1997, 12, 31, 0, 3)])
-
-    def testMinutelyByYearDayNeg(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 0, 0),
-                          datetime(1997, 12, 31, 0, 1),
-                          datetime(1997, 12, 31, 0, 2),
-                          datetime(1997, 12, 31, 0, 3)])
-
-    def testMinutelyByMonthAndYearDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 0, 0),
-                          datetime(1998, 4, 10, 0, 1),
-                          datetime(1998, 4, 10, 0, 2),
-                          datetime(1998, 4, 10, 0, 3)])
-
-    def testMinutelyByMonthAndYearDayNeg(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 0, 0),
-                          datetime(1998, 4, 10, 0, 1),
-                          datetime(1998, 4, 10, 0, 2),
-                          datetime(1998, 4, 10, 0, 3)])
-
-    def testMinutelyByWeekNo(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 5, 11, 0, 0),
-                          datetime(1998, 5, 11, 0, 1),
-                          datetime(1998, 5, 11, 0, 2)])
-
-    def testMinutelyByWeekNoAndWeekDay(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 29, 0, 0),
-                          datetime(1997, 12, 29, 0, 1),
-                          datetime(1997, 12, 29, 0, 2)])
-
-    def testMinutelyByWeekNoAndWeekDayLarge(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 0, 0),
-                          datetime(1997, 12, 28, 0, 1),
-                          datetime(1997, 12, 28, 0, 2)])
-
-    def testMinutelyByWeekNoAndWeekDayLast(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 0, 0),
-                          datetime(1997, 12, 28, 0, 1),
-                          datetime(1997, 12, 28, 0, 2)])
-
-    def testMinutelyByWeekNoAndWeekDay53(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 12, 28, 0, 0),
-                          datetime(1998, 12, 28, 0, 1),
-                          datetime(1998, 12, 28, 0, 2)])
-
-    def testMinutelyByEaster(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 12, 0, 0),
-                          datetime(1998, 4, 12, 0, 1),
-                          datetime(1998, 4, 12, 0, 2)])
-
-    def testMinutelyByEasterPos(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 13, 0, 0),
-                          datetime(1998, 4, 13, 0, 1),
-                          datetime(1998, 4, 13, 0, 2)])
-
-    def testMinutelyByEasterNeg(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 11, 0, 0),
-                          datetime(1998, 4, 11, 0, 1),
-                          datetime(1998, 4, 11, 0, 2)])
-
-    def testMinutelyByHour(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0),
-                          datetime(1997, 9, 2, 18, 1),
-                          datetime(1997, 9, 2, 18, 2)])
-
-    def testMinutelyByMinute(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6),
-                          datetime(1997, 9, 2, 9, 18),
-                          datetime(1997, 9, 2, 10, 6)])
-
-    def testMinutelyBySecond(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 6),
-                          datetime(1997, 9, 2, 9, 0, 18),
-                          datetime(1997, 9, 2, 9, 1, 6)])
-
-    def testMinutelyByHourAndMinute(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6),
-                          datetime(1997, 9, 2, 18, 18),
-                          datetime(1997, 9, 3, 6, 6)])
-
-    def testMinutelyByHourAndSecond(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 6),
-                          datetime(1997, 9, 2, 18, 0, 18),
-                          datetime(1997, 9, 2, 18, 1, 6)])
-
-    def testMinutelyByMinuteAndSecond(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 6),
-                          datetime(1997, 9, 2, 9, 6, 18),
-                          datetime(1997, 9, 2, 9, 18, 6)])
-
-    def testMinutelyByHourAndMinuteAndSecond(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 6),
-                          datetime(1997, 9, 2, 18, 6, 18),
-                          datetime(1997, 9, 2, 18, 18, 6)])
-
-    def testMinutelyBySetPos(self):
-        self.assertEqual(list(rrule(MINUTELY,
-                              count=3,
-                              bysecond=(15, 30, 45),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 15),
-                          datetime(1997, 9, 2, 9, 0, 45),
-                          datetime(1997, 9, 2, 9, 1, 15)])
-
-    def testSecondly(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 0),
-                          datetime(1997, 9, 2, 9, 0, 1),
-                          datetime(1997, 9, 2, 9, 0, 2)])
-
-    def testSecondlyInterval(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 0),
-                          datetime(1997, 9, 2, 9, 0, 2),
-                          datetime(1997, 9, 2, 9, 0, 4)])
-
-    def testSecondlyIntervalLarge(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              interval=90061,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 0),
-                          datetime(1997, 9, 3, 10, 1, 1),
-                          datetime(1997, 9, 4, 11, 2, 2)])
-
-    def testSecondlyByMonth(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0, 0),
-                          datetime(1998, 1, 1, 0, 0, 1),
-                          datetime(1998, 1, 1, 0, 0, 2)])
-
-    def testSecondlyByMonthDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 3, 0, 0, 0),
-                          datetime(1997, 9, 3, 0, 0, 1),
-                          datetime(1997, 9, 3, 0, 0, 2)])
-
-    def testSecondlyByMonthAndMonthDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 5, 0, 0, 0),
-                          datetime(1998, 1, 5, 0, 0, 1),
-                          datetime(1998, 1, 5, 0, 0, 2)])
-
-    def testSecondlyByWeekDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 0),
-                          datetime(1997, 9, 2, 9, 0, 1),
-                          datetime(1997, 9, 2, 9, 0, 2)])
-
-    def testSecondlyByNWeekDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 0),
-                          datetime(1997, 9, 2, 9, 0, 1),
-                          datetime(1997, 9, 2, 9, 0, 2)])
-
-    def testSecondlyByMonthAndWeekDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0, 0),
-                          datetime(1998, 1, 1, 0, 0, 1),
-                          datetime(1998, 1, 1, 0, 0, 2)])
-
-    def testSecondlyByMonthAndNWeekDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0, 0),
-                          datetime(1998, 1, 1, 0, 0, 1),
-                          datetime(1998, 1, 1, 0, 0, 2)])
-
-    def testSecondlyByMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0, 0),
-                          datetime(1998, 1, 1, 0, 0, 1),
-                          datetime(1998, 1, 1, 0, 0, 2)])
-
-    def testSecondlyByMonthAndMonthDayAndWeekDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 1, 1, 0, 0, 0),
-                          datetime(1998, 1, 1, 0, 0, 1),
-                          datetime(1998, 1, 1, 0, 0, 2)])
-
-    def testSecondlyByYearDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 0, 0, 0),
-                          datetime(1997, 12, 31, 0, 0, 1),
-                          datetime(1997, 12, 31, 0, 0, 2),
-                          datetime(1997, 12, 31, 0, 0, 3)])
-
-    def testSecondlyByYearDayNeg(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 31, 0, 0, 0),
-                          datetime(1997, 12, 31, 0, 0, 1),
-                          datetime(1997, 12, 31, 0, 0, 2),
-                          datetime(1997, 12, 31, 0, 0, 3)])
-
-    def testSecondlyByMonthAndYearDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 0, 0, 0),
-                          datetime(1998, 4, 10, 0, 0, 1),
-                          datetime(1998, 4, 10, 0, 0, 2),
-                          datetime(1998, 4, 10, 0, 0, 3)])
-
-    def testSecondlyByMonthAndYearDayNeg(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 10, 0, 0, 0),
-                          datetime(1998, 4, 10, 0, 0, 1),
-                          datetime(1998, 4, 10, 0, 0, 2),
-                          datetime(1998, 4, 10, 0, 0, 3)])
-
-    def testSecondlyByWeekNo(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 5, 11, 0, 0, 0),
-                          datetime(1998, 5, 11, 0, 0, 1),
-                          datetime(1998, 5, 11, 0, 0, 2)])
-
-    def testSecondlyByWeekNoAndWeekDay(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 29, 0, 0, 0),
-                          datetime(1997, 12, 29, 0, 0, 1),
-                          datetime(1997, 12, 29, 0, 0, 2)])
-
-    def testSecondlyByWeekNoAndWeekDayLarge(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 0, 0, 0),
-                          datetime(1997, 12, 28, 0, 0, 1),
-                          datetime(1997, 12, 28, 0, 0, 2)])
-
-    def testSecondlyByWeekNoAndWeekDayLast(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 12, 28, 0, 0, 0),
-                          datetime(1997, 12, 28, 0, 0, 1),
-                          datetime(1997, 12, 28, 0, 0, 2)])
-
-    def testSecondlyByWeekNoAndWeekDay53(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 12, 28, 0, 0, 0),
-                          datetime(1998, 12, 28, 0, 0, 1),
-                          datetime(1998, 12, 28, 0, 0, 2)])
-
-    def testSecondlyByEaster(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 12, 0, 0, 0),
-                          datetime(1998, 4, 12, 0, 0, 1),
-                          datetime(1998, 4, 12, 0, 0, 2)])
-
-    def testSecondlyByEasterPos(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 13, 0, 0, 0),
-                          datetime(1998, 4, 13, 0, 0, 1),
-                          datetime(1998, 4, 13, 0, 0, 2)])
-
-    def testSecondlyByEasterNeg(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1998, 4, 11, 0, 0, 0),
-                          datetime(1998, 4, 11, 0, 0, 1),
-                          datetime(1998, 4, 11, 0, 0, 2)])
-
-    def testSecondlyByHour(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 0),
-                          datetime(1997, 9, 2, 18, 0, 1),
-                          datetime(1997, 9, 2, 18, 0, 2)])
-
-    def testSecondlyByMinute(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 0),
-                          datetime(1997, 9, 2, 9, 6, 1),
-                          datetime(1997, 9, 2, 9, 6, 2)])
-
-    def testSecondlyBySecond(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0, 6),
-                          datetime(1997, 9, 2, 9, 0, 18),
-                          datetime(1997, 9, 2, 9, 1, 6)])
-
-    def testSecondlyByHourAndMinute(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 0),
-                          datetime(1997, 9, 2, 18, 6, 1),
-                          datetime(1997, 9, 2, 18, 6, 2)])
-
-    def testSecondlyByHourAndSecond(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 0, 6),
-                          datetime(1997, 9, 2, 18, 0, 18),
-                          datetime(1997, 9, 2, 18, 1, 6)])
-
-    def testSecondlyByMinuteAndSecond(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 6, 6),
-                          datetime(1997, 9, 2, 9, 6, 18),
-                          datetime(1997, 9, 2, 9, 18, 6)])
-
-    def testSecondlyByHourAndMinuteAndSecond(self):
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 18, 6, 6),
-                          datetime(1997, 9, 2, 18, 6, 18),
-                          datetime(1997, 9, 2, 18, 18, 6)])
-
-    def testSecondlyByHourAndMinuteAndSecondBug(self):
-        # This explores a bug found by Mathieu Bridon.
-        self.assertEqual(list(rrule(SECONDLY,
-                              count=3,
-                              bysecond=(0,),
-                              byminute=(1,),
-                              dtstart=datetime(2010, 3, 22, 12, 1))),
-                         [datetime(2010, 3, 22, 12, 1),
-                          datetime(2010, 3, 22, 13, 1),
-                          datetime(2010, 3, 22, 14, 1)])
-
-    def testLongIntegers(self):
-        if not PY3:  # Python 3 has no separate long type
-            self.assertEqual(list(rrule(MINUTELY,
-                                  count=long(2),
-                                  interval=long(2),
-                                  bymonth=long(2),
-                                  byweekday=long(3),
-                                  byhour=long(6),
-                                  byminute=long(6),
-                                  bysecond=long(6),
-                                  dtstart=datetime(1997, 9, 2, 9, 0))),
-                             [datetime(1998, 2, 5, 6, 6, 6),
-                              datetime(1998, 2, 12, 6, 6, 6)])
-            self.assertEqual(list(rrule(YEARLY,
-                                  count=long(2),
-                                  bymonthday=long(5),
-                                  byweekno=long(2),
-                                  dtstart=datetime(1997, 9, 2, 9, 0))),
-                             [datetime(1998, 1, 5, 9, 0),
-                              datetime(2004, 1, 5, 9, 0)])
-
-    def testHourlyBadRRule(self):
-        """
-        When `byhour` is specified with `freq=HOURLY`, there are certain
-        combinations of `dtstart` and `byhour` which result in an rrule with no
-        valid values.
-
-        See https://github.com/dateutil/dateutil/issues/4
-        """
-
-        self.assertRaises(ValueError, rrule, HOURLY,
-                          **dict(interval=4, byhour=(7, 11, 15, 19),
-                                 dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testMinutelyBadRRule(self):
-        """
-        See :func:`testHourlyBadRRule` for details.
-        """
-
-        self.assertRaises(ValueError, rrule, MINUTELY,
-                          **dict(interval=12, byminute=(10, 11, 25, 39, 50),
-                                 dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testSecondlyBadRRule(self):
-        """
-        See :func:`testHourlyBadRRule` for details.
-        """
-
-        self.assertRaises(ValueError, rrule, SECONDLY,
-                          **dict(interval=10, bysecond=(2, 15, 37, 42, 59),
-                                 dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testMinutelyBadComboRRule(self):
-        """
-        Certain values of ``interval`` in :class:`rrule`, when combined
-        with certain values of ``byhour``, create rules which apply to no
-        valid dates. The library should detect this case in the iterator and
-        raise a :exc:`ValueError`.
-        """
-
-        # In Python 2.7 you can use a context manager for this.
-        def make_bad_rrule():
-            list(rrule(MINUTELY, interval=120, byhour=(10, 12, 14, 16),
-                 count=2, dtstart=datetime(1997, 9, 2, 9, 0)))
-
-        self.assertRaises(ValueError, make_bad_rrule)
-
-    def testSecondlyBadComboRRule(self):
-        """
-        See :func:`testMinutelyBadComboRRule` for details.
-        """
-
-        # In Python 2.7 you can use a context manager for this.
-        def make_bad_minute_rrule():
-            list(rrule(SECONDLY, interval=360, byminute=(10, 28, 49),
-                 count=4, dtstart=datetime(1997, 9, 2, 9, 0)))
-
-        def make_bad_hour_rrule():
-            list(rrule(SECONDLY, interval=43200, byhour=(2, 10, 18, 23),
-                 count=4, dtstart=datetime(1997, 9, 2, 9, 0)))
-
-        self.assertRaises(ValueError, make_bad_minute_rrule)
-        self.assertRaises(ValueError, make_bad_hour_rrule)
-
-    def testBadUntilCountRRule(self):
-        """
-        See RFC 2445, section 4.3.10. This checks for the deprecation warning,
-        and will eventually check for an error.
-        """
-        with self.assertWarns(DeprecationWarning):
-            rrule(DAILY, dtstart=datetime(1997, 9, 2, 9, 0),
-                         count=3, until=datetime(1997, 9, 4, 9, 0))
-
-    def testUntilNotMatching(self):
-        self.assertEqual(list(rrule(DAILY,
-                              dtstart=datetime(1997, 9, 2, 9, 0),
-                              until=datetime(1997, 9, 5, 8, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0)])
-
-    def testUntilMatching(self):
-        self.assertEqual(list(rrule(DAILY,
-                              dtstart=datetime(1997, 9, 2, 9, 0),
-                              until=datetime(1997, 9, 4, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0)])
-
-    def testUntilSingle(self):
-        self.assertEqual(list(rrule(DAILY,
-                              dtstart=datetime(1997, 9, 2, 9, 0),
-                              until=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0)])
-
-    def testUntilEmpty(self):
-        self.assertEqual(list(rrule(DAILY,
-                              dtstart=datetime(1997, 9, 2, 9, 0),
-                              until=datetime(1997, 9, 1, 9, 0))),
-                         [])
-
-    def testUntilWithDate(self):
-        self.assertEqual(list(rrule(DAILY,
-                              dtstart=datetime(1997, 9, 2, 9, 0),
-                              until=date(1997, 9, 5))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0)])
-
-    def testWkStIntervalMO(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              interval=2,
-                              byweekday=(TU, SU),
-                              wkst=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 7, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testWkStIntervalSU(self):
-        self.assertEqual(list(rrule(WEEKLY,
-                              count=3,
-                              interval=2,
-                              byweekday=(TU, SU),
-                              wkst=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 14, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testDTStartIsDate(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              dtstart=date(1997, 9, 2))),
-                         [datetime(1997, 9, 2, 0, 0),
-                          datetime(1997, 9, 3, 0, 0),
-                          datetime(1997, 9, 4, 0, 0)])
-
-    def testDTStartWithMicroseconds(self):
-        self.assertEqual(list(rrule(DAILY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0, 0, 500000))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0)])
-
-    def testMaxYear(self):
-        self.assertEqual(list(rrule(YEARLY,
-                              count=3,
-                              bymonth=2,
-                              bymonthday=31,
-                              dtstart=datetime(9997, 9, 2, 9, 0, 0))),
-                         [])
-
-    def testGetItem(self):
-        self.assertEqual(rrule(DAILY,
-                               count=3,
-                               dtstart=datetime(1997, 9, 2, 9, 0))[0],
-                         datetime(1997, 9, 2, 9, 0))
-
-    def testGetItemNeg(self):
-        self.assertEqual(rrule(DAILY,
-                               count=3,
-                               dtstart=datetime(1997, 9, 2, 9, 0))[-1],
-                         datetime(1997, 9, 4, 9, 0))
-
-    def testGetItemSlice(self):
-        self.assertEqual(rrule(DAILY,
-                               # count=3,
-                               dtstart=datetime(1997, 9, 2, 9, 0))[1:2],
-                         [datetime(1997, 9, 3, 9, 0)])
-
-    def testGetItemSliceEmpty(self):
-        self.assertEqual(rrule(DAILY,
-                               count=3,
-                               dtstart=datetime(1997, 9, 2, 9, 0))[:],
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0)])
-
-    def testGetItemSliceStep(self):
-        self.assertEqual(rrule(DAILY,
-                               count=3,
-                               dtstart=datetime(1997, 9, 2, 9, 0))[::-2],
-                         [datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 2, 9, 0)])
-
-    def testCount(self):
-        self.assertEqual(rrule(DAILY,
-                               count=3,
-                               dtstart=datetime(1997, 9, 2, 9, 0)).count(),
-                         3)
-
-    def testContains(self):
-        rr = rrule(DAILY, count=3, dtstart=datetime(1997, 9, 2, 9, 0))
-        self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True)
-
-    def testContainsNot(self):
-        rr = rrule(DAILY, count=3, dtstart=datetime(1997, 9, 2, 9, 0))
-        self.assertEqual(datetime(1997, 9, 3, 9, 0) not in rr, False)
-
-    def testBefore(self):
-        self.assertEqual(rrule(DAILY,  # count=5
-            dtstart=datetime(1997, 9, 2, 9, 0)).before(datetime(1997, 9, 5, 9, 0)),
-                         datetime(1997, 9, 4, 9, 0))
-
-    def testBeforeInc(self):
-        self.assertEqual(rrule(DAILY,
-                               #count=5,
-                               dtstart=datetime(1997, 9, 2, 9, 0))
-                               .before(datetime(1997, 9, 5, 9, 0), inc=True),
-                         datetime(1997, 9, 5, 9, 0))
-
-    def testAfter(self):
-        self.assertEqual(rrule(DAILY,
-                               #count=5,
-                               dtstart=datetime(1997, 9, 2, 9, 0))
-                               .after(datetime(1997, 9, 4, 9, 0)),
-                         datetime(1997, 9, 5, 9, 0))
-
-    def testAfterInc(self):
-        self.assertEqual(rrule(DAILY,
-                               #count=5,
-                               dtstart=datetime(1997, 9, 2, 9, 0))
-                               .after(datetime(1997, 9, 4, 9, 0), inc=True),
-                         datetime(1997, 9, 4, 9, 0))
-
-    def testXAfter(self):
-        self.assertEqual(list(rrule(DAILY,
-                                    dtstart=datetime(1997, 9, 2, 9, 0))
-                                    .xafter(datetime(1997, 9, 8, 9, 0), count=12)),
-                                    [datetime(1997, 9, 9, 9, 0),
-                                     datetime(1997, 9, 10, 9, 0),
-                                     datetime(1997, 9, 11, 9, 0),
-                                     datetime(1997, 9, 12, 9, 0),
-                                     datetime(1997, 9, 13, 9, 0),
-                                     datetime(1997, 9, 14, 9, 0),
-                                     datetime(1997, 9, 15, 9, 0),
-                                     datetime(1997, 9, 16, 9, 0),
-                                     datetime(1997, 9, 17, 9, 0),
-                                     datetime(1997, 9, 18, 9, 0),
-                                     datetime(1997, 9, 19, 9, 0),
-                                     datetime(1997, 9, 20, 9, 0)])
-
-    def testXAfterInc(self):
-        self.assertEqual(list(rrule(DAILY,
-                                    dtstart=datetime(1997, 9, 2, 9, 0))
-                                    .xafter(datetime(1997, 9, 8, 9, 0), count=12, inc=True)),
-                                    [datetime(1997, 9, 8, 9, 0),
-                                     datetime(1997, 9, 9, 9, 0),
-                                     datetime(1997, 9, 10, 9, 0),
-                                     datetime(1997, 9, 11, 9, 0),
-                                     datetime(1997, 9, 12, 9, 0),
-                                     datetime(1997, 9, 13, 9, 0),
-                                     datetime(1997, 9, 14, 9, 0),
-                                     datetime(1997, 9, 15, 9, 0),
-                                     datetime(1997, 9, 16, 9, 0),
-                                     datetime(1997, 9, 17, 9, 0),
-                                     datetime(1997, 9, 18, 9, 0),
-                                     datetime(1997, 9, 19, 9, 0)])
-
-    def testBetween(self):
-        self.assertEqual(rrule(DAILY,
-                               #count=5,
-                               dtstart=datetime(1997, 9, 2, 9, 0))
-                               .between(datetime(1997, 9, 2, 9, 0),
-                                        datetime(1997, 9, 6, 9, 0)),
-                         [datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 5, 9, 0)])
-
-    def testBetweenInc(self):
-        self.assertEqual(rrule(DAILY,
-                               #count=5,
-                               dtstart=datetime(1997, 9, 2, 9, 0))
-                               .between(datetime(1997, 9, 2, 9, 0),
-                                        datetime(1997, 9, 6, 9, 0), inc=True),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 5, 9, 0),
-                          datetime(1997, 9, 6, 9, 0)])
-
-    def testCachePre(self):
-        rr = rrule(DAILY, count=15, cache=True,
-                   dtstart=datetime(1997, 9, 2, 9, 0))
-        self.assertEqual(list(rr),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 5, 9, 0),
-                          datetime(1997, 9, 6, 9, 0),
-                          datetime(1997, 9, 7, 9, 0),
-                          datetime(1997, 9, 8, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 10, 9, 0),
-                          datetime(1997, 9, 11, 9, 0),
-                          datetime(1997, 9, 12, 9, 0),
-                          datetime(1997, 9, 13, 9, 0),
-                          datetime(1997, 9, 14, 9, 0),
-                          datetime(1997, 9, 15, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testCachePost(self):
-        rr = rrule(DAILY, count=15, cache=True,
-                   dtstart=datetime(1997, 9, 2, 9, 0))
-        for x in rr: pass
-        self.assertEqual(list(rr),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 5, 9, 0),
-                          datetime(1997, 9, 6, 9, 0),
-                          datetime(1997, 9, 7, 9, 0),
-                          datetime(1997, 9, 8, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 10, 9, 0),
-                          datetime(1997, 9, 11, 9, 0),
-                          datetime(1997, 9, 12, 9, 0),
-                          datetime(1997, 9, 13, 9, 0),
-                          datetime(1997, 9, 14, 9, 0),
-                          datetime(1997, 9, 15, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testCachePostInternal(self):
-        rr = rrule(DAILY, count=15, cache=True,
-                   dtstart=datetime(1997, 9, 2, 9, 0))
-        for x in rr: pass
-        self.assertEqual(rr._cache,
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 3, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 5, 9, 0),
-                          datetime(1997, 9, 6, 9, 0),
-                          datetime(1997, 9, 7, 9, 0),
-                          datetime(1997, 9, 8, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 10, 9, 0),
-                          datetime(1997, 9, 11, 9, 0),
-                          datetime(1997, 9, 12, 9, 0),
-                          datetime(1997, 9, 13, 9, 0),
-                          datetime(1997, 9, 14, 9, 0),
-                          datetime(1997, 9, 15, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testCachePreContains(self):
-        rr = rrule(DAILY, count=3, cache=True,
-                   dtstart=datetime(1997, 9, 2, 9, 0))
-        self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True)
-
-    def testCachePostContains(self):
-        rr = rrule(DAILY, count=3, cache=True,
-                   dtstart=datetime(1997, 9, 2, 9, 0))
-        for x in rr: pass
-        self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True)
-
-    def testStr(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=3\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testStrType(self):
-        self.assertEqual(isinstance(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=3\n"
-                              ), rrule), True)
-
-    def testStrForceSetType(self):
-        self.assertEqual(isinstance(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=3\n"
-                              , forceset=True), rruleset), True)
-
-    def testStrSetType(self):
-        self.assertEqual(isinstance(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
-                              "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
-                              ), rruleset), True)
-
-    def testStrCase(self):
-        self.assertEqual(list(rrulestr(
-                              "dtstart:19970902T090000\n"
-                              "rrule:freq=yearly;count=3\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testStrSpaces(self):
-        self.assertEqual(list(rrulestr(
-                              " DTSTART:19970902T090000 "
-                              " RRULE:FREQ=YEARLY;COUNT=3 "
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testStrSpacesAndLines(self):
-        self.assertEqual(list(rrulestr(
-                              " DTSTART:19970902T090000 \n"
-                              " \n"
-                              " RRULE:FREQ=YEARLY;COUNT=3 \n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testStrNoDTStart(self):
-        self.assertEqual(list(rrulestr(
-                              "RRULE:FREQ=YEARLY;COUNT=3\n"
-                              , dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testStrValueOnly(self):
-        self.assertEqual(list(rrulestr(
-                              "FREQ=YEARLY;COUNT=3\n"
-                              , dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testStrUnfold(self):
-        self.assertEqual(list(rrulestr(
-                              "FREQ=YEA\n RLY;COUNT=3\n", unfold=True,
-                              dtstart=datetime(1997, 9, 2, 9, 0))),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1998, 9, 2, 9, 0),
-                          datetime(1999, 9, 2, 9, 0)])
-
-    def testStrSet(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
-                              "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testStrSetDate(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TU\n"
-                              "RDATE:19970904T090000\n"
-                              "RDATE:19970909T090000\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testStrSetExRule(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n"
-                              "EXRULE:FREQ=YEARLY;COUNT=3;BYDAY=TH\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testStrSetExDate(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n"
-                              "EXDATE:19970904T090000\n"
-                              "EXDATE:19970911T090000\n"
-                              "EXDATE:19970918T090000\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testStrSetDateAndExDate(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RDATE:19970902T090000\n"
-                              "RDATE:19970904T090000\n"
-                              "RDATE:19970909T090000\n"
-                              "RDATE:19970911T090000\n"
-                              "RDATE:19970916T090000\n"
-                              "RDATE:19970918T090000\n"
-                              "EXDATE:19970904T090000\n"
-                              "EXDATE:19970911T090000\n"
-                              "EXDATE:19970918T090000\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testStrSetDateAndExRule(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RDATE:19970902T090000\n"
-                              "RDATE:19970904T090000\n"
-                              "RDATE:19970909T090000\n"
-                              "RDATE:19970911T090000\n"
-                              "RDATE:19970916T090000\n"
-                              "RDATE:19970918T090000\n"
-                              "EXRULE:FREQ=YEARLY;COUNT=3;BYDAY=TH\n"
-                              )),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testStrKeywords(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=3;INTERVAL=3;"
-                                    "BYMONTH=3;BYWEEKDAY=TH;BYMONTHDAY=3;"
-                                    "BYHOUR=3;BYMINUTE=3;BYSECOND=3\n"
-                              )),
-                         [datetime(2033, 3, 3, 3, 3, 3),
-                          datetime(2039, 3, 3, 3, 3, 3),
-                          datetime(2072, 3, 3, 3, 3, 3)])
-
-    def testStrNWeekDay(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;COUNT=3;BYDAY=1TU,-1TH\n"
-                              )),
-                         [datetime(1997, 12, 25, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 12, 31, 9, 0)])
-
-    def testStrUntil(self):
-        self.assertEqual(list(rrulestr(
-                              "DTSTART:19970902T090000\n"
-                              "RRULE:FREQ=YEARLY;"
-                              "UNTIL=19990101T000000;BYDAY=1TU,-1TH\n"
-                              )),
-                         [datetime(1997, 12, 25, 9, 0),
-                          datetime(1998, 1, 6, 9, 0),
-                          datetime(1998, 12, 31, 9, 0)])
-
-    def testStrInvalidUntil(self):
-        with self.assertRaises(ValueError):
-            list(rrulestr("DTSTART:19970902T090000\n"
-                          "RRULE:FREQ=YEARLY;"
-                          "UNTIL=TheCowsComeHome;BYDAY=1TU,-1TH\n"))
-
-    def testStrEmptyByDay(self):
-        with self.assertRaises(ValueError):
-            list(rrulestr("DTSTART:19970902T090000\n"
-                          "FREQ=WEEKLY;"
-                          "BYDAY=;"         # This part is invalid
-                          "WKST=SU"))
-
-    def testStrInvalidByDay(self):
-        with self.assertRaises(ValueError):
-            list(rrulestr("DTSTART:19970902T090000\n"
-                          "FREQ=WEEKLY;"
-                          "BYDAY=-1OK;"         # This part is invalid
-                          "WKST=SU"))
-
-    def testBadBySetPos(self):
-        self.assertRaises(ValueError,
-                          rrule, MONTHLY,
-                                 count=1,
-                                 bysetpos=0,
-                                 dtstart=datetime(1997, 9, 2, 9, 0))
-
-    def testBadBySetPosMany(self):
-        self.assertRaises(ValueError,
-                          rrule, MONTHLY,
-                                 count=1,
-                                 bysetpos=(-1, 0, 1),
-                                 dtstart=datetime(1997, 9, 2, 9, 0))
-
-    # Tests to ensure that str(rrule) works
-    def testToStrYearly(self):
-        rule = rrule(YEARLY, count=3, dtstart=datetime(1997, 9, 2, 9, 0))
-        self._rrulestr_reverse_test(rule)
-
-    def testToStrYearlyInterval(self):
-        rule = rrule(YEARLY, count=3, interval=2,
-                     dtstart=datetime(1997, 9, 2, 9, 0))
-        self._rrulestr_reverse_test(rule)
-
-    def testToStrYearlyByMonth(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                                          count=3,
-                                          bymonth=(1, 3),
-                                          dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                                          count=3,
-                                          bymonthday=(1, 3),
-                                          dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthAndMonthDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                                          count=3,
-                                          bymonth=(1, 3),
-                                          bymonthday=(5, 7),
-                                          dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByWeekDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                                          count=3,
-                                          byweekday=(TU, TH),
-                                          dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                                          count=3,
-                                          byweekday=(TU(1), TH(-1)),
-                                          dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByNWeekDayLarge(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthAndNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthAndNWeekDayLarge(self):
-        # This is interesting because the TH(-3) ends up before
-        # the TU(3).
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthAndMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByYearDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthAndYearDay(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMonthAndYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByWeekNo(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may fall in the previous year.
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByWeekNoAndWeekDayLast(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByEaster(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByEasterPos(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByEasterNeg(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByWeekNoAndWeekDay53(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByHour(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMinute(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyBySecond(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByHourAndMinute(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByHourAndSecond(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyByHourAndMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrYearlyBySetPos(self):
-        self._rrulestr_reverse_test(rrule(YEARLY,
-                              count=3,
-                              bymonthday=15,
-                              byhour=(6, 18),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthly(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyInterval(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyIntervalLarge(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              interval=18,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonth(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthAndMonthDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-        # Third Monday of the month
-        self.assertEqual(rrule(MONTHLY,
-                               byweekday=MO(+3),
-                               dtstart=datetime(1997, 9, 1))
-                               .between(datetime(1997, 9, 1),
-                                        datetime(1997, 12, 1)),
-                         [datetime(1997, 9, 15, 0, 0),
-                          datetime(1997, 10, 20, 0, 0),
-                          datetime(1997, 11, 17, 0, 0)])
-
-    def testToStrMonthlyByNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByNWeekDayLarge(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthAndNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthAndNWeekDayLarge(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(3), TH(-3)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthAndMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByYearDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthAndYearDay(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMonthAndYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByWeekNo(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may be in the last year.
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByWeekNoAndWeekDayLast(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByWeekNoAndWeekDay53(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByEaster(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByEasterPos(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByEasterNeg(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByHour(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMinute(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyBySecond(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByHourAndMinute(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByHourAndSecond(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyByHourAndMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMonthlyBySetPos(self):
-        self._rrulestr_reverse_test(rrule(MONTHLY,
-                              count=3,
-                              bymonthday=(13, 17),
-                              byhour=(6, 18),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeekly(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyInterval(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyIntervalLarge(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              interval=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonth(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthAndMonthDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByWeekDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthAndWeekDay(self):
-        # This test is interesting, because it crosses the year
-        # boundary in a weekly period to find day '1' as a
-        # valid recurrence.
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthAndNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthAndMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByYearDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthAndYearDay(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMonthAndYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByWeekNo(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may be in the last year.
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByWeekNoAndWeekDayLast(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByWeekNoAndWeekDay53(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByEaster(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByEasterPos(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByEasterNeg(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByHour(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMinute(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyBySecond(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByHourAndMinute(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByHourAndSecond(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyByHourAndMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrWeeklyBySetPos(self):
-        self._rrulestr_reverse_test(rrule(WEEKLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              byhour=(6, 18),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDaily(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyInterval(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyIntervalLarge(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              interval=92,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonth(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthAndMonthDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByWeekDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthAndNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthAndMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByYearDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthAndYearDay(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMonthAndYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=4,
-                              bymonth=(1, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByWeekNo(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByWeekNoAndWeekDay(self):
-        # That's a nice one. The first days of week number one
-        # may be in the last year.
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByWeekNoAndWeekDayLarge(self):
-        # Another nice test. The last days of week number 52/53
-        # may be in the next year.
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByWeekNoAndWeekDayLast(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByWeekNoAndWeekDay53(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByEaster(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByEasterPos(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByEasterNeg(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByHour(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMinute(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyBySecond(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByHourAndMinute(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByHourAndSecond(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyByHourAndMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrDailyBySetPos(self):
-        self._rrulestr_reverse_test(rrule(DAILY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(15, 45),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourly(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyInterval(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyIntervalLarge(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              interval=769,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonth(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthAndMonthDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByWeekDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthAndNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthAndMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByYearDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthAndYearDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMonthAndYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByWeekNo(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByWeekNoAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByWeekNoAndWeekDayLarge(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByWeekNoAndWeekDayLast(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByWeekNoAndWeekDay53(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByEaster(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByEasterPos(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByEasterNeg(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByHour(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMinute(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyBySecond(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByHourAndMinute(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByHourAndSecond(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyByHourAndMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrHourlyBySetPos(self):
-        self._rrulestr_reverse_test(rrule(HOURLY,
-                              count=3,
-                              byminute=(15, 45),
-                              bysecond=(15, 45),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutely(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyInterval(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyIntervalLarge(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              interval=1501,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonth(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthAndMonthDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthAndNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthAndMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByYearDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthAndYearDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMonthAndYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByWeekNo(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByWeekNoAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByWeekNoAndWeekDayLarge(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByWeekNoAndWeekDayLast(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByWeekNoAndWeekDay53(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByEaster(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByEasterPos(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByEasterNeg(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByHour(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMinute(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyBySecond(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByHourAndMinute(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByHourAndSecond(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyByHourAndMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrMinutelyBySetPos(self):
-        self._rrulestr_reverse_test(rrule(MINUTELY,
-                              count=3,
-                              bysecond=(15, 30, 45),
-                              bysetpos=(3, -3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondly(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyInterval(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              interval=2,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyIntervalLarge(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              interval=90061,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonth(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthAndMonthDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(5, 7),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByWeekDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthAndNWeekDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              byweekday=(TU(1), TH(-1)),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthAndMonthDayAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bymonth=(1, 3),
-                              bymonthday=(1, 3),
-                              byweekday=(TU, TH),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByYearDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=4,
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=4,
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthAndYearDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(1, 100, 200, 365),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMonthAndYearDayNeg(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=4,
-                              bymonth=(4, 7),
-                              byyearday=(-365, -266, -166, -1),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByWeekNo(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byweekno=20,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByWeekNoAndWeekDay(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byweekno=1,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByWeekNoAndWeekDayLarge(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byweekno=52,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByWeekNoAndWeekDayLast(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byweekno=-1,
-                              byweekday=SU,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByWeekNoAndWeekDay53(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byweekno=53,
-                              byweekday=MO,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByEaster(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byeaster=0,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByEasterPos(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byeaster=1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByEasterNeg(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byeaster=-1,
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByHour(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMinute(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyBySecond(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByHourAndMinute(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByHourAndSecond(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByHourAndMinuteAndSecond(self):
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              byhour=(6, 18),
-                              byminute=(6, 18),
-                              bysecond=(6, 18),
-                              dtstart=datetime(1997, 9, 2, 9, 0)))
-
-    def testToStrSecondlyByHourAndMinuteAndSecondBug(self):
-        # This explores a bug found by Mathieu Bridon.
-        self._rrulestr_reverse_test(rrule(SECONDLY,
-                              count=3,
-                              bysecond=(0,),
-                              byminute=(1,),
-                              dtstart=datetime(2010, 3, 22, 12, 1)))
-
-    def testToStrLongIntegers(self):
-        if not PY3:  # There is no longs in python3
-            self._rrulestr_reverse_test(rrule(MINUTELY,
-                                  count=long(2),
-                                  interval=long(2),
-                                  bymonth=long(2),
-                                  byweekday=long(3),
-                                  byhour=long(6),
-                                  byminute=long(6),
-                                  bysecond=long(6),
-                                  dtstart=datetime(1997, 9, 2, 9, 0)))
-            
-            self._rrulestr_reverse_test(rrule(YEARLY,
-                                  count=long(2),
-                                  bymonthday=long(5),
-                                  byweekno=long(2),
-                                  dtstart=datetime(1997, 9, 2, 9, 0)))
-
-
-class RRuleSetTest(unittest.TestCase):
-    def testSet(self):
-        rrset = rruleset()
-        rrset.rrule(rrule(YEARLY, count=2, byweekday=TU,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.rrule(rrule(YEARLY, count=1, byweekday=TH,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testSetDate(self):
-        rrset = rruleset()
-        rrset.rrule(rrule(YEARLY, count=1, byweekday=TU,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.rdate(datetime(1997, 9, 4, 9))
-        rrset.rdate(datetime(1997, 9, 9, 9))
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testSetExRule(self):
-        rrset = rruleset()
-        rrset.rrule(rrule(YEARLY, count=6, byweekday=(TU, TH),
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.exrule(rrule(YEARLY, count=3, byweekday=TH,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testSetExDate(self):
-        rrset = rruleset()
-        rrset.rrule(rrule(YEARLY, count=6, byweekday=(TU, TH),
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.exdate(datetime(1997, 9, 4, 9))
-        rrset.exdate(datetime(1997, 9, 11, 9))
-        rrset.exdate(datetime(1997, 9, 18, 9))
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testSetExDateRevOrder(self):
-        rrset = rruleset()
-        rrset.rrule(rrule(MONTHLY, count=5, bymonthday=10,
-                          dtstart=datetime(2004, 1, 1, 9, 0)))
-        rrset.exdate(datetime(2004, 4, 10, 9, 0))
-        rrset.exdate(datetime(2004, 2, 10, 9, 0))
-        self.assertEqual(list(rrset),
-                         [datetime(2004, 1, 10, 9, 0),
-                          datetime(2004, 3, 10, 9, 0),
-                          datetime(2004, 5, 10, 9, 0)])
-
-    def testSetDateAndExDate(self):
-        rrset = rruleset()
-        rrset.rdate(datetime(1997, 9, 2, 9))
-        rrset.rdate(datetime(1997, 9, 4, 9))
-        rrset.rdate(datetime(1997, 9, 9, 9))
-        rrset.rdate(datetime(1997, 9, 11, 9))
-        rrset.rdate(datetime(1997, 9, 16, 9))
-        rrset.rdate(datetime(1997, 9, 18, 9))
-        rrset.exdate(datetime(1997, 9, 4, 9))
-        rrset.exdate(datetime(1997, 9, 11, 9))
-        rrset.exdate(datetime(1997, 9, 18, 9))
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testSetDateAndExRule(self):
-        rrset = rruleset()
-        rrset.rdate(datetime(1997, 9, 2, 9))
-        rrset.rdate(datetime(1997, 9, 4, 9))
-        rrset.rdate(datetime(1997, 9, 9, 9))
-        rrset.rdate(datetime(1997, 9, 11, 9))
-        rrset.rdate(datetime(1997, 9, 16, 9))
-        rrset.rdate(datetime(1997, 9, 18, 9))
-        rrset.exrule(rrule(YEARLY, count=3, byweekday=TH,
-                           dtstart=datetime(1997, 9, 2, 9, 0)))
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 9, 9, 0),
-                          datetime(1997, 9, 16, 9, 0)])
-
-    def testSetCount(self):
-        rrset = rruleset()
-        rrset.rrule(rrule(YEARLY, count=6, byweekday=(TU, TH),
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.exrule(rrule(YEARLY, count=3, byweekday=TH,
-                           dtstart=datetime(1997, 9, 2, 9, 0)))
-        self.assertEqual(rrset.count(), 3)
-
-    def testSetCachePre(self):
-        rrset = rruleset()
-        rrset.rrule(rrule(YEARLY, count=2, byweekday=TU,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.rrule(rrule(YEARLY, count=1, byweekday=TH,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testSetCachePost(self):
-        rrset = rruleset(cache=True)
-        rrset.rrule(rrule(YEARLY, count=2, byweekday=TU,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.rrule(rrule(YEARLY, count=1, byweekday=TH,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        for x in rrset: pass
-        self.assertEqual(list(rrset),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testSetCachePostInternal(self):
-        rrset = rruleset(cache=True)
-        rrset.rrule(rrule(YEARLY, count=2, byweekday=TU,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        rrset.rrule(rrule(YEARLY, count=1, byweekday=TH,
-                          dtstart=datetime(1997, 9, 2, 9, 0)))
-        for x in rrset: pass
-        self.assertEqual(list(rrset._cache),
-                         [datetime(1997, 9, 2, 9, 0),
-                          datetime(1997, 9, 4, 9, 0),
-                          datetime(1997, 9, 9, 9, 0)])
-
-    def testSetRRuleCount(self):
-        # Test that the count is updated when an rrule is added
-        rrset = rruleset(cache=False)
-        for cache in (True, False):
-            rrset = rruleset(cache=cache)
-            rrset.rrule(rrule(YEARLY, count=2, byweekday=TH,
-                              dtstart=datetime(1983, 4, 1)))
-            rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR,
-                              dtstart=datetime(1991, 6, 3)))
-
-            # Check the length twice - first one sets a cache, second reads it
-            self.assertEqual(rrset.count(), 6)
-            self.assertEqual(rrset.count(), 6)
-
-            # This should invalidate the cache and force an update
-            rrset.rrule(rrule(MONTHLY, count=3, dtstart=datetime(1994, 1, 3)))
-
-            self.assertEqual(rrset.count(), 9)
-            self.assertEqual(rrset.count(), 9)
-
-    def testSetRDateCount(self):
-        # Test that the count is updated when an rdate is added
-        rrset = rruleset(cache=False)
-        for cache in (True, False):
-            rrset = rruleset(cache=cache)
-            rrset.rrule(rrule(YEARLY, count=2, byweekday=TH,
-                              dtstart=datetime(1983, 4, 1)))
-            rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR,
-                              dtstart=datetime(1991, 6, 3)))
-
-            # Check the length twice - first one sets a cache, second reads it
-            self.assertEqual(rrset.count(), 6)
-            self.assertEqual(rrset.count(), 6)
-
-            # This should invalidate the cache and force an update
-            rrset.rdate(datetime(1993, 2, 14))
-
-            self.assertEqual(rrset.count(), 7)
-            self.assertEqual(rrset.count(), 7)
-
-    def testSetExRuleCount(self):
-        # Test that the count is updated when an exrule is added
-        rrset = rruleset(cache=False)
-        for cache in (True, False):
-            rrset = rruleset(cache=cache)
-            rrset.rrule(rrule(YEARLY, count=2, byweekday=TH,
-                              dtstart=datetime(1983, 4, 1)))
-            rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR,
-                              dtstart=datetime(1991, 6, 3)))
-
-            # Check the length twice - first one sets a cache, second reads it
-            self.assertEqual(rrset.count(), 6)
-            self.assertEqual(rrset.count(), 6)
-
-            # This should invalidate the cache and force an update
-            rrset.exrule(rrule(WEEKLY, count=2, interval=2,
-                               dtstart=datetime(1991, 6, 14)))
-
-            self.assertEqual(rrset.count(), 4)
-            self.assertEqual(rrset.count(), 4)
-
-    def testSetExDateCount(self):
-        # Test that the count is updated when an rdate is added
-        for cache in (True, False):
-            rrset = rruleset(cache=cache)
-            rrset.rrule(rrule(YEARLY, count=2, byweekday=TH,
-                              dtstart=datetime(1983, 4, 1)))
-            rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR,
-                              dtstart=datetime(1991, 6, 3)))
-
-            # Check the length twice - first one sets a cache, second reads it
-            self.assertEqual(rrset.count(), 6)
-            self.assertEqual(rrset.count(), 6)
-
-            # This should invalidate the cache and force an update
-            rrset.exdate(datetime(1991, 6, 28))
-
-            self.assertEqual(rrset.count(), 5)
-            self.assertEqual(rrset.count(), 5)
-
-
-class WeekdayTest(unittest.TestCase):
-    def testInvalidNthWeekday(self):
-        with self.assertRaises(ValueError):
-            zeroth_friday = FR(0)
-
-    def testWeekdayCallable(self):
-        # Calling a weekday instance generates a new weekday instance with the
-        # value of n changed.
-        from dateutil.rrule import weekday
-        self.assertEqual(MO(1), weekday(0, 1))
-
-        # Calling a weekday instance with the identical n returns the original
-        # object
-        FR_3 = weekday(4, 3)
-        self.assertIs(FR_3(3), FR_3)
-
-    def testWeekdayEquality(self):
-        # Two weekday objects are not equal if they have different values for n
-        self.assertNotEqual(TH, TH(-1))
-        self.assertNotEqual(SA(3), SA(2))
-
-    def testWeekdayEqualitySubclass(self):
-        # Two weekday objects equal if their "weekday" and "n" attributes are
-        # available and the same
-        class BasicWeekday(object):
-            def __init__(self, weekday):
-                self.weekday = weekday
-
-        class BasicNWeekday(BasicWeekday):
-            def __init__(self, weekday, n=None):
-                super(BasicNWeekday, self).__init__(weekday)
-                self.n = n
-
-        MO_Basic = BasicWeekday(0)
-        
-        self.assertNotEqual(MO, MO_Basic)
-        self.assertNotEqual(MO(1), MO_Basic)
-
-        TU_BasicN = BasicNWeekday(1)
-
-        self.assertEqual(TU, TU_BasicN)
-        self.assertNotEqual(TU(3), TU_BasicN)
-
-        WE_Basic3 = BasicNWeekday(2, 3)
-        self.assertEqual(WE(3), WE_Basic3)
-        self.assertNotEqual(WE(2), WE_Basic3)
-
-    def testWeekdayReprNoN(self):
-        no_n_reprs = ('MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU')
-        no_n_wdays = (MO, TU, WE, TH, FR, SA, SU)
-
-        for repstr, wday in zip(no_n_reprs, no_n_wdays):
-            self.assertEqual(repr(wday), repstr)
-
-    def testWeekdayReprWithN(self):
-        with_n_reprs = ('WE(+1)', 'TH(-2)', 'SU(+3)')
-        with_n_wdays = (WE(1), TH(-2), SU(+3))
-
-        for repstr, wday in zip(with_n_reprs, with_n_wdays):
-            self.assertEqual(repr(wday), repstr)
-
diff --git a/lib/dateutil/test/test_tz.py b/lib/dateutil/test/test_tz.py
deleted file mode 100644
index 7a7ea78ed12a5f080b316ab93a2d1921a4fbd686..0000000000000000000000000000000000000000
--- a/lib/dateutil/test/test_tz.py
+++ /dev/null
@@ -1,525 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-from ._common import unittest, TZWinContext
-
-from datetime import datetime, timedelta
-from datetime import time as dt_time
-from six import BytesIO, StringIO
-
-import os
-import subprocess
-import sys
-import time as _time
-import base64
-IS_WIN = sys.platform.startswith('win')
-
-# dateutil imports
-from dateutil.relativedelta import relativedelta
-from dateutil.parser import parse
-from dateutil import tz as tz
-from dateutil import zoneinfo
-
-try:
-    from dateutil import tzwin
-except ImportError as e:
-    if IS_WIN:
-        raise e
-    else:
-        pass
-
-MISSING_TARBALL = ("This test fails if you don't have the dateutil "
-                   "timezone file installed. Please read the README")
-
-TZFILE_EST5EDT = b"""
-VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAAAAAADrAAAABAAAABCeph5wn7rrYKCGAHCh
-ms1gomXicKOD6eCkaq5wpTWnYKZTyvCnFYlgqDOs8Kj+peCqE47wqt6H4KvzcPCsvmngrdNS8K6e
-S+CvszTwsH4t4LGcUXCyZ0pgs3wzcLRHLGC1XBVwticOYLc793C4BvBguRvZcLnm0mC7BPXwu8a0
-YLzk1/C9r9DgvsS58L+PsuDApJvwwW+U4MKEffDDT3bgxGRf8MUvWODGTXxwxw864MgtXnDI+Fdg
-yg1AcMrYOWDLiPBw0iP0cNJg++DTdeTw1EDd4NVVxvDWIL/g1zWo8NgAoeDZFYrw2eCD4Nr+p3Db
-wGXg3N6JcN2pgmDevmtw34lkYOCeTXDhaUZg4n4vcONJKGDkXhFw5Vcu4OZHLfDnNxDg6CcP8OkW
-8uDqBvHw6vbU4Ovm0/Ds1rbg7ca18O6/02Dvr9Jw8J+1YPGPtHDyf5dg82+WcPRfeWD1T3hw9j9b
-YPcvWnD4KHfg+Q88cPoIWeD6+Fjw++g74PzYOvD9yB3g/rgc8P+n/+AAl/7wAYfh4AJ34PADcP5g
-BGD9cAVQ4GAGQN9wBzDCYAeNGXAJEKRgCa2U8ArwhmAL4IVwDNmi4A3AZ3AOuYTgD6mD8BCZZuAR
-iWXwEnlI4BNpR/AUWSrgFUkp8BY5DOAXKQvwGCIpYBkI7fAaAgtgGvIKcBvh7WAc0exwHcHPYB6x
-znAfobFgIHYA8CGBk2AiVeLwI2qv4CQ1xPAlSpHgJhWm8Ccqc+An/sNwKQpV4CnepXAq6jfgK76H
-cCzTVGAtnmlwLrM2YC9+S3AwkxhgMWdn8DJy+mAzR0nwNFLcYDUnK/A2Mr5gNwcN8Dgb2uA45u/w
-Ofu84DrG0fA7257gPK/ucD27gOA+j9BwP5ti4EBvsnBBhH9gQk+UcENkYWBEL3ZwRURDYEYPWHBH
-JCVgR/h08EkEB2BJ2FbwSuPpYEu4OPBMzQXgTZga8E6s5+BPd/zwUIzJ4FFhGXBSbKvgU0D7cFRM
-jeBVIN1wVixv4FcAv3BYFYxgWOChcFn1bmBawINwW9VQYFypn/BdtTJgXomB8F+VFGBgaWPwYX4w
-4GJJRfBjXhLgZCkn8GU99OBmEkRwZx3W4GfyJnBo/bjgadIIcGrdmuBrsepwbMa3YG2RzHBupplg
-b3GucHCGe2BxWsrwcmZdYHM6rPB0Rj9gdRqO8HYvW+B2+nDweA894HjaUvB57x/gero08HvPAeB8
-o1Fwfa7j4H6DM3B/jsXgAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB
-AAEAAQABAgMBAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB
-AAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA
-AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB
-AAEAAQABAAEAAQABAAEAAQABAAEAAf//x8ABAP//ubAABP//x8ABCP//x8ABDEVEVABFU1QARVdU
-AEVQVAAAAAABAAAAAQ==
-"""
-
-EUROPE_HELSINKI = b"""
-VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABQAAAAAAAAB1AAAABQAAAA2kc28Yy85RYMy/hdAV
-I+uQFhPckBcDzZAX876QGOOvkBnToJAaw5GQG7y9EBysrhAdnJ8QHoyQEB98gRAgbHIQIVxjECJM
-VBAjPEUQJCw2ECUcJxAmDBgQJwVDkCf1NJAo5SWQKdUWkCrFB5ArtPiQLKTpkC2U2pAuhMuQL3S8
-kDBkrZAxXdkQMnK0EDM9uxA0UpYQNR2dEDYyeBA2/X8QOBuUkDjdYRA5+3aQOr1DEDvbWJA8pl+Q
-Pbs6kD6GQZA/mxyQQGYjkEGEORBCRgWQQ2QbEEQl55BFQ/0QRgXJkEcj3xBH7uYQSQPBEEnOyBBK
-46MQS66qEEzMv5BNjowQTqyhkE9ubhBQjIOQUVeKkFJsZZBTN2yQVExHkFUXTpBWLCmQVvcwkFgV
-RhBY1xKQWfUoEFq29JBb1QoQXKAREF207BBef/MQX5TOEGBf1RBhfeqQYj+3EGNdzJBkH5kQZT2u
-kGYItZBnHZCQZ+iXkGj9cpBpyHmQat1UkGuoW5BsxnEQbYg9kG6mUxBvaB+QcIY1EHFRPBByZhcQ
-czEeEHRF+RB1EQAQdi8VkHbw4hB4DveQeNDEEHnu2ZB6sKYQe867kHyZwpB9rp2QfnmkkH+Of5AC
-AQIDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQD
-BAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAME
-AwQAABdoAAAAACowAQQAABwgAAkAACowAQQAABwgAAlITVQARUVTVABFRVQAAAAAAQEAAAABAQ==
-"""
-
-NEW_YORK = b"""
-VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAABcAAADrAAAABAAAABCeph5wn7rrYKCGAHCh
-ms1gomXicKOD6eCkaq5wpTWnYKZTyvCnFYlgqDOs8Kj+peCqE47wqt6H4KvzcPCsvmngrdNS8K6e
-S+CvszTwsH4t4LGcUXCyZ0pgs3wzcLRHLGC1XBVwticOYLc793C4BvBguRvZcLnm0mC7BPXwu8a0
-YLzk1/C9r9DgvsS58L+PsuDApJvwwW+U4MKEffDDT3bgxGRf8MUvWODGTXxwxw864MgtXnDI+Fdg
-yg1AcMrYOWDLiPBw0iP0cNJg++DTdeTw1EDd4NVVxvDWIL/g1zWo8NgAoeDZFYrw2eCD4Nr+p3Db
-wGXg3N6JcN2pgmDevmtw34lkYOCeTXDhaUZg4n4vcONJKGDkXhFw5Vcu4OZHLfDnNxDg6CcP8OkW
-8uDqBvHw6vbU4Ovm0/Ds1rbg7ca18O6/02Dvr9Jw8J+1YPGPtHDyf5dg82+WcPRfeWD1T3hw9j9b
-YPcvWnD4KHfg+Q88cPoIWeD6+Fjw++g74PzYOvD9yB3g/rgc8P+n/+AAl/7wAYfh4AJ34PADcP5g
-BGD9cAVQ4GEGQN9yBzDCYgeNGXMJEKRjCa2U9ArwhmQL4IV1DNmi5Q3AZ3YOuYTmD6mD9xCZZucR
-iWX4EnlI6BNpR/kUWSrpFUkp+RY5DOoXKQv6GCIpaxkI7fsaAgtsGvIKfBvh7Wwc0ex8HcHPbR6x
-zn0fobFtIHYA/SGBk20iVeL+I2qv7iQ1xP4lSpHuJhWm/ycqc+8n/sOAKQpV8CnepYAq6jfxK76H
-gSzTVHItnmmCLrM2cy9+S4MwkxhzMWdoBDJy+nQzR0oENFLcdTUnLAU2Mr51NwcOBjgb2vY45vAG
-Ofu89jrG0gY72572PK/uhj27gPY+j9CGP5ti9kBvsoZBhH92Qk+UhkNkYXZEL3aHRURDd0XzqQdH
-LV/3R9OLB0kNQfdJs20HSu0j90uciYdM1kB3TXxrh062IndPXE2HUJYEd1E8L4dSdeZ3UxwRh1RV
-yHdU+/OHVjWqd1blEAdYHsb3WMTyB1n+qPdapNQHW96K91yEtgddvmz3XmSYB1+eTvdgTbSHYYdr
-d2ItlodjZ013ZA14h2VHL3dl7VqHZycRd2fNPIdpBvN3aa0eh2rm1XdrljsHbM/x9212HQdur9P3
-b1X/B3CPtfdxNeEHcm+X93MVwwd0T3n3dP7fh3Y4lnd23sGHeBh4d3i+o4d5+Fp3ep6Fh3vYPHd8
-fmeHfbged35eSYd/mAB3AAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB
-AAEAAQABAgMBAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB
-AAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA
-AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB
-AAEAAQABAAEAAQABAAEAAQABAAEAAf//x8ABAP//ubAABP//x8ABCP//x8ABDEVEVABFU1QARVdU
-AEVQVAAEslgAAAAAAQWk7AEAAAACB4YfggAAAAMJZ1MDAAAABAtIhoQAAAAFDSsLhQAAAAYPDD8G
-AAAABxDtcocAAAAIEs6mCAAAAAkVn8qJAAAACheA/goAAAALGWIxiwAAAAwdJeoMAAAADSHa5Q0A
-AAAOJZ6djgAAAA8nf9EPAAAAECpQ9ZAAAAARLDIpEQAAABIuE1ySAAAAEzDnJBMAAAAUM7hIlAAA
-ABU2jBAVAAAAFkO3G5YAAAAXAAAAAQAAAAE=
-"""
-
-TZICAL_EST5EDT = """
-BEGIN:VTIMEZONE
-TZID:US-Eastern
-LAST-MODIFIED:19870101T000000Z
-TZURL:http://zones.stds_r_us.net/tz/US-Eastern
-BEGIN:STANDARD
-DTSTART:19671029T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-TZNAME:EST
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:19870405T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-TZNAME:EDT
-END:DAYLIGHT
-END:VTIMEZONE
-"""
-
-
-class TZTest(unittest.TestCase):
-    def testStrStart1(self):
-        self.assertEqual(datetime(2003, 4, 6, 1, 59,
-                                  tzinfo=tz.tzstr("EST5EDT")).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00,
-                                  tzinfo=tz.tzstr("EST5EDT")).tzname(), "EDT")
-
-    def testStrEnd1(self):
-        self.assertEqual(datetime(2003, 10, 26, 0, 59,
-                                  tzinfo=tz.tzstr("EST5EDT")).tzname(), "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00,
-                                  tzinfo=tz.tzstr("EST5EDT")).tzname(), "EST")
-
-    def testStrStart2(self):
-        s = "EST5EDT,4,0,6,7200,10,0,26,7200,3600"
-        self.assertEqual(datetime(2003, 4, 6, 1, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-
-    def testStrEnd2(self):
-        s = "EST5EDT,4,0,6,7200,10,0,26,7200,3600"
-        self.assertEqual(datetime(2003, 10, 26, 0, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-
-    def testStrStart3(self):
-        s = "EST5EDT,4,1,0,7200,10,-1,0,7200,3600"
-        self.assertEqual(datetime(2003, 4, 6, 1, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-
-    def testStrEnd3(self):
-        s = "EST5EDT,4,1,0,7200,10,-1,0,7200,3600"
-        self.assertEqual(datetime(2003, 10, 26, 0, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-
-    def testStrStart4(self):
-        s = "EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00"
-        self.assertEqual(datetime(2003, 4, 6, 1, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-
-    def testStrEnd4(self):
-        s = "EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00"
-        self.assertEqual(datetime(2003, 10, 26, 0, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-
-    def testStrStart5(self):
-        s = "EST5EDT4,95/02:00:00,298/02:00"
-        self.assertEqual(datetime(2003, 4, 6, 1, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-
-    def testStrEnd5(self):
-        s = "EST5EDT4,95/02:00:00,298/02"
-        self.assertEqual(datetime(2003, 10, 26, 0, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-
-    def testStrStart6(self):
-        s = "EST5EDT4,J96/02:00:00,J299/02:00"
-        self.assertEqual(datetime(2003, 4, 6, 1, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-
-    def testStrEnd6(self):
-        s = "EST5EDT4,J96/02:00:00,J299/02"
-        self.assertEqual(datetime(2003, 10, 26, 0, 59,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00,
-                                  tzinfo=tz.tzstr(s)).tzname(), "EST")
-
-    def testStrStr(self):
-        # Test that tz.tzstr() won't throw an error if given a str instead
-        # of a unicode literal.
-        self.assertEqual(datetime(2003, 4, 6, 1, 59,
-                                  tzinfo=tz.tzstr(str("EST5EDT"))).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00,
-                                  tzinfo=tz.tzstr(str("EST5EDT"))).tzname(), "EDT")
-
-    def testStrCmp1(self):
-        self.assertEqual(tz.tzstr("EST5EDT"),
-                         tz.tzstr("EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00"))
-
-    def testStrCmp2(self):
-        self.assertEqual(tz.tzstr("EST5EDT"),
-                         tz.tzstr("EST5EDT,4,1,0,7200,10,-1,0,7200,3600"))
-
-    def testRangeCmp1(self):
-        from dateutil.relativedelta import SU
-        self.assertEqual(tz.tzstr("EST5EDT"),
-                         tz.tzrange("EST", -18000, "EDT", -14400,
-                                 relativedelta(hours=+2,
-                                               month=4, day=1,
-                                               weekday=SU(+1)),
-                                 relativedelta(hours=+1,
-                                               month=10, day=31,
-                                               weekday=SU(-1))))
-
-    def testRangeCmp2(self):
-        self.assertEqual(tz.tzstr("EST5EDT"),
-                         tz.tzrange("EST", -18000, "EDT"))
-
-    def testFileStart1(self):
-        tzc = tz.tzfile(BytesIO(base64.b64decode(TZFILE_EST5EDT)))
-        self.assertEqual(datetime(2003, 4, 6, 1, 59, tzinfo=tzc).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00, tzinfo=tzc).tzname(), "EDT")
-
-    def testFileEnd1(self):
-        tzc = tz.tzfile(BytesIO(base64.b64decode(TZFILE_EST5EDT)))
-        self.assertEqual(datetime(2003, 10, 26, 0, 59, tzinfo=tzc).tzname(),
-                         "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00, tzinfo=tzc).tzname(),
-                         "EST")
-
-    def testZoneInfoFileStart1(self):
-        tz = zoneinfo.gettz("EST5EDT")
-        self.assertEqual(datetime(2003, 4, 6, 1, 59, tzinfo=tz).tzname(), "EST",
-                         MISSING_TARBALL)
-        self.assertEqual(datetime(2003, 4, 6, 2, 00, tzinfo=tz).tzname(), "EDT")
-
-    def testZoneInfoFileEnd1(self):
-        tzc = zoneinfo.gettz("EST5EDT")
-        self.assertEqual(datetime(2003, 10, 26, 0, 59, tzinfo=tzc).tzname(),
-                         "EDT", MISSING_TARBALL)
-        self.assertEqual(datetime(2003, 10, 26, 1, 00, tzinfo=tzc).tzname(),
-                         "EST")
-
-    def testZoneInfoOffsetSignal(self):
-        utc = zoneinfo.gettz("UTC")
-        nyc = zoneinfo.gettz("America/New_York")
-        self.assertNotEqual(utc, None, MISSING_TARBALL)
-        self.assertNotEqual(nyc, None)
-        t0 = datetime(2007, 11, 4, 0, 30, tzinfo=nyc)
-        t1 = t0.astimezone(utc)
-        t2 = t1.astimezone(nyc)
-        self.assertEqual(t0, t2)
-        self.assertEqual(nyc.dst(t0), timedelta(hours=1))
-
-    def testTzNameNone(self):
-        gmt5 = tz.tzoffset(None, -18000)       # -5:00
-        self.assertIs(datetime(2003, 10, 26, 0, 0, tzinfo=gmt5).tzname(),
-                      None)
-
-    def testICalStart1(self):
-        tzc = tz.tzical(StringIO(TZICAL_EST5EDT)).get()
-        self.assertEqual(datetime(2003, 4, 6, 1, 59, tzinfo=tzc).tzname(), "EST")
-        self.assertEqual(datetime(2003, 4, 6, 2, 00, tzinfo=tzc).tzname(), "EDT")
-
-    def testICalEnd1(self):
-        tzc = tz.tzical(StringIO(TZICAL_EST5EDT)).get()
-        self.assertEqual(datetime(2003, 10, 26, 0, 59, tzinfo=tzc).tzname(), "EDT")
-        self.assertEqual(datetime(2003, 10, 26, 1, 00, tzinfo=tzc).tzname(), "EST")
-
-    def testRoundNonFullMinutes(self):
-        # This timezone has an offset of 5992 seconds in 1900-01-01.
-        tzc = tz.tzfile(BytesIO(base64.b64decode(EUROPE_HELSINKI)))
-        self.assertEqual(str(datetime(1900, 1, 1, 0, 0, tzinfo=tzc)),
-                             "1900-01-01 00:00:00+01:40")
-
-    def testLeapCountDecodesProperly(self):
-        # This timezone has leapcnt, and failed to decode until
-        # Eugene Oden notified about the issue.
-        tzc = tz.tzfile(BytesIO(base64.b64decode(NEW_YORK)))
-        self.assertEqual(datetime(2007, 3, 31, 20, 12).tzname(), None)  # What is the point of this?
-
-    def testGettz(self):
-        # bug 892569
-        str(tz.gettz('UTC'))
-
-    def testGetTzEquality(self):
-        self.assertEqual(tz.gettz('UTC'), tz.gettz('UTC'))
-
-    def testBrokenIsDstHandling(self):
-        # tzrange._isdst() was using a date() rather than a datetime().
-        # Issue reported by Lennart Regebro.
-        dt = datetime(2007, 8, 6, 4, 10, tzinfo=tz.tzutc())
-        self.assertEqual(dt.astimezone(tz=tz.gettz("GMT+2")),
-                          datetime(2007, 8, 6, 6, 10, tzinfo=tz.tzstr("GMT+2")))
-
-    def testGMTHasNoDaylight(self):
-        # tz.tzstr("GMT+2") improperly considered daylight saving time.
-        # Issue reported by Lennart Regebro.
-        dt = datetime(2007, 8, 6, 4, 10)
-        self.assertEqual(tz.gettz("GMT+2").dst(dt), timedelta(0))
-
-    def testGMTOffset(self):
-        # GMT and UTC offsets have inverted signal when compared to the
-        # usual TZ variable handling.
-        dt = datetime(2007, 8, 6, 4, 10, tzinfo=tz.tzutc())
-        self.assertEqual(dt.astimezone(tz=tz.tzstr("GMT+2")),
-                          datetime(2007, 8, 6, 6, 10, tzinfo=tz.tzstr("GMT+2")))
-        self.assertEqual(dt.astimezone(tz=tz.gettz("UTC-2")),
-                          datetime(2007, 8, 6, 2, 10, tzinfo=tz.tzstr("UTC-2")))
-
-    def testTimeOnlyUTC(self):
-        # https://github.com/dateutil/dateutil/issues/132
-        # tzutc doesn't care
-        tz_utc = tz.tzutc()
-        self.assertEqual(dt_time(13, 20, tzinfo=tz_utc).utcoffset(),
-                         timedelta(0))
-
-    def testTimeOnlyOffset(self):
-        # tzoffset doesn't care
-        tz_offset = tz.tzoffset('+3', 3600)
-        self.assertEqual(dt_time(13, 20, tzinfo=tz_offset).utcoffset(),
-                         timedelta(seconds=3600))
-
-    def testTimeOnlyLocal(self):
-        # tzlocal returns None
-        tz_local = tz.tzlocal()
-        self.assertIs(dt_time(13, 20, tzinfo=tz_local).utcoffset(), None)
-
-    def testTimeOnlyRange(self):
-        # tzrange returns None
-        tz_range = tz.tzrange('dflt')
-        self.assertIs(dt_time(13, 20, tzinfo=tz_range).utcoffset(), None)
-
-    def testTimeOnlyGettz(self):
-        # gettz returns None
-        tz_get = tz.gettz('Europe/Minsk')
-        self.assertIs(dt_time(13, 20, tzinfo=tz_get).utcoffset(), None)
-
-    @unittest.skipIf(IS_WIN, "requires Unix")
-    def testTZSetDoesntCorrupt(self):
-        # if we start in non-UTC then tzset UTC make sure parse doesn't get
-        # confused
-        os.environ['TZ'] = 'UTC'
-        _time.tzset()
-        # this should parse to UTC timezone not the original timezone
-        dt = parse('2014-07-20T12:34:56+00:00')
-        self.assertEqual(str(dt), '2014-07-20 12:34:56+00:00')
-
-@unittest.skipUnless(IS_WIN, "Requires Windows")
-class TzWinTest(unittest.TestCase):
-    def testTzResLoadName(self):
-        # This may not work right on non-US locales.
-        tzr = tzwin.tzres()
-        self.assertEqual(tzr.load_name(112), "Eastern Standard Time")
-
-    def testTzResNameFromString(self):
-        tzr = tzwin.tzres()
-        self.assertEqual(tzr.name_from_string('@tzres.dll,-221'),
-                         'Alaskan Daylight Time')
-
-        self.assertEqual(tzr.name_from_string('Samoa Daylight Time'),
-                         'Samoa Daylight Time')
-
-        with self.assertRaises(ValueError):
-            tzr.name_from_string('@tzres.dll,100')
-
-    def testIsdstZoneWithNoDaylightSaving(self):
-        tz = tzwin.tzwin("UTC")
-        dt = parse("2013-03-06 19:08:15")
-        self.assertFalse(tz._isdst(dt))
-
-    def testOffset(self):
-        tz = tzwin.tzwin("Cape Verde Standard Time")
-        self.assertEqual(tz.utcoffset(datetime(1995, 5, 21, 12, 9, 13)),
-                         timedelta(-1, 82800))
-
-    def testLocal(self):
-        # Not sure how to pin a local time zone, so for now we're just going
-        # to run this and make sure it doesn't raise an error
-        # See Github Issue #135: https://github.com/dateutil/dateutil/issues/135
-        datetime.now(tzwin.tzwinlocal())
-
-        datetime(2014, 3, 11, tzinfo=tzwin.tzwinlocal()).utcoffset()
-
-    def testTzwinName(self):
-        # https://github.com/dateutil/dateutil/issues/143
-        tw = tz.tzwin('Eastern Standard Time')
-
-        # Cover the transitions for at least two years.
-        ESTs = 'Eastern Standard Time'
-        EDTs = 'Eastern Daylight Time'
-        transition_dates = [(datetime(2015, 3, 8, 0, 59), ESTs),
-                            (datetime(2015, 3, 8, 2, 1), EDTs),
-                            (datetime(2015, 11, 1, 1, 59), EDTs),
-                            (datetime(2015, 11, 1, 3, 1), ESTs),
-                            (datetime(2016, 3, 13, 0, 59), ESTs),
-                            (datetime(2016, 3, 13, 2, 1), EDTs),
-                            (datetime(2016, 11, 6, 1, 59), EDTs),
-                            (datetime(2016, 11, 6, 3, 1), ESTs)]
-
-        for t_date, expected in transition_dates:
-            self.assertEqual(t_date.replace(tzinfo=tw).tzname(), expected)
-
-    def testTzwinRepr(self):
-        tw = tz.tzwin('Yakutsk Standard Time')
-        self.assertEqual(repr(tw), 'tzwin(' +
-                                   repr('Yakutsk Standard Time') + ')')
-
-    def testTzWinEquality(self):
-        # https://github.com/dateutil/dateutil/issues/151
-        tzwin_names = ('Eastern Standard Time',
-                       'West Pacific Standard Time',
-                       'Yakutsk Standard Time',
-                       'Iran Standard Time',
-                       'UTC')
-
-        for tzwin_name in tzwin_names:
-            # Get two different instances to compare
-            tw1 = tz.tzwin(tzwin_name)
-            tw2 = tz.tzwin(tzwin_name)
-
-            self.assertEqual(tw1, tw2)
-
-    def testTzWinInequality(self):
-        # https://github.com/dateutil/dateutil/issues/151
-        # Note these last two currently differ only in their name.
-        tzwin_names = (('Eastern Standard Time', 'Yakutsk Standard Time'),
-                       ('Greenwich Standard Time', 'GMT Standard Time'),
-                       ('GMT Standard Time', 'UTC'),
-                       ('E. South America Standard Time',
-                        'Argentina Standard Time'))
-
-        for tzwn1, tzwn2 in tzwin_names:
-            # Get two different instances to compare            
-            tw1 = tz.tzwin(tzwn1)
-            tw2 = tz.tzwin(tzwn2)
-
-            self.assertNotEqual(tw1, tw2)
-
-    @unittest.skipUnless(TZWinContext.tz_change_allowed(),
-        'Skipping unless tz changes are allowed.')
-    def testTzwinLocalName(self):
-        # https://github.com/dateutil/dateutil/issues/143
-        ESTs = 'Eastern Standard Time'
-        EDTs = 'Eastern Daylight Time'
-        transition_dates = [(datetime(2015, 3, 8, 0, 59), ESTs),
-                            (datetime(2015, 3, 8, 2, 1), EDTs),
-                            (datetime(2015, 11, 1, 1, 59), EDTs),
-                            (datetime(2015, 11, 1, 3, 1), ESTs),
-                            (datetime(2016, 3, 13, 0, 59), ESTs),
-                            (datetime(2016, 3, 13, 2, 1), EDTs),
-                            (datetime(2016, 11, 6, 1, 59), EDTs),
-                            (datetime(2016, 11, 6, 3, 1), ESTs)]
-
-        with TZWinContext('Eastern Standard Time'):
-            tw = tz.tzwinlocal()
-
-            for t_date, expected in transition_dates:
-                self.assertEqual(t_date.replace(tzinfo=tw).tzname(), expected)
-
-    def testTzWinLocalRepr(self):
-        tw = tz.tzwinlocal()
-        self.assertEqual(repr(tw), 'tzwinlocal()')
-
-    @unittest.skipUnless(TZWinContext.tz_change_allowed(),
-        'Skipping unless tz changes are allowed.')
-    def testTzwinLocalRepr(self):
-        # https://github.com/dateutil/dateutil/issues/143
-        with TZWinContext('Eastern Standard Time'):
-            tw = tz.tzwinlocal()
-
-            self.assertEqual(str(tw), 'tzwinlocal(' +
-                                      repr('Eastern Standard Time') + ')')
-
-        with TZWinContext('Pacific Standard Time'):
-            tw = tz.tzwinlocal()
-
-            self.assertEqual(str(tw), 'tzwinlocal(' + 
-                                      repr('Pacific Standard Time') + ')')
-
-    @unittest.skipUnless(TZWinContext.tz_change_allowed(),
-        'Skipping unless tz changes are allowed.')
-    def testTzwinLocalEquality(self):
-        tw_est = tz.tzwin('Eastern Standard Time')
-        tw_pst = tz.tzwin('Pacific Standard Time')
-
-        with TZWinContext('Eastern Standard Time'):
-            twl1 = tz.tzwinlocal()
-            twl2 = tz.tzwinlocal()
-
-            self.assertEqual(twl1, twl2)
-            self.assertEqual(twl1, tw_est)
-            self.assertNotEqual(twl1, tw_pst)
-
-        with TZWinContext('Pacific Standard Time'):
-            twl1 = tz.tzwinlocal()
-            twl2 = tz.tzwinlocal()
-            tw = tz.tzwin('Pacific Standard Time')
-
-            self.assertEqual(twl1, twl2)
-            self.assertEqual(twl1, tw)
-            self.assertEqual(twl1, tw_pst)
-            self.assertNotEqual(twl1, tw_est)
-
diff --git a/lib/dateutil/tz/_common.py b/lib/dateutil/tz/_common.py
index bbce0fb998616d19e2df96b026764b892e67380d..212e8ce95abebd9e7b090083a08067f7a435dcd9 100644
--- a/lib/dateutil/tz/_common.py
+++ b/lib/dateutil/tz/_common.py
@@ -1,6 +1,13 @@
 from six import PY3
+from six.moves import _thread
+
+from datetime import datetime, timedelta, tzinfo
+import copy
+
+ZERO = timedelta(0)
+
+__all__ = ['tzname_in_python2', 'enfold']
 
-__all__ = ['tzname_in_python2']
 
 def tzname_in_python2(namefunc):
     """Change unicode output into bytestrings in Python 2
@@ -15,4 +22,359 @@ def tzname_in_python2(namefunc):
 
         return name
 
-    return adjust_encoding
\ No newline at end of file
+    return adjust_encoding
+
+
+# The following is adapted from Alexander Belopolsky's tz library
+# https://github.com/abalkin/tz
+if hasattr(datetime, 'fold'):
+    # Python 3.6+ situation: datetime already provides the PEP 495 ``fold`` attribute
+    def enfold(dt, fold=1):
+        """
+        Provides a unified interface for assigning the ``fold`` attribute to
+        datetimes both before and after the implementation of PEP-495.
+
+        :param fold:
+            The value for the ``fold`` attribute in the returned datetime. This
+            should be either 0 or 1.
+
+        :return:
+            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+            ``fold`` for all versions of Python. In versions prior to
+            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+            subclass of :py:class:`datetime.datetime` with the ``fold``
+            attribute added, if ``fold`` is 1.
+
+        .. versionadded:: 2.6.0
+        """
+        return dt.replace(fold=fold)
+
+else:
+    class _DatetimeWithFold(datetime):
+        """
+        This is a class designed to provide a PEP 495-compliant interface for
+        Python versions before 3.6. It is used only for dates in a fold, so
+        the ``fold`` attribute is fixed at ``1``.
+
+        .. versionadded:: 2.6.0
+        """
+        __slots__ = ()
+
+        @property
+        def fold(self):
+            return 1
+
+    def enfold(dt, fold=1):
+        """
+        Provides a unified interface for assigning the ``fold`` attribute to
+        datetimes both before and after the implementation of PEP-495.
+
+        :param fold:
+            The value for the ``fold`` attribute in the returned datetime. This
+            should be either 0 or 1.
+
+        :return:
+            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+            ``fold`` for all versions of Python. In versions prior to
+            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+            subclass of :py:class:`datetime.datetime` with the ``fold``
+            attribute added, if ``fold`` is 1.
+
+        .. versionadded:: 2.6.0
+        """
+        if getattr(dt, 'fold', 0) == fold:
+            return dt
+
+        args = dt.timetuple()[:6]
+        args += (dt.microsecond, dt.tzinfo)
+
+        if fold:
+            return _DatetimeWithFold(*args)
+        else:
+            return datetime(*args)
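+
+# Illustrative usage sketch (not part of the upstream sources): ``enfold`` is
+# typically used to select between the two occurrences of an ambiguous wall
+# time. The zone name below is only an example and the exact offsets depend on
+# the installed tz data.
+#
+#     from datetime import datetime
+#     from dateutil import tz
+#
+#     nyc = tz.gettz('America/New_York')
+#     dt = datetime(2011, 11, 6, 1, 30, tzinfo=nyc)   # ambiguous wall time
+#     enfold(dt, fold=0).utcoffset()  # first occurrence: DST offset (-4:00)
+#     enfold(dt, fold=1).utcoffset()  # second occurrence: standard offset (-5:00)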
+
+
+class _tzinfo(tzinfo):
+    """
+    Base class for all ``dateutil`` ``tzinfo`` objects.
+    """
+
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+
+        dt = dt.replace(tzinfo=self)
+
+        wall_0 = enfold(dt, fold=0)
+        wall_1 = enfold(dt, fold=1)
+
+        same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+        same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
+
+        return same_dt and not same_offset
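+
+    # Illustrative example (not from upstream): for a zone following US DST
+    # rules, the wall time 2011-11-06 01:30 occurs twice (once at UTC-4 and
+    # once at UTC-5), so is_ambiguous() returns True for it, whereas
+    # 2011-11-06 03:30 corresponds to a single UTC instant and returns False.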
+
+    def _fold_status(self, dt_utc, dt_wall):
+        """
+        Determine the fold status of a "wall" datetime, given a representation
+        of the same datetime as a (naive) UTC datetime. This is calculated based
+        on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
+        datetimes, and that this offset is the actual number of hours separating
+        ``dt_utc`` and ``dt_wall``.
+
+        :param dt_utc:
+            Representation of the datetime as UTC
+
+        :param dt_wall:
+            Representation of the datetime as "wall time". This parameter must
+            either have a `fold` attribute or have a fold-naive
+            :class:`datetime.tzinfo` attached, otherwise the calculation may
+            fail.
+        """
+        if self.is_ambiguous(dt_wall):
+            delta_wall = dt_wall - dt_utc
+            _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
+        else:
+            _fold = 0
+
+        return _fold
+
+    def _fold(self, dt):
+        return getattr(dt, 'fold', 0)
+
+    def _fromutc(self, dt):
+        """
+        Given a timezone-aware datetime in a given timezone, calculates a
+        timezone-aware datetime in a new timezone.
+
+        Since this is the one time that we *know* we have an unambiguous
+        datetime object, we take this opportunity to determine whether the
+        datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+        :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+        """
+
+        # Re-implement the algorithm from Python's datetime.py
+        if not isinstance(dt, datetime):
+            raise TypeError("fromutc() requires a datetime argument")
+        if dt.tzinfo is not self:
+            raise ValueError("dt.tzinfo is not self")
+
+        dtoff = dt.utcoffset()
+        if dtoff is None:
+            raise ValueError("fromutc() requires a non-None utcoffset() "
+                             "result")
+
+        # The original datetime.py code assumes that `dst()` defaults to
+        # zero during ambiguous times. PEP 495 inverts this presumption, so
+        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
+        dtdst = dt.dst()
+        if dtdst is None:
+            raise ValueError("fromutc() requires a non-None dst() result")
+        delta = dtoff - dtdst
+        if delta:
+            dt += delta
+            # Set fold=1 so we can default to being in the fold for
+            # ambiguous dates.
+            dtdst = enfold(dt, fold=1).dst()
+            if dtdst is None:
+                raise ValueError("fromutc(): dt.dst gave inconsistent "
+                                 "results; cannot convert")
+        return dt + dtdst
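+
+    # Worked example (illustrative, not from upstream): converting the UTC
+    # instant 2003-07-01 16:00 in a US-Eastern-style zone: interpreting 16:00
+    # as wall time gives utcoffset() == -4:00 and dst() == 1:00, so
+    # delta == -5:00 (the standard offset); dt becomes 11:00, whose dst() is
+    # 1:00, and the method returns 12:00, i.e. 16:00 UTC renders as 12:00 EDT.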
+
+    def fromutc(self, dt):
+        """
+        Given a timezone-aware datetime in a given timezone, calculates a
+        timezone-aware datetime in a new timezone.
+
+        Since this is the one time that we *know* we have an unambiguous
+        datetime object, we take this opportunity to determine whether the
+        datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+        :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+        """
+        dt_wall = self._fromutc(dt)
+
+        # Calculate the fold status given the two datetimes.
+        _fold = self._fold_status(dt, dt_wall)
+
+        # Set the default fold value for ambiguous dates
+        return enfold(dt_wall, fold=_fold)
+
+
+class tzrangebase(_tzinfo):
+    """
+    This is an abstract base class for time zones represented by an annual
+    transition into and out of DST. Child classes should implement the following
+    methods:
+
+        * ``__init__(self, *args, **kwargs)``
+        * ``transitions(self, year)`` - this is expected to return a tuple of
+          datetimes representing the DST on and off transitions in standard
+          time.
+
+    A fully initialized ``tzrangebase`` subclass should also provide the
+    following attributes:
+        * ``hasdst``: Boolean whether or not the zone uses DST.
+        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
+          representing the respective UTC offsets.
+        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
+          abbreviations in DST and STD, respectively.
+
+    .. versionadded:: 2.6.0
+    """
+    def __init__(self):
+        raise NotImplementedError('tzrangebase is an abstract base class')
+
+    def utcoffset(self, dt):
+        isdst = self._isdst(dt)
+
+        if isdst is None:
+            return None
+        elif isdst:
+            return self._dst_offset
+        else:
+            return self._std_offset
+
+    def dst(self, dt):
+        isdst = self._isdst(dt)
+
+        if isdst is None:
+            return None
+        elif isdst:
+            return self._dst_base_offset
+        else:
+            return ZERO
+
+    @tzname_in_python2
+    def tzname(self, dt):
+        if self._isdst(dt):
+            return self._dst_abbr
+        else:
+            return self._std_abbr
+
+    def fromutc(self, dt):
+        """ Given a datetime in UTC, return local time """
+        if not isinstance(dt, datetime):
+            raise TypeError("fromutc() requires a datetime argument")
+
+        if dt.tzinfo is not self:
+            raise ValueError("dt.tzinfo is not self")
+
+        # Get transitions - if there are none, fixed offset
+        transitions = self.transitions(dt.year)
+        if transitions is None:
+            return dt + self.utcoffset(dt)
+
+        # Get the transition times in UTC
+        dston, dstoff = transitions
+
+        dston -= self._std_offset
+        dstoff -= self._std_offset
+
+        utc_transitions = (dston, dstoff)
+        dt_utc = dt.replace(tzinfo=None)
+
+        isdst = self._naive_isdst(dt_utc, utc_transitions)
+
+        if isdst:
+            dt_wall = dt + self._dst_offset
+        else:
+            dt_wall = dt + self._std_offset
+
+        _fold = int(not isdst and self.is_ambiguous(dt_wall))
+
+        return enfold(dt_wall, fold=_fold)
+
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+        if not self.hasdst:
+            return False
+
+        start, end = self.transitions(dt.year)
+
+        dt = dt.replace(tzinfo=None)
+        return (end <= dt < end + self._dst_base_offset)
+
+    def _isdst(self, dt):
+        if not self.hasdst:
+            return False
+        elif dt is None:
+            return None
+
+        transitions = self.transitions(dt.year)
+
+        if transitions is None:
+            return False
+
+        dt = dt.replace(tzinfo=None)
+
+        isdst = self._naive_isdst(dt, transitions)
+
+        # Handle ambiguous dates
+        if not isdst and self.is_ambiguous(dt):
+            return not self._fold(dt)
+        else:
+            return isdst
+
+    def _naive_isdst(self, dt, transitions):
+        dston, dstoff = transitions
+
+        dt = dt.replace(tzinfo=None)
+
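+        # Illustrative note (not from upstream): in northern-hemisphere zones
+        # the DST-on transition precedes the DST-off transition within a year
+        # (dston < dstoff), so DST holds between them; in southern-hemisphere
+        # zones DST spans the year boundary (dstoff < dston), so the test is
+        # inverted.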
+        if dston < dstoff:
+            isdst = dston <= dt < dstoff
+        else:
+            isdst = not dstoff <= dt < dston
+
+        return isdst
+
+    @property
+    def _dst_base_offset(self):
+        return self._dst_offset - self._std_offset
+
+    __hash__ = None
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __repr__(self):
+        return "%s(...)" % self.__class__.__name__
+
+    __reduce__ = object.__reduce__
+
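+
+# A minimal illustrative tzrangebase subclass (not part of upstream dateutil),
+# showing the contract described in the class docstring: ``transitions(year)``
+# returns naive datetimes in standard time, and the offset/abbreviation
+# attributes are set in ``__init__``. The rules below approximate post-2007
+# US Eastern time purely for demonstration.
+class _ExampleEasternTZ(tzrangebase):
+    def __init__(self):
+        self._std_offset = timedelta(hours=-5)
+        self._dst_offset = timedelta(hours=-4)
+        self._std_abbr = 'EST'
+        self._dst_abbr = 'EDT'
+        self.hasdst = True
+
+    def transitions(self, year):
+        # Both values are naive datetimes expressed in standard time: DST
+        # starts at 2:00 EST on the second Sunday in March and ends at
+        # 2:00 EDT (1:00 EST) on the first Sunday in November.
+        def nth_sunday(month, n, hour):
+            d = datetime(year, month, 1, hour)
+            return d + timedelta(days=(6 - d.weekday()) % 7 + 7 * (n - 1))
+        return nth_sunday(3, 2, 2), nth_sunday(11, 1, 1)
+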
+
+def _total_seconds(td):
+    # Python 2.6 doesn't have a total_seconds() method on timedelta objects
+    return ((td.seconds + td.days * 86400) * 1000000 +
+            td.microseconds) // 1000000
+
+_total_seconds = getattr(timedelta, 'total_seconds', _total_seconds)
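+
+# Illustrative note (not from upstream): on Python 2.7+ the line above binds
+# the native ``timedelta.total_seconds`` (which returns a float, e.g.
+# ``_total_seconds(timedelta(minutes=90)) == 5400.0``); on Python 2.6 the
+# integer-returning fallback defined above is used instead.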
diff --git a/lib/dateutil/tz/tz.py b/lib/dateutil/tz/tz.py
index 56421fea8f3cb5eb60dcba94cb7e55c2fb1fe29f..6bee29168412ba65aba5b0dc20eaf2b0bb2e9b66 100644
--- a/lib/dateutil/tz/tz.py
+++ b/lib/dateutil/tz/tz.py
@@ -12,24 +12,30 @@ import struct
 import time
 import sys
 import os
+import bisect
+import copy
+
+from operator import itemgetter
+
+from contextlib import contextmanager
 
 from six import string_types, PY3
-from ._common import tzname_in_python2
+from ._common import tzname_in_python2, _tzinfo, _total_seconds
+from ._common import tzrangebase, enfold
 
 try:
     from .win import tzwin, tzwinlocal
 except ImportError:
     tzwin = tzwinlocal = None
 
-relativedelta = None
-parser = None
-rrule = None
-
 ZERO = datetime.timedelta(0)
-EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+EPOCHORDINAL = EPOCH.toordinal()
 
 class tzutc(datetime.tzinfo):
-
+    """
+    This is a tzinfo object that represents the UTC time zone.
+    """
     def utcoffset(self, dt):
         return ZERO
 
@@ -40,12 +46,33 @@ class tzutc(datetime.tzinfo):
     def tzname(self, dt):
         return "UTC"
 
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+        return False
+
     def __eq__(self, other):
+        if not isinstance(other, (tzutc, tzoffset)):
+            return NotImplemented
+
         return (isinstance(other, tzutc) or
                 (isinstance(other, tzoffset) and other._offset == ZERO))
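+    # Illustrative note (not from upstream): with these semantics
+    # ``tzutc() == tzoffset(None, 0)`` evaluates to True, while comparing
+    # against an unrelated type returns NotImplemented and defers to the
+    # other operand's comparison.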
 
+    __hash__ = None
+
     def __ne__(self, other):
-        return not self.__eq__(other)
+        return not (self == other)
 
     def __repr__(self):
         return "%s()" % self.__class__.__name__
@@ -54,9 +81,24 @@ class tzutc(datetime.tzinfo):
 
 
 class tzoffset(datetime.tzinfo):
+    """
+    A simple class for representing a fixed offset from UTC.
+
+    :param name:
+        The timezone name, to be returned when ``tzname()`` is called.
 
+    :param offset:
+        The time zone offset in seconds, or (since version 2.6.0) a
+        :py:class:`datetime.timedelta` object representing the offset.
+    """
     def __init__(self, name, offset):
         self._name = name
+
+        try:
+            # Allow a timedelta
+            offset = _total_seconds(offset)
+        except (TypeError, AttributeError):
+            pass
         self._offset = datetime.timedelta(seconds=offset)
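+        # Illustrative usage (not from upstream): after the conversion above,
+        # the following two constructions are expected to be equivalent:
+        #     tzoffset("BRST", -10800)
+        #     tzoffset("BRST", datetime.timedelta(hours=-3))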
 
     def utcoffset(self, dt):
@@ -65,36 +107,64 @@ class tzoffset(datetime.tzinfo):
     def dst(self, dt):
         return ZERO
 
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+        return False
+
     @tzname_in_python2
     def tzname(self, dt):
         return self._name
 
     def __eq__(self, other):
-        return (isinstance(other, tzoffset) and
-                self._offset == other._offset)
+        if not isinstance(other, tzoffset):
+            return NotImplemented
+
+        return self._offset == other._offset
+
+    __hash__ = None
 
     def __ne__(self, other):
-        return not self.__eq__(other)
+        return not (self == other)
 
     def __repr__(self):
         return "%s(%s, %s)" % (self.__class__.__name__,
                                repr(self._name),
-                               self._offset.days*86400+self._offset.seconds)
+                               int(_total_seconds(self._offset)))
 
     __reduce__ = object.__reduce__
 
 
-class tzlocal(datetime.tzinfo):
+class tzlocal(_tzinfo):
+    """
+    A :class:`tzinfo` subclass built around the ``time`` timezone functions.
+    """
     def __init__(self):
+        super(tzlocal, self).__init__()
+
         self._std_offset = datetime.timedelta(seconds=-time.timezone)
         if time.daylight:
             self._dst_offset = datetime.timedelta(seconds=-time.altzone)
         else:
             self._dst_offset = self._std_offset
 
+        self._dst_saved = self._dst_offset - self._std_offset
+        self._hasdst = bool(self._dst_saved)
+
     def utcoffset(self, dt):
-        if dt is None:
-            return dt
+        if dt is None and self._hasdst:
+            return None
 
         if self._isdst(dt):
             return self._dst_offset
@@ -102,8 +172,11 @@ class tzlocal(datetime.tzinfo):
             return self._std_offset
 
     def dst(self, dt):
+        if dt is None and self._hasdst:
+            return None
+
         if self._isdst(dt):
-            return self._dst_offset-self._std_offset
+            return self._dst_offset - self._std_offset
         else:
             return ZERO
 
@@ -111,7 +184,29 @@ class tzlocal(datetime.tzinfo):
     def tzname(self, dt):
         return time.tzname[self._isdst(dt)]
 
-    def _isdst(self, dt):
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+        naive_dst = self._naive_is_dst(dt)
+        return (not naive_dst and
+                (naive_dst != self._naive_is_dst(dt - self._dst_saved)))
+
+    def _naive_is_dst(self, dt):
+        timestamp = _datetime_to_timestamp(dt)
+        return time.localtime(timestamp + time.timezone).tm_isdst
+
+    def _isdst(self, dt, fold_naive=True):
         # We can't use mktime here. It is unstable when deciding if
         # the hour near to a change is DST or not.
         #
@@ -136,19 +231,32 @@ class tzlocal(datetime.tzinfo):
         #
         # Here is a more stable implementation:
         #
-        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
-                     + dt.hour * 3600
-                     + dt.minute * 60
-                     + dt.second)
-        return time.localtime(timestamp+time.timezone).tm_isdst
+        if not self._hasdst:
+            return False
+
+        # Check for ambiguous times:
+        dstval = self._naive_is_dst(dt)
+        fold = getattr(dt, 'fold', None)
+
+        if self.is_ambiguous(dt):
+            if fold is not None:
+                return not self._fold(dt)
+            else:
+                return True
+
+        return dstval
 
     def __eq__(self, other):
-        return (isinstance(other, tzlocal) and
-                (self._std_offset == other._std_offset and
-                 self._dst_offset == other._dst_offset))
+        if not isinstance(other, tzlocal):
+            return NotImplemented
+
+        return (self._std_offset == other._std_offset and
+                self._dst_offset == other._dst_offset)
+
+    __hash__ = None
 
     def __ne__(self, other):
-        return not self.__eq__(other)
+        return not (self == other)
 
     def __repr__(self):
         return "%s()" % self.__class__.__name__
@@ -157,7 +265,8 @@ class tzlocal(datetime.tzinfo):
 
 
 class _ttinfo(object):
-    __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
+    __slots__ = ["offset", "delta", "isdst", "abbr",
+                 "isstd", "isgmt", "dstoffset"]
 
     def __init__(self):
         for attr in self.__slots__:
@@ -173,16 +282,20 @@ class _ttinfo(object):
 
     def __eq__(self, other):
         if not isinstance(other, _ttinfo):
-            return False
+            return NotImplemented
+
         return (self.offset == other.offset and
                 self.delta == other.delta and
                 self.isdst == other.isdst and
                 self.abbr == other.abbr and
                 self.isstd == other.isstd and
-                self.isgmt == other.isgmt)
+                self.isgmt == other.isgmt and
+                self.dstoffset == other.dstoffset)
+
+    __hash__ = None
 
     def __ne__(self, other):
-        return not self.__eq__(other)
+        return not (self == other)
 
     def __getstate__(self):
         state = {}
@@ -196,12 +309,44 @@ class _ttinfo(object):
                 setattr(self, name, state[name])
 
 
-class tzfile(datetime.tzinfo):
+class _tzfile(object):
+    """
+    Lightweight class for holding the relevant transition and time zone
+    information read from binary tzfiles.
+    """
+    attrs = ['trans_list', 'trans_idx', 'ttinfo_list',
+             'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
+
+    def __init__(self, **kwargs):
+        for attr in self.attrs:
+            setattr(self, attr, kwargs.get(attr, None))
+
+
+class tzfile(_tzinfo):
+    """
+    This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
+    format timezone files to extract current and historical zone information.
 
-    # http://www.twinsun.com/tz/tz-link.htm
-    # ftp://ftp.iana.org/tz/tz*.tar.gz
+    :param fileobj:
+        This can be an opened file stream or a file name that the time zone
+        information can be read from.
+
+    :param filename:
+        This is an optional parameter specifying the source of the time zone
+        information in the event that ``fileobj`` is a file object. If omitted
+        and ``fileobj`` is a file stream, this parameter will be set either to
+        ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
+
+    See `Sources for Time Zone and Daylight Saving Time Data 
+    <http://www.twinsun.com/tz/tz-link.htm>`_ for more information. Time zone
+    files can be compiled from the `IANA Time Zone database files
+    <https://www.iana.org/time-zones>`_ with the `zic time zone compiler
+    <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
+    """
 
     def __init__(self, fileobj, filename=None):
+        super(tzfile, self).__init__()
+
         file_opened_here = False
         if isinstance(fileobj, string_types):
             self._filename = fileobj
@@ -214,6 +359,24 @@ class tzfile(datetime.tzinfo):
         else:
             self._filename = repr(fileobj)
 
+        if fileobj is not None:
+            if not file_opened_here:
+                fileobj = _ContextWrapper(fileobj)
+
+            with fileobj as file_stream:
+                tzobj = self._read_tzfile(file_stream)
+
+            self._set_tzdata(tzobj)
+
+    def _set_tzdata(self, tzobj):
+        """ Set the time zone data of this object from a _tzfile object """
+        # Copy the relevant attributes over as private attributes
+        for attr in _tzfile.attrs:
+            setattr(self, '_' + attr, getattr(tzobj, attr))
+
+    def _read_tzfile(self, fileobj):
+        out = _tzfile()
+
         # From tzfile(5):
         #
         # The time zone information files used by tzset(3)
@@ -223,176 +386,169 @@ class tzfile(datetime.tzinfo):
         # six four-byte values of type long, written in a
         # ``standard'' byte order (the high-order  byte
         # of the value is written first).
-        try:
-            if fileobj.read(4).decode() != "TZif":
-                raise ValueError("magic not found")
+        if fileobj.read(4).decode() != "TZif":
+            raise ValueError("magic not found")
 
-            fileobj.read(16)
+        fileobj.read(16)
 
-            (
-                # The number of UTC/local indicators stored in the file.
-                ttisgmtcnt,
+        (
+            # The number of UTC/local indicators stored in the file.
+            ttisgmtcnt,
 
-                # The number of standard/wall indicators stored in the file.
-                ttisstdcnt,
+            # The number of standard/wall indicators stored in the file.
+            ttisstdcnt,
 
-                # The number of leap seconds for which data is
-                # stored in the file.
-                leapcnt,
+            # The number of leap seconds for which data is
+            # stored in the file.
+            leapcnt,
 
-                # The number of "transition times" for which data
-                # is stored in the file.
-                timecnt,
+            # The number of "transition times" for which data
+            # is stored in the file.
+            timecnt,
 
-                # The number of "local time types" for which data
-                # is stored in the file (must not be zero).
-                typecnt,
+            # The number of "local time types" for which data
+            # is stored in the file (must not be zero).
+            typecnt,
 
-                # The  number  of  characters  of "time zone
-                # abbreviation strings" stored in the file.
-                charcnt,
+            # The  number  of  characters  of "time zone
+            # abbreviation strings" stored in the file.
+            charcnt,
 
-            ) = struct.unpack(">6l", fileobj.read(24))
+        ) = struct.unpack(">6l", fileobj.read(24))
 
-            # The above header is followed by tzh_timecnt four-byte
-            # values  of  type long,  sorted  in ascending order.
-            # These values are written in ``standard'' byte order.
-            # Each is used as a transition time (as  returned  by
-            # time(2)) at which the rules for computing local time
-            # change.
+        # The above header is followed by tzh_timecnt four-byte
+        # values  of  type long,  sorted  in ascending order.
+        # These values are written in ``standard'' byte order.
+        # Each is used as a transition time (as  returned  by
+        # time(2)) at which the rules for computing local time
+        # change.
 
-            if timecnt:
-                self._trans_list = struct.unpack(">%dl" % timecnt,
-                                                 fileobj.read(timecnt*4))
-            else:
-                self._trans_list = []
-
-            # Next come tzh_timecnt one-byte values of type unsigned
-            # char; each one tells which of the different types of
-            # ``local time'' types described in the file is associated
-            # with the same-indexed transition time. These values
-            # serve as indices into an array of ttinfo structures that
-            # appears next in the file.
-
-            if timecnt:
-                self._trans_idx = struct.unpack(">%dB" % timecnt,
-                                                fileobj.read(timecnt))
-            else:
-                self._trans_idx = []
-
-            # Each ttinfo structure is written as a four-byte value
-            # for tt_gmtoff  of  type long,  in  a  standard  byte
-            # order, followed  by a one-byte value for tt_isdst
-            # and a one-byte  value  for  tt_abbrind.   In  each
-            # structure, tt_gmtoff  gives  the  number  of
-            # seconds to be added to UTC, tt_isdst tells whether
-            # tm_isdst should be set by  localtime(3),  and
-            # tt_abbrind serves  as an index into the array of
-            # time zone abbreviation characters that follow the
-            # ttinfo structure(s) in the file.
-
-            ttinfo = []
-
-            for i in range(typecnt):
-                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
-
-            abbr = fileobj.read(charcnt).decode()
-
-            # Then there are tzh_leapcnt pairs of four-byte
-            # values, written in  standard byte  order;  the
-            # first  value  of  each pair gives the time (as
-            # returned by time(2)) at which a leap second
-            # occurs;  the  second  gives the  total  number of
-            # leap seconds to be applied after the given time.
-            # The pairs of values are sorted in ascending order
-            # by time.
-
-            # Not used, for now
-            # if leapcnt:
-            #    leap = struct.unpack(">%dl" % (leapcnt*2),
-            #                         fileobj.read(leapcnt*8))
-
-            # Then there are tzh_ttisstdcnt standard/wall
-            # indicators, each stored as a one-byte value;
-            # they tell whether the transition times associated
-            # with local time types were specified as standard
-            # time or wall clock time, and are used when
-            # a time zone file is used in handling POSIX-style
-            # time zone environment variables.
-
-            if ttisstdcnt:
-                isstd = struct.unpack(">%db" % ttisstdcnt,
-                                      fileobj.read(ttisstdcnt))
-
-            # Finally, there are tzh_ttisgmtcnt UTC/local
-            # indicators, each stored as a one-byte value;
-            # they tell whether the transition times associated
-            # with local time types were specified as UTC or
-            # local time, and are used when a time zone file
-            # is used in handling POSIX-style time zone envi-
-            # ronment variables.
-
-            if ttisgmtcnt:
-                isgmt = struct.unpack(">%db" % ttisgmtcnt,
-                                      fileobj.read(ttisgmtcnt))
-
-            # ** Everything has been read **
-        finally:
-            if file_opened_here:
-                fileobj.close()
+        if timecnt:
+            out.trans_list = list(struct.unpack(">%dl" % timecnt,
+                                                  fileobj.read(timecnt*4)))
+        else:
+            out.trans_list = []
+
+        # Next come tzh_timecnt one-byte values of type unsigned
+        # char; each one tells which of the different types of
+        # ``local time'' types described in the file is associated
+        # with the same-indexed transition time. These values
+        # serve as indices into an array of ttinfo structures that
+        # appears next in the file.
+
+        if timecnt:
+            out.trans_idx = struct.unpack(">%dB" % timecnt,
+                                            fileobj.read(timecnt))
+        else:
+            out.trans_idx = []
+
+        # Each ttinfo structure is written as a four-byte value
+        # for tt_gmtoff  of  type long,  in  a  standard  byte
+        # order, followed  by a one-byte value for tt_isdst
+        # and a one-byte  value  for  tt_abbrind.   In  each
+        # structure, tt_gmtoff  gives  the  number  of
+        # seconds to be added to UTC, tt_isdst tells whether
+        # tm_isdst should be set by  localtime(3),  and
+        # tt_abbrind serves  as an index into the array of
+        # time zone abbreviation characters that follow the
+        # ttinfo structure(s) in the file.
+
+        ttinfo = []
+
+        for i in range(typecnt):
+            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
+
+        abbr = fileobj.read(charcnt).decode()
+
+        # Then there are tzh_leapcnt pairs of four-byte
+        # values, written in  standard byte  order;  the
+        # first  value  of  each pair gives the time (as
+        # returned by time(2)) at which a leap second
+        # occurs;  the  second  gives the  total  number of
+        # leap seconds to be applied after the given time.
+        # The pairs of values are sorted in ascending order
+        # by time.
+
+        # Not used, for now (but read anyway for correct file position)
+        if leapcnt:
+            leap = struct.unpack(">%dl" % (leapcnt*2),
+                                 fileobj.read(leapcnt*8))
+
+        # Then there are tzh_ttisstdcnt standard/wall
+        # indicators, each stored as a one-byte value;
+        # they tell whether the transition times associated
+        # with local time types were specified as standard
+        # time or wall clock time, and are used when
+        # a time zone file is used in handling POSIX-style
+        # time zone environment variables.
+
+        if ttisstdcnt:
+            isstd = struct.unpack(">%db" % ttisstdcnt,
+                                  fileobj.read(ttisstdcnt))
+
+        # Finally, there are tzh_ttisgmtcnt UTC/local
+        # indicators, each stored as a one-byte value;
+        # they tell whether the transition times associated
+        # with local time types were specified as UTC or
+        # local time, and are used when a time zone file
+        # is used in handling POSIX-style time zone envi-
+        # ronment variables.
+
+        if ttisgmtcnt:
+            isgmt = struct.unpack(">%db" % ttisgmtcnt,
+                                  fileobj.read(ttisgmtcnt))
 
         # Build ttinfo list
-        self._ttinfo_list = []
+        out.ttinfo_list = []
         for i in range(typecnt):
             gmtoff, isdst, abbrind = ttinfo[i]
             # Round to full-minutes if that's not the case. Python's
             # datetime doesn't accept sub-minute timezones. Check
             # http://python.org/sf/1447945 for some information.
-            gmtoff = (gmtoff+30)//60*60
+            gmtoff = 60 * ((gmtoff + 30) // 60)
             tti = _ttinfo()
             tti.offset = gmtoff
+            tti.dstoffset = datetime.timedelta(0)
             tti.delta = datetime.timedelta(seconds=gmtoff)
             tti.isdst = isdst
             tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
             tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
             tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
-            self._ttinfo_list.append(tti)
+            out.ttinfo_list.append(tti)
 
         # Replace ttinfo indexes for ttinfo objects.
-        trans_idx = []
-        for idx in self._trans_idx:
-            trans_idx.append(self._ttinfo_list[idx])
-        self._trans_idx = tuple(trans_idx)
+        out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
 
         # Set standard, dst, and before ttinfos. before will be
         # used when a given time is before any transitions,
         # and will be set to the first non-dst ttinfo, or to
         # the first dst, if all of them are dst.
-        self._ttinfo_std = None
-        self._ttinfo_dst = None
-        self._ttinfo_before = None
-        if self._ttinfo_list:
-            if not self._trans_list:
-                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
+        out.ttinfo_std = None
+        out.ttinfo_dst = None
+        out.ttinfo_before = None
+        if out.ttinfo_list:
+            if not out.trans_list:
+                out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
             else:
                 for i in range(timecnt-1, -1, -1):
-                    tti = self._trans_idx[i]
-                    if not self._ttinfo_std and not tti.isdst:
-                        self._ttinfo_std = tti
-                    elif not self._ttinfo_dst and tti.isdst:
-                        self._ttinfo_dst = tti
-                    if self._ttinfo_std and self._ttinfo_dst:
+                    tti = out.trans_idx[i]
+                    if not out.ttinfo_std and not tti.isdst:
+                        out.ttinfo_std = tti
+                    elif not out.ttinfo_dst and tti.isdst:
+                        out.ttinfo_dst = tti
+
+                    if out.ttinfo_std and out.ttinfo_dst:
                         break
                 else:
-                    if self._ttinfo_dst and not self._ttinfo_std:
-                        self._ttinfo_std = self._ttinfo_dst
+                    if out.ttinfo_dst and not out.ttinfo_std:
+                        out.ttinfo_std = out.ttinfo_dst
 
-                for tti in self._ttinfo_list:
+                for tti in out.ttinfo_list:
                     if not tti.isdst:
-                        self._ttinfo_before = tti
+                        out.ttinfo_before = tti
                         break
                 else:
-                    self._ttinfo_before = self._ttinfo_list[0]
+                    out.ttinfo_before = out.ttinfo_list[0]
 
         # Now fix transition times to become relative to wall time.
         #
@@ -401,43 +557,113 @@ class tzfile(datetime.tzinfo):
         # isgmt are off, so it should be in wall time. OTOH, it's
         # always in gmt time. Let me know if you have comments
         # about this.
-        laststdoffset = 0
-        self._trans_list = list(self._trans_list)
-        for i in range(len(self._trans_list)):
-            tti = self._trans_idx[i]
+        laststdoffset = None
+        for i, tti in enumerate(out.trans_idx):
             if not tti.isdst:
-                # This is std time.
-                self._trans_list[i] += tti.offset
-                laststdoffset = tti.offset
+                offset = tti.offset
+                laststdoffset = offset
             else:
-                # This is dst time. Convert to std.
-                self._trans_list[i] += laststdoffset
-        self._trans_list = tuple(self._trans_list)
-
-    def _find_ttinfo(self, dt, laststd=0):
-        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
-                     + dt.hour * 3600
-                     + dt.minute * 60
-                     + dt.second)
-        idx = 0
-        for trans in self._trans_list:
-            if timestamp < trans:
-                break
-            idx += 1
-        else:
+                if laststdoffset is not None:
+                    # Store the DST offset as well and update it in the list
+                    tti.dstoffset = tti.offset - laststdoffset
+                    out.trans_idx[i] = tti
+
+                offset = laststdoffset or 0
+
+            out.trans_list[i] += offset
+
+        # In case we missed any DST offsets on the way in for some reason, make
+        # a second pass over the list, looking for the /next/ DST offset.
+        laststdoffset = None
+        for i in reversed(range(len(out.trans_idx))):
+            tti = out.trans_idx[i]
+            if tti.isdst:
+                if not (tti.dstoffset or laststdoffset is None):
+                    tti.dstoffset = tti.offset - laststdoffset
+            else:
+                laststdoffset = tti.offset
+
+            if not isinstance(tti.dstoffset, datetime.timedelta):
+                tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
+
+            out.trans_idx[i] = tti
+
+        out.trans_idx = tuple(out.trans_idx)
+        out.trans_list = tuple(out.trans_list)
+
+        return out
+
+    def _find_last_transition(self, dt):
+        # If there's no list, there are no transitions to find
+        if not self._trans_list:
+            return None
+
+        timestamp = _datetime_to_timestamp(dt)
+
+        # Find where the timestamp fits in the transition list - if the
+        # timestamp is a transition time, it's part of the "after" period.
+        idx = bisect.bisect_right(self._trans_list, timestamp)
+
+        # We want to know when the previous transition was, so subtract off 1
+        return idx - 1
+
+    def _get_ttinfo(self, idx):
+        # For no list or after the last transition, default to _ttinfo_std
+        if idx is None or (idx + 1) == len(self._trans_list):
             return self._ttinfo_std
-        if idx == 0:
+
+        # If there is a list and the time is before it, return _ttinfo_before
+        if idx < 0:
             return self._ttinfo_before
-        if laststd:
-            while idx > 0:
-                tti = self._trans_idx[idx-1]
-                if not tti.isdst:
-                    return tti
-                idx -= 1
-            else:
-                return self._ttinfo_std
-        else:
-            return self._trans_idx[idx-1]
+
+        return self._trans_idx[idx]
+
+    def _find_ttinfo(self, dt):
+        idx = self._resolve_ambiguous_time(dt)
+
+        return self._get_ttinfo(idx)
+
+    def is_ambiguous(self, dt, idx=None):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+        if idx is None:
+            idx = self._find_last_transition(dt)
+
+        # Calculate the difference in offsets from current to previous
+        timestamp = _datetime_to_timestamp(dt)
+        tti = self._get_ttinfo(idx)
+
+        if idx is None or idx <= 0:
+            return False
+
+        od = self._get_ttinfo(idx - 1).offset - tti.offset
+        tt = self._trans_list[idx]          # Transition time
+
+        return timestamp < tt + od
+
+    def _resolve_ambiguous_time(self, dt):
+        idx = self._find_last_transition(dt)
+
+        # If we have no transitions, return the index
+        _fold = self._fold(dt)
+        if idx is None or idx == 0:
+            return idx
+
+        # fold=0 resolves an ambiguous time to the earlier occurrence
+        idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
+
+        return idx - idx_offset
 
     def utcoffset(self, dt):
         if dt is None:
@@ -445,119 +671,202 @@ class tzfile(datetime.tzinfo):
 
         if not self._ttinfo_std:
             return ZERO
+
         return self._find_ttinfo(dt).delta
 
     def dst(self, dt):
+        if dt is None:
+            return None
+
         if not self._ttinfo_dst:
             return ZERO
+
         tti = self._find_ttinfo(dt)
+
         if not tti.isdst:
             return ZERO
 
         # The documentation says that utcoffset()-dst() must
         # be constant for every dt.
-        return tti.delta-self._find_ttinfo(dt, laststd=1).delta
-
-        # An alternative for that would be:
-        #
-        # return self._ttinfo_dst.offset-self._ttinfo_std.offset
-        #
-        # However, this class stores historical changes in the
-        # dst offset, so I belive that this wouldn't be the right
-        # way to implement this.
+        return tti.dstoffset
 
     @tzname_in_python2
     def tzname(self, dt):
-        if not self._ttinfo_std:
+        if not self._ttinfo_std or dt is None:
             return None
         return self._find_ttinfo(dt).abbr
 
     def __eq__(self, other):
         if not isinstance(other, tzfile):
-            return False
+            return NotImplemented
         return (self._trans_list == other._trans_list and
                 self._trans_idx == other._trans_idx and
                 self._ttinfo_list == other._ttinfo_list)
 
+    __hash__ = None
+
     def __ne__(self, other):
-        return not self.__eq__(other)
+        return not (self == other)
 
     def __repr__(self):
         return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
 
     def __reduce__(self):
-        if not os.path.isfile(self._filename):
-            raise ValueError("Unpickable %s class" % self.__class__.__name__)
-        return (self.__class__, (self._filename,))
+        return self.__reduce_ex__(None)
+
+    def __reduce_ex__(self, protocol):
+        return (self.__class__, (None, self._filename), self.__dict__)
+
+
+class tzrange(tzrangebase):
+    """
+    The ``tzrange`` object is a time zone specified by a set of offsets and
+    abbreviations, equivalent to the way the ``TZ`` variable can be specified
+    in POSIX-like systems, but using Python delta objects to specify DST
+    start, end and offsets.
+
+    :param stdabbr:
+        The abbreviation for standard time (e.g. ``'EST'``).
+
+    :param stdoffset:
+        An integer or :class:`datetime.timedelta` object or equivalent
+        specifying the base offset from UTC.
+
+        If unspecified, +00:00 is used.
+
+    :param dstabbr:
+        The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
+
+        If specified, with no other DST information, DST is assumed to occur
+        and the default behavior for ``dstoffset``, ``start`` and ``end`` is
+        used. If unspecified and no other DST information is specified, it
+        is assumed that this zone has no DST.
+
+        If this is unspecified and other DST information *is* specified,
+        DST occurs in the zone but the time zone abbreviation is left
+        unchanged.
 
+    :param dstoffset:
+        An integer or :class:`datetime.timedelta` object or equivalent
+        specifying the UTC offset during DST. If unspecified and any other DST
+        information is specified, it is assumed to be the STD offset +1 hour.
 
-class tzrange(datetime.tzinfo):
+    :param start:
+        A :class:`relativedelta.relativedelta` object or equivalent specifying
+        the time and time of year that daylight savings time starts. To specify,
+        for example, that DST starts at 2AM on the 2nd Sunday in March, pass:
+
+            ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
+
+        If unspecified and any other DST information is specified, the default
+        value is 2 AM on the first Sunday in April.
+
+    :param end:
+        A :class:`relativedelta.relativedelta` object or equivalent representing
+        the time and time of year that daylight savings time ends, with the
+        same specification method as in ``start``. One note is that this should
+        point to the first time in the *standard* zone, so if a transition
+        occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM,
+        set the `hours` parameter to +1.
+
+
+    **Examples:**
+
+    .. testsetup:: tzrange
+
+        from dateutil.tz import tzrange, tzstr
+
+    .. doctest:: tzrange
+
+        >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
+        True
+
+        >>> from dateutil.relativedelta import *
+        >>> range1 = tzrange("EST", -18000, "EDT")
+        >>> range2 = tzrange("EST", -18000, "EDT", -14400,
+        ...                  relativedelta(hours=+2, month=4, day=1,
+        ...                                weekday=SU(+1)),
+        ...                  relativedelta(hours=+1, month=10, day=31,
+        ...                                weekday=SU(-1)))
+        >>> tzstr('EST5EDT') == range1 == range2
+        True
+
+    """
     def __init__(self, stdabbr, stdoffset=None,
                  dstabbr=None, dstoffset=None,
                  start=None, end=None):
+
         global relativedelta
-        if not relativedelta:
-            from dateutil import relativedelta
+        from dateutil import relativedelta
+
         self._std_abbr = stdabbr
         self._dst_abbr = dstabbr
+
+        try:
+            stdoffset = _total_seconds(stdoffset)
+        except (TypeError, AttributeError):
+            pass
+
+        try:
+            dstoffset = _total_seconds(dstoffset)
+        except (TypeError, AttributeError):
+            pass
+
         if stdoffset is not None:
             self._std_offset = datetime.timedelta(seconds=stdoffset)
         else:
             self._std_offset = ZERO
+
         if dstoffset is not None:
             self._dst_offset = datetime.timedelta(seconds=dstoffset)
         elif dstabbr and stdoffset is not None:
-            self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
+            self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
         else:
             self._dst_offset = ZERO
+
         if dstabbr and start is None:
             self._start_delta = relativedelta.relativedelta(
                 hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
         else:
             self._start_delta = start
+
         if dstabbr and end is None:
             self._end_delta = relativedelta.relativedelta(
                 hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
         else:
             self._end_delta = end
 
-    def utcoffset(self, dt):
-        if dt is None:
-            return None
+        self._dst_base_offset_ = self._dst_offset - self._std_offset
+        self.hasdst = bool(self._start_delta)
 
-        if self._isdst(dt):
-            return self._dst_offset
-        else:
-            return self._std_offset
+    def transitions(self, year):
+        """
+        For a given year, get the DST on and off transition times, expressed
+        always on the standard time side. For zones with no transitions, this
+        function returns ``None``.
 
-    def dst(self, dt):
-        if self._isdst(dt):
-            return self._dst_offset-self._std_offset
-        else:
-            return ZERO
+        :param year:
+            The year whose transitions you would like to query.
 
-    @tzname_in_python2
-    def tzname(self, dt):
-        if self._isdst(dt):
-            return self._dst_abbr
-        else:
-            return self._std_abbr
+        :return:
+            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+            ``(dston, dstoff)`` for zones with an annual DST transition, or
+            ``None`` for fixed offset zones.
+        """
+        if not self.hasdst:
+            return None
 
-    def _isdst(self, dt):
-        if not self._start_delta:
-            return False
-        year = datetime.datetime(dt.year, 1, 1)
-        start = year+self._start_delta
-        end = year+self._end_delta
-        dt = dt.replace(tzinfo=None)
-        if start < end:
-            return dt >= start and dt < end
-        else:
-            return dt >= start or dt < end
+        base_year = datetime.datetime(year, 1, 1)
+
+        start = base_year + self._start_delta
+        end = base_year + self._end_delta
+
+        return (start, end)
 
     def __eq__(self, other):
         if not isinstance(other, tzrange):
-            return False
+            return NotImplemented
+
         return (self._std_abbr == other._std_abbr and
                 self._dst_abbr == other._dst_abbr and
                 self._std_offset == other._std_offset and
@@ -565,21 +874,44 @@ class tzrange(datetime.tzinfo):
                 self._start_delta == other._start_delta and
                 self._end_delta == other._end_delta)
 
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __repr__(self):
-        return "%s(...)" % self.__class__.__name__
-
-    __reduce__ = object.__reduce__
+    @property
+    def _dst_base_offset(self):
+        return self._dst_base_offset_
 
 
 class tzstr(tzrange):
-
-    def __init__(self, s):
+    """
+    ``tzstr`` objects are time zone objects specified by a time-zone string as
+    it would be passed to a ``TZ`` variable on POSIX-style systems (see
+    the `GNU C Library: TZ Variable`_ for more details).
+
+    There is one notable exception, which is that POSIX-style time zones use an
+    inverted offset format, so normally ``GMT+3`` would be parsed as an offset
+    3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
+    offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
+    behavior, pass a ``True`` value to ``posix_offset``.
+
+    The :class:`tzrange` object provides the same functionality, but is
+    specified using :class:`relativedelta.relativedelta` objects rather than
+    strings.
+
+    :param s:
+        A time zone string in ``TZ`` variable format. This can be a
+        :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
+        or a stream emitting unicode characters (e.g. :class:`StringIO`).
+
+    :param posix_offset:
+        Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
+        ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
+        POSIX standard.
+
+    .. _`GNU C Library: TZ Variable`:
+        https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
+    """
+    def __init__(self, s, posix_offset=False):
         global parser
-        if not parser:
-            from dateutil import parser
+        from dateutil import parser
+
         self._s = s
 
         res = parser._parsetz(s)
@@ -588,7 +920,7 @@ class tzstr(tzrange):
 
         # Here we break the compatibility with the TZ variable handling.
         # GMT-3 actually *means* the timezone -3.
-        if res.stdabbr in ("GMT", "UTC"):
+        if res.stdabbr in ("GMT", "UTC") and not posix_offset:
             res.stdoffset *= -1
 
         # We must initialize it first, since _delta() needs
@@ -606,7 +938,10 @@ class tzstr(tzrange):
             if self._start_delta:
                 self._end_delta = self._delta(res.end, isend=1)
 
+        self.hasdst = bool(self._start_delta)
+
     def _delta(self, x, isend=0):
+        from dateutil import relativedelta
         kwargs = {}
         if x.month is not None:
             kwargs["month"] = x.month
@@ -642,8 +977,8 @@ class tzstr(tzrange):
             # Convert to standard time, to follow the documented way
             # of working with the extra hour. See the documentation
             # of the tzinfo class.
-            delta = self._dst_offset-self._std_offset
-            kwargs["seconds"] -= delta.seconds+delta.days*86400
+            delta = self._dst_offset - self._std_offset
+            kwargs["seconds"] -= delta.seconds + delta.days * 86400
         return relativedelta.relativedelta(**kwargs)
 
     def __repr__(self):
@@ -655,14 +990,16 @@ class _tzicalvtzcomp(object):
                  tzname=None, rrule=None):
         self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
         self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
-        self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
+        self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
         self.isdst = isdst
         self.tzname = tzname
         self.rrule = rrule
 
 
-class _tzicalvtz(datetime.tzinfo):
+class _tzicalvtz(_tzinfo):
     def __init__(self, tzid, comps=[]):
+        super(_tzicalvtz, self).__init__()
+
         self._tzid = tzid
         self._comps = comps
         self._cachedate = []
@@ -671,22 +1008,25 @@ class _tzicalvtz(datetime.tzinfo):
     def _find_comp(self, dt):
         if len(self._comps) == 1:
             return self._comps[0]
+
         dt = dt.replace(tzinfo=None)
+
         try:
-            return self._cachecomp[self._cachedate.index(dt)]
+            return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))]
         except ValueError:
             pass
-        lastcomp = None
+
+
         lastcompdt = None
+        lastcomp = None
+
         for comp in self._comps:
-            if not comp.isdst:
-                # Handle the extra hour in DST -> STD
-                compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
-            else:
-                compdt = comp.rrule.before(dt, inc=True)
+            compdt = self._find_compdt(comp, dt)
+
             if compdt and (not lastcompdt or lastcompdt < compdt):
                 lastcompdt = compdt
                 lastcomp = comp
+
         if not lastcomp:
             # RFC says nothing about what to do when a given
             # time is before the first onset date. We'll look for the
@@ -698,13 +1038,24 @@ class _tzicalvtz(datetime.tzinfo):
                     break
             else:
                 lastcomp = comp[0]
-        self._cachedate.insert(0, dt)
+
+        self._cachedate.insert(0, (dt, self._fold(dt)))
         self._cachecomp.insert(0, lastcomp)
+
         if len(self._cachedate) > 10:
             self._cachedate.pop()
             self._cachecomp.pop()
+
         return lastcomp
 
+    def _find_compdt(self, comp, dt):
+        if comp.tzoffsetdiff < ZERO and self._fold(dt):
+            dt -= comp.tzoffsetdiff
+
+        compdt = comp.rrule.before(dt, inc=True)
+
+        return compdt
+
     def utcoffset(self, dt):
         if dt is None:
             return None
@@ -729,35 +1080,65 @@ class _tzicalvtz(datetime.tzinfo):
 
 
 class tzical(object):
+    """
+    This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
+    as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
+
+    :param `fileobj`:
+        A file or stream in iCalendar format, which should be UTF-8 encoded
+        with CRLF endings.
+
+    .. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
+    """
     def __init__(self, fileobj):
         global rrule
-        if not rrule:
-            from dateutil import rrule
+        from dateutil import rrule
 
         if isinstance(fileobj, string_types):
             self._s = fileobj
             # ical should be encoded in UTF-8 with CRLF
             fileobj = open(fileobj, 'r')
-        elif hasattr(fileobj, "name"):
-            self._s = fileobj.name
+            file_opened_here = True
         else:
-            self._s = repr(fileobj)
+            self._s = getattr(fileobj, 'name', repr(fileobj))
+            fileobj = _ContextWrapper(fileobj)
 
         self._vtz = {}
 
-        self._parse_rfc(fileobj.read())
+        with fileobj as fobj:
+            self._parse_rfc(fobj.read())
 
     def keys(self):
+        """
+        Retrieves the available time zones as a list.
+        """
         return list(self._vtz.keys())
 
     def get(self, tzid=None):
+        """
+        Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
+
+        :param tzid:
+            If there is exactly one time zone available, omitting ``tzid``
+            or passing :py:const:`None` returns it. Otherwise a valid
+            key (which can be retrieved from :func:`keys`) is required.
+
+        :raises ValueError:
+            Raised if ``tzid`` is not specified but there are either more
+            or fewer than 1 zone defined.
+
+        :returns:
+            Returns either a :py:class:`datetime.tzinfo` object representing
+            the relevant time zone or :py:const:`None` if the ``tzid`` was
+            not found.
+        """
         if tzid is None:
-            keys = list(self._vtz.keys())
-            if len(keys) == 0:
+            if len(self._vtz) == 0:
                 raise ValueError("no timezones defined")
-            elif len(keys) > 1:
+            elif len(self._vtz) > 1:
                 raise ValueError("more than one timezone available")
-            tzid = keys[0]
+            tzid = next(iter(self._vtz))
+
         return self._vtz.get(tzid)
 
     def _parse_offset(self, s):
@@ -770,11 +1151,11 @@ class tzical(object):
         else:
             signal = +1
         if len(s) == 4:
-            return (int(s[:2])*3600+int(s[2:])*60)*signal
+            return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
         elif len(s) == 6:
-            return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
+            return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
         else:
-            raise ValueError("invalid offset: "+s)
+            raise ValueError("invalid offset: " + s)
 
     def _parse_rfc(self, s):
         lines = s.splitlines()
@@ -899,7 +1280,10 @@ class tzical(object):
 
 if sys.platform != "win32":
     TZFILES = ["/etc/localtime", "localtime"]
-    TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
+    TZPATHS = ["/usr/share/zoneinfo",
+               "/usr/lib/zoneinfo",
+               "/usr/share/lib/zoneinfo",
+               "/etc/zoneinfo"]
 else:
     TZFILES = []
     TZPATHS = []
@@ -957,9 +1341,11 @@ def gettz(name=None):
                         tz = tzwin(name)
                     except WindowsError:
                         tz = None
+
                 if not tz:
-                    from dateutil.zoneinfo import gettz
-                    tz = gettz(name)
+                    from dateutil.zoneinfo import get_zonefile_instance
+                    tz = get_zonefile_instance().get(name)
+
                 if not tz:
                     for c in name:
                         # name must have at least one offset to be a tzstr
@@ -976,4 +1362,103 @@ def gettz(name=None):
                             tz = tzlocal()
     return tz
 
+
+def datetime_exists(dt, tz=None):
+    """
+    Given a datetime and a time zone, determine whether or not a given datetime
+    would fall in a gap.
+
+    :param dt:
+        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+        is provided.)
+
+    :param tz:
+        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+        ``None`` or not provided, the datetime's own time zone will be used.
+
+    :return:
+        Returns a boolean value indicating whether or not the "wall time"
+        exists in ``tz``.
+    """
+    if tz is None:
+        if dt.tzinfo is None:
+            raise ValueError('Datetime is naive and no time zone provided.')
+        tz = dt.tzinfo
+
+    dt = dt.replace(tzinfo=None)
+
+    # This is essentially a test of whether or not the datetime can survive
+    # a round trip to UTC.
+    dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz)
+    dt_rt = dt_rt.replace(tzinfo=None)
+
+    return dt == dt_rt
+
+
+def datetime_ambiguous(dt, tz=None):
+    """
+    Given a datetime and a time zone, determine whether or not a given datetime
+    is ambiguous (i.e if there are two times differentiated only by their DST
+    status).
+
+    :param dt:
+        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+        is provided.)
+
+    :param tz:
+        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+        ``None`` or not provided, the datetime's own time zone will be used.
+
+    :return:
+        Returns a boolean value indicating whether or not the "wall time" is
+        ambiguous in ``tz``.
+
+    .. versionadded:: 2.6.0
+    """
+    if tz is None:
+        if dt.tzinfo is None:
+            raise ValueError('Datetime is naive and no time zone provided.')
+
+        tz = dt.tzinfo
+
+    # If a time zone defines its own "is_ambiguous" function, we'll use that.
+    is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
+    if is_ambiguous_fn is not None:
+        try:
+            return tz.is_ambiguous(dt)
+        except:
+            pass
+
+    # If it doesn't come out and tell us it's ambiguous, we'll just check if
+    # the fold attribute has any effect on this particular date and time.
+    dt = dt.replace(tzinfo=tz)
+    wall_0 = enfold(dt, fold=0)
+    wall_1 = enfold(dt, fold=1)
+
+    same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+    same_dst = wall_0.dst() == wall_1.dst()
+
+    return not (same_offset and same_dst)
+
+
+def _datetime_to_timestamp(dt):
+    """
+    Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds
+    since January 1, 1970, ignoring the time zone.
+    """
+    return _total_seconds((dt.replace(tzinfo=None) - EPOCH))
+
+class _ContextWrapper(object):
+    """
+    Class for wrapping contexts so that they are passed through in a
+    ``with`` statement.
+    """
+    def __init__(self, context):
+        self.context = context
+
+    def __enter__(self):
+        return self.context
+
+    def __exit__(*args, **kwargs):
+        pass
+
 # vim:ts=4:sw=4:et
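
The `tz.py` changes above make `tzfile` (and the other `tzinfo` classes) fold-aware and add the module-level helpers `datetime_exists()` and `datetime_ambiguous()`. As a quick orientation, here is a minimal sketch (not part of the patch) of how those helpers behave around a DST transition, assuming the vendored `dateutil` is importable, that `enfold` is re-exported from `dateutil.tz` as in upstream 2.6, and that the host can resolve the `America/New_York` zone name:

```
from datetime import datetime

from dateutil import tz

eastern = tz.gettz('America/New_York')   # tzfile from system or bundled zoneinfo

# 2017-03-12 02:30 was skipped (spring forward); 2017-11-05 01:30 occurred twice.
skipped = datetime(2017, 3, 12, 2, 30)
repeated = datetime(2017, 11, 5, 1, 30)

print(tz.datetime_exists(skipped, tz=eastern))      # False
print(tz.datetime_ambiguous(repeated, tz=eastern))  # True

# enfold() selects which side of the fold an ambiguous wall time refers to.
first = tz.enfold(repeated.replace(tzinfo=eastern), fold=0)
second = tz.enfold(repeated.replace(tzinfo=eastern), fold=1)
print(first.utcoffset(), second.utcoffset())        # -04:00 (EDT), then -05:00 (EST)
```

Both helpers probe the zone itself (a UTC round trip for `datetime_exists()`, a fold comparison for `datetime_ambiguous()`), so they do not depend on PEP 495 support in the interpreter.
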
diff --git a/lib/dateutil/tz/win.py b/lib/dateutil/tz/win.py
index 321602052cf7039a87f1fde92ed9df0905d28c43..9f4e5519f97aeb7eaec7f1a549faba02cb70d9d9 100644
--- a/lib/dateutil/tz/win.py
+++ b/lib/dateutil/tz/win.py
@@ -12,7 +12,8 @@ except ValueError:
     # ValueError is raised on non-Windows systems for some horrible reason.
     raise ImportError("Running tzwin on non-Windows system")
 
-from ._common import tzname_in_python2
+from ._common import tzname_in_python2, _tzinfo
+from ._common import tzrangebase
 
 __all__ = ["tzwin", "tzwinlocal", "tzres"]
 
@@ -41,7 +42,7 @@ class tzres(object):
     Class for accessing `tzres.dll`, which contains timezone name related
     resources.
 
-    ..versionadded:: 2.5.0
+    .. versionadded:: 2.5.0
     """
     p_wchar = ctypes.POINTER(wintypes.WCHAR)        # Pointer to a wide char
 
@@ -112,13 +113,18 @@ class tzres(object):
         return self.load_name(offset)
 
 
-class tzwinbase(datetime.tzinfo):
+class tzwinbase(tzrangebase):
     """tzinfo class based on win32's timezones available in the registry."""
+    def __init__(self):
+        raise NotImplementedError('tzwinbase is an abstract base class')
+
     def __eq__(self, other):
         # Compare on all relevant dimensions, including name.
-        return (isinstance(other, tzwinbase) and
-                (self._stdoffset == other._stdoffset and
-                 self._dstoffset == other._dstoffset and
+        if not isinstance(other, tzwinbase):
+            return NotImplemented
+
+        return  (self._std_offset == other._std_offset and
+                 self._dst_offset == other._dst_offset and
                  self._stddayofweek == other._stddayofweek and
                  self._dstdayofweek == other._dstdayofweek and
                  self._stdweeknumber == other._stdweeknumber and
@@ -127,60 +133,58 @@ class tzwinbase(datetime.tzinfo):
                  self._dsthour == other._dsthour and
                  self._stdminute == other._stdminute and
                  self._dstminute == other._dstminute and
-                 self._stdname == other._stdname and
-                 self._dstname == other._dstname))
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def utcoffset(self, dt):
-        if self._isdst(dt):
-            return datetime.timedelta(minutes=self._dstoffset)
-        else:
-            return datetime.timedelta(minutes=self._stdoffset)
-
-    def dst(self, dt):
-        if self._isdst(dt):
-            minutes = self._dstoffset - self._stdoffset
-            return datetime.timedelta(minutes=minutes)
-        else:
-            return datetime.timedelta(0)
-
-    @tzname_in_python2
-    def tzname(self, dt):
-        if self._isdst(dt):
-            return self._dstname
-        else:
-            return self._stdname
+                 self._std_abbr == other._std_abbr and
+                 self._dst_abbr == other._dst_abbr)
 
     @staticmethod
     def list():
         """Return a list of all time zones known to the system."""
-        handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
-        tzkey = winreg.OpenKey(handle, TZKEYNAME)
-        result = [winreg.EnumKey(tzkey, i)
-                  for i in range(winreg.QueryInfoKey(tzkey)[0])]
-        tzkey.Close()
-        handle.Close()
+        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+            with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
+                result = [winreg.EnumKey(tzkey, i)
+                          for i in range(winreg.QueryInfoKey(tzkey)[0])]
         return result
 
     def display(self):
         return self._display
 
-    def _isdst(self, dt):
-        if not self._dstmonth:
-            # dstmonth == 0 signals the zone has no daylight saving time
-            return False
-        dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
+    def transitions(self, year):
+        """
+        For a given year, get the DST on and off transition times, expressed
+        always on the standard time side. For zones with no transitions, this
+        function returns ``None``.
+
+        :param year:
+            The year whose transitions you would like to query.
+
+        :return:
+            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+            ``(dston, dstoff)`` for zones with an annual DST transition, or
+            ``None`` for fixed offset zones.
+        """
+
+        if not self.hasdst:
+            return None
+
+        dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
                                self._dsthour, self._dstminute,
                                self._dstweeknumber)
-        dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
+
+        dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
                                 self._stdhour, self._stdminute,
                                 self._stdweeknumber)
-        if dston < dstoff:
-            return dston <= dt.replace(tzinfo=None) < dstoff
-        else:
-            return not dstoff <= dt.replace(tzinfo=None) < dston
+
+        # Ambiguous dates default to the STD side
+        dstoff -= self._dst_base_offset
+
+        return dston, dstoff
+
+    def _get_hasdst(self):
+        return self._dstmonth != 0
+
+    @property
+    def _dst_base_offset(self):
+        return self._dst_base_offset_
 
 
 class tzwin(tzwinbase):
@@ -194,15 +198,17 @@ class tzwin(tzwinbase):
             with winreg.OpenKey(handle, tzkeyname) as tzkey:
                 keydict = valuestodict(tzkey)
 
-        self._stdname = keydict["Std"]
-        self._dstname = keydict["Dlt"]
+        self._std_abbr = keydict["Std"]
+        self._dst_abbr = keydict["Dlt"]
 
         self._display = keydict["Display"]
 
         # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
         tup = struct.unpack("=3l16h", keydict["TZI"])
-        self._stdoffset = -tup[0]-tup[1]          # Bias + StandardBias * -1
-        self._dstoffset = self._stdoffset-tup[2]  # + DaylightBias * -1
+        stdoffset = -tup[0]-tup[1]          # Bias + StandardBias * -1
+        dstoffset = stdoffset-tup[2]        # + DaylightBias * -1
+        self._std_offset = datetime.timedelta(minutes=stdoffset)
+        self._dst_offset = datetime.timedelta(minutes=dstoffset)
 
         # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
         # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
@@ -218,6 +224,9 @@ class tzwin(tzwinbase):
          self._dsthour,
          self._dstminute) = tup[12:17]
 
+        self._dst_base_offset_ = self._dst_offset - self._std_offset
+        self.hasdst = self._get_hasdst()
+
     def __repr__(self):
         return "tzwin(%s)" % repr(self._name)
 
@@ -226,27 +235,28 @@ class tzwin(tzwinbase):
 
 
 class tzwinlocal(tzwinbase):
-
     def __init__(self):
         with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
-
             with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
                 keydict = valuestodict(tzlocalkey)
 
-            self._stdname = keydict["StandardName"]
-            self._dstname = keydict["DaylightName"]
+            self._std_abbr = keydict["StandardName"]
+            self._dst_abbr = keydict["DaylightName"]
 
             try:
                 tzkeyname = text_type('{kn}\{sn}').format(kn=TZKEYNAME,
-                                                          sn=self._stdname)
+                                                          sn=self._std_abbr)
                 with winreg.OpenKey(handle, tzkeyname) as tzkey:
                     _keydict = valuestodict(tzkey)
                     self._display = _keydict["Display"]
             except OSError:
                 self._display = None
 
-        self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
-        self._dstoffset = self._stdoffset-keydict["DaylightBias"]
+        stdoffset = -keydict["Bias"]-keydict["StandardBias"]
+        dstoffset = stdoffset-keydict["DaylightBias"]
+
+        self._std_offset = datetime.timedelta(minutes=stdoffset)
+        self._dst_offset = datetime.timedelta(minutes=dstoffset)
 
         # For reasons unclear, in this particular key, the day of week has been
         # moved to the END of the SYSTEMTIME structure.
@@ -268,12 +278,15 @@ class tzwinlocal(tzwinbase):
 
         self._dstdayofweek = tup[7]
 
+        self._dst_base_offset_ = self._dst_offset - self._std_offset
+        self.hasdst = self._get_hasdst()
+
     def __repr__(self):
         return "tzwinlocal()"
 
     def __str__(self):
         # str will return the standard name, not the daylight name.
-        return "tzwinlocal(%s)" % repr(self._stdname)
+        return "tzwinlocal(%s)" % repr(self._std_abbr)
 
     def __reduce__(self):
         return (self.__class__, ())
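
With `tzwinbase` now deriving from `tzrangebase`, the registry-backed Windows zones expose the same `transitions()` helper documented above for `tzrange`. A Windows-only sketch (not part of the patch; the registry key name is only an example):

```
from datetime import datetime

from dateutil.tz import tzwin   # typically bound to None on non-Windows hosts

if tzwin is not None:
    eastern = tzwin('Eastern Standard Time')    # a key under TZKEYNAME
    dston, dstoff = eastern.transitions(2017)   # naive datetimes on the STD side
    print(dston, dstoff)
    print(datetime(2017, 7, 1, tzinfo=eastern).tzname())  # the "Dlt" (daylight) name
```
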
diff --git a/lib/dateutil/zoneinfo/__init__.py b/lib/dateutil/zoneinfo/__init__.py
index 8156092ef6b2cc4df1eb62ee7a08e6493e4a10eb..7145e05cf30c8451fe431be041a7f00f0f10f9fb 100644
--- a/lib/dateutil/zoneinfo/__init__.py
+++ b/lib/dateutil/zoneinfo/__init__.py
@@ -6,7 +6,6 @@ import tempfile
 import shutil
 import json
 
-from subprocess import check_call
 from tarfile import TarFile
 from pkgutil import get_data
 from io import BytesIO
@@ -14,7 +13,7 @@ from contextlib import closing
 
 from dateutil.tz import tzfile
 
-__all__ = ["gettz", "gettz_db_metadata", "rebuild"]
+__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"]
 
 ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
 METADATA_FN = 'METADATA'
@@ -72,17 +71,92 @@ class ZoneInfoFile(object):
             self.zones = dict()
             self.metadata = None
 
+    def get(self, name, default=None):
+        """
+        Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
+        for retrieving zones from the zone dictionary.
+
+        :param name:
+            The name of the zone to retrieve (generally an IANA zone name).
+
+        :param default:
+            The value to return in the event of a missing key.
+
+        .. versionadded:: 2.6.0
+
+        """
+        return self.zones.get(name, default)
+
 
 # The current API has gettz as a module function, although in fact it taps into
 # a stateful class. So as a workaround for now, without changing the API, we
 # will create a new "global" class instance the first time a user requests a
 # timezone. Ugly, but adheres to the api.
 #
-# TODO: deprecate this.
+# TODO: Remove after deprecation period.
 _CLASS_ZONE_INSTANCE = list()
 
+def get_zonefile_instance(new_instance=False):
+    """
+    This is a convenience function which provides a :class:`ZoneInfoFile`
+    instance using the data provided by the ``dateutil`` package. By default, it
+    caches a single instance of the ZoneInfoFile object and returns that.
+
+    :param new_instance:
+        If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
+        used as the cached instance for the next call. Otherwise, new instances
+        are created only as necessary.
+
+    :return:
+        Returns a :class:`ZoneInfoFile` object.
+
+    .. versionadded:: 2.6
+    """
+    if new_instance:
+        zif = None
+    else:
+        zif = getattr(get_zonefile_instance, '_cached_instance', None)
+
+    if zif is None:
+        zif = ZoneInfoFile(getzoneinfofile_stream())
+
+        get_zonefile_instance._cached_instance = zif
+
+    return zif
 
 def gettz(name):
+    """
+    This retrieves a time zone from the local zoneinfo tarball that is packaged
+    with dateutil.
+
+    :param name:
+        An IANA-style time zone name, as found in the zoneinfo file.
+
+    :return:
+        Returns a :class:`dateutil.tz.tzfile` time zone object.
+
+    .. warning::
+        It is generally inadvisable to use this function, and it is only
+        provided for API compatibility with earlier versions. This is *not*
+        equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
+        time zone based on the inputs, favoring system zoneinfo. This is ONLY
+        for accessing the dateutil-specific zoneinfo (which may be out of
+        date compared to the system zoneinfo).
+
+    .. deprecated:: 2.6
+        If you need to use a specific zoneinfo file over the system zoneinfo,
+        instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
+        :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
+
+        Use :func:`get_zonefile_instance` to retrieve an instance of the
+        dateutil-provided zoneinfo.
+    """
+    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
+                  "to use the dateutil-provided zoneinfo files, instantiate a "
+                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
+                  "instead. See the documentation for details.",
+                  DeprecationWarning)
+
     if len(_CLASS_ZONE_INSTANCE) == 0:
         _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
     return _CLASS_ZONE_INSTANCE[0].zones.get(name)
@@ -93,8 +167,19 @@ def gettz_db_metadata():
 
     See `zonefile_metadata`_
 
-    :returns: A dictionary with the database metadata
+    :returns:
+        A dictionary with the database metadata
+
+    .. deprecated:: 2.6
+        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
+        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
     """
+    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
+                  "versions, to use the dateutil-provided zoneinfo files, "
+                  "instantiate a ZoneInfoFile object and query the 'metadata' "
+                  "attribute instead. See the documentation for details.",
+                  DeprecationWarning)
+
     if len(_CLASS_ZONE_INSTANCE) == 0:
         _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
     return _CLASS_ZONE_INSTANCE[0].metadata
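
Because `zoneinfo.gettz()` and `gettz_db_metadata()` now emit a `DeprecationWarning`, here is a short sketch (not part of the patch) of the replacement pattern the warnings point to:

```
from dateutil.zoneinfo import get_zonefile_instance

# A single cached ZoneInfoFile instance backed by dateutil-zoneinfo.tar.gz
zonefile = get_zonefile_instance()

print(len(zonefile.zones))                # number of bundled zone names
nyc = zonefile.get('America/New_York')    # a dateutil.tz.tzfile, or None if absent
print(zonefile.metadata)                  # tzdata release metadata, if present
```
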
diff --git a/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz
index 83b2e05188e7f27231a5cec913b921b6912af3e5..1d15597b4630ade143d8408477858d1ba18dc707 100644
Binary files a/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz and b/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ
diff --git a/lib/feedparser/api.py b/lib/feedparser/api.py
index 12eafd2a837037f3fe501aa8312a5f0e47575b84..614bd2d26ca53bc7e4ea1b9e6b2f599f3a6d6b94 100644
--- a/lib/feedparser/api.py
+++ b/lib/feedparser/api.py
@@ -60,7 +60,6 @@ from .sanitizer import replace_doctype
 from .sgml import *
 from .urls import _convert_to_idn, _makeSafeAbsoluteURI
 from .util import FeedParserDict
-from . import USER_AGENT
 
 bytes_ = type(b'')
 unicode_ = type('')
diff --git a/lib/feedparser/util.py b/lib/feedparser/util.py
index df36b3eb3e4f99b82317263addadad68b80ab616..f7c02c01eb27c397ef30a63e7049bc692f3fd833 100644
--- a/lib/feedparser/util.py
+++ b/lib/feedparser/util.py
@@ -122,23 +122,9 @@ class FeedParserDict(dict):
 
     def __setitem__(self, key, value):
         key = self.keymap.get(key, key)
-        if key == 'newznab_attr':
-            if isinstance(value, dict) and value.keys() == ['name', 'value']:
-                key = value['name']
-                value = value['value']
-
-            if not dict.__contains__(self, 'categories'):
-                dict.__setitem__(self, 'categories', [])
-
-            if key == 'category':
-                self['categories'].append(value)
-            else:
-                dict.__setitem__(self, key, value)
-        else:
-            if isinstance(key, list):
-                key = key[0]
-
-            return dict.__setitem__(self, key, value)
+        if isinstance(key, list):
+            key = key[0]
+        return dict.__setitem__(self, key, value)
 
     def setdefault(self, key, value):
         if key not in self:
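
With the newznab-specific branch removed, `FeedParserDict.__setitem__` is back to plain key normalisation: legacy keys are routed through `keymap`, and list-valued map entries collapse to their first name. A small sketch (not part of the patch), assuming the stock `keymap`, which maps legacy RSS names such as `channel` to `feed`:

```
from feedparser.util import FeedParserDict

d = FeedParserDict()
d['channel'] = {'title': 'Example feed'}   # legacy key...
print(d['feed'])                           # ...is stored under its modern name
```
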
diff --git a/lib/github/AuthenticatedUser.py b/lib/github/AuthenticatedUser.py
index 27cef266c06c84afabe5c8cab7f32bce9179b4d9..ad1523c42539975533339dd7868a065ce55421bf 100644
--- a/lib/github/AuthenticatedUser.py
+++ b/lib/github/AuthenticatedUser.py
@@ -8,7 +8,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -45,8 +46,13 @@ import github.Notification
 class AuthenticatedUser(github.GithubObject.CompletableGithubObject):
     """
     This class represents AuthenticatedUsers as returned for example by http://developer.github.com/v3/todo
+
+    An AuthenticatedUser object can be created by calling ``get_user()`` on a Github object.
     """
 
+    def __repr__(self):
+        return self.get__repr__({"login": self._login.value})
+
     @property
     def avatar_url(self):
         """
diff --git a/lib/github/Authorization.py b/lib/github/Authorization.py
index b1ff7314d419bc7ace04f0b92f17c38f7319db59..11b4d9b4d2da67e1e1b34b70ee94d751913d1e96 100644
--- a/lib/github/Authorization.py
+++ b/lib/github/Authorization.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -34,6 +35,9 @@ class Authorization(github.GithubObject.CompletableGithubObject):
     This class represents Authorizations as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"scopes": self._scopes.value})
+
     @property
     def app(self):
         """
diff --git a/lib/github/AuthorizationApplication.py b/lib/github/AuthorizationApplication.py
index a072798b44e790138498fc1b056058786c06cfb6..63445fbf20063989770032cb634f34b0c6ef765b 100644
--- a/lib/github/AuthorizationApplication.py
+++ b/lib/github/AuthorizationApplication.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class AuthorizationApplication(github.GithubObject.CompletableGithubObject):
     This class represents AuthorizationApplications as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"name": self._name.value})
+
     @property
     def name(self):
         """
diff --git a/lib/github/Branch.py b/lib/github/Branch.py
index 88ffb7b32811333c5f41a6e05cbfd3849b412b9d..effc5bf69901ddaab08d66c3fe80b28b71e1bcba 100644
--- a/lib/github/Branch.py
+++ b/lib/github/Branch.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class Branch(github.GithubObject.NonCompletableGithubObject):
     This class represents Branchs. The reference can be found here http://developer.github.com/v3/repos/#list-branches
     """
 
+    def __repr__(self):
+        return self.get__repr__({"name": self._name.value})
+
     @property
     def commit(self):
         """
@@ -49,6 +53,27 @@ class Branch(github.GithubObject.NonCompletableGithubObject):
         """
         return self._name.value
 
+    @property
+    def protected(self):
+        """
+        :type: bool
+        """
+        return self._protected.value
+
+    @property
+    def enforcement_level(self):
+        """
+        :type: string
+        """
+        return self._enforcement_level.value
+
+    @property
+    def contexts(self):
+        """
+        :type: list of strings
+        """
+        return self._contexts.value
+
     def _initAttributes(self):
         self._commit = github.GithubObject.NotSet
         self._name = github.GithubObject.NotSet
@@ -58,3 +83,7 @@ class Branch(github.GithubObject.NonCompletableGithubObject):
             self._commit = self._makeClassAttribute(github.Commit.Commit, attributes["commit"])
         if "name" in attributes:  # pragma no branch
             self._name = self._makeStringAttribute(attributes["name"])
+        if "protection" in attributes:
+            self._protected = self._makeBoolAttribute(attributes["protection"]["enabled"])
+            self._enforcement_level = self._makeStringAttribute(attributes["protection"]["required_status_checks"]["enforcement_level"])
+            self._contexts = self._makeListOfStringsAttribute(attributes["protection"]["required_status_checks"]["contexts"])
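
The new Branch properties are only populated when the API payload carries a `protection` block, and they are not pre-seeded in `_initAttributes`, so access should be guarded. A hedged sketch, assuming `repo` is an existing Repository object:

```
branch = repo.get_branch("master")
if getattr(branch, "_protected", None) is not None:   # payload included "protection"
    print(branch.protected)            # bool
    print(branch.enforcement_level)    # e.g. "everyone"
    print(branch.contexts)             # required status check names
```
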
diff --git a/lib/github/Commit.py b/lib/github/Commit.py
index a53b5e4478e302f07dba9adf7b6bc381487749ff..b84dfab7053599783fd8efa7de65a670074099a3 100644
--- a/lib/github/Commit.py
+++ b/lib/github/Commit.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -31,6 +32,7 @@ import github.PaginatedList
 import github.GitCommit
 import github.NamedUser
 import github.CommitStatus
+import github.CommitCombinedStatus
 import github.File
 import github.CommitStats
 import github.CommitComment
@@ -41,6 +43,9 @@ class Commit(github.GithubObject.CompletableGithubObject):
     This class represents Commits. The reference can be found here http://developer.github.com/v3/git/commits/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value})
+
     @property
     def author(self):
         """
@@ -203,6 +208,17 @@ class Commit(github.GithubObject.CompletableGithubObject):
             None
         )
 
+    def get_combined_status(self):
+        """
+        :calls: `GET /repos/:owner/:repo/commits/:ref/status <http://developer.github.com/v3/repos/statuses>`_
+        :rtype: :class:`github.CommitCombinedStatus.CommitCombinedStatus`
+        """
+        headers, data = self._requester.requestJsonAndCheck(
+            "GET",
+            self.url + "/status"
+        )
+        return github.CommitCombinedStatus.CommitCombinedStatus(self._requester, headers, data, completed=True)
+
     @property
     def _identity(self):
         return self.sha
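
Usage sketch for the new `get_combined_status()` helper, assuming `repo` is an existing Repository object and the sha is a placeholder:

```
commit = repo.get_commit("6dcb09b5b57875f334f61aebed695e2e4193db5e")   # placeholder sha
combined = commit.get_combined_status()
print(combined.state, combined.total_count)
for status in combined.statuses:
    print(status.context, status.state)
```
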
diff --git a/lib/github/CommitCombinedStatus.py b/lib/github/CommitCombinedStatus.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2ef2967a4e5df1f466548c45b06cabda7d400c8
--- /dev/null
+++ b/lib/github/CommitCombinedStatus.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+
+# ########################## Copyrights and license ############################
+#                                                                              #
+# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net>                 #
+# Copyright 2012 Zearin <zearin@gonk.net>                                      #
+# Copyright 2013 AKFish <akfish@gmail.com>                                     #
+# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
+#                                                                              #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
+#                                                                              #
+# PyGithub is free software: you can redistribute it and/or modify it under    #
+# the terms of the GNU Lesser General Public License as published by the Free  #
+# Software Foundation, either version 3 of the License, or (at your option)    #
+# any later version.                                                           #
+#                                                                              #
+# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
+# details.                                                                     #
+#                                                                              #
+# You should have received a copy of the GNU Lesser General Public License     #
+# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
+#                                                                              #
+# ##############################################################################
+
+import github.GithubObject
+
+import github.CommitStatus
+import github.Repository
+
+
+class CommitCombinedStatus(github.GithubObject.NonCompletableGithubObject):
+    """
+    This class represents CommitCombinedStatus as returned for example by https://developer.github.com/v3/repos/statuses/
+    """
+
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value, "state": self._state.value})
+
+    @property
+    def state(self):
+        """
+        :type: string
+        """
+        return self._state.value
+
+    @property
+    def sha(self):
+        """
+        :type: string
+        """
+        return self._sha.value
+
+    @property
+    def total_count(self):
+        """
+        :type: integer
+        """
+        return self._total_count.value
+
+    @property
+    def commit_url(self):
+        """
+        :type: string
+        """
+        return self._commit_url.value
+
+    @property
+    def url(self):
+        """
+        :type: string
+        """
+        return self._url.value
+
+    @property
+    def repository(self):
+        """
+        :type: :class:`github.Repository.Repository`
+        """
+        return self._repository.value
+
+    @property
+    def statuses(self):
+        """
+        :type: list of :class:`CommitStatus`
+        """
+        return self._statuses.value
+
+    def _initAttributes(self):
+        self._state = github.GithubObject.NotSet
+        self._sha = github.GithubObject.NotSet
+        self._total_count = github.GithubObject.NotSet
+        self._commit_url = github.GithubObject.NotSet
+        self._url = github.GithubObject.NotSet
+        self._repository = github.GithubObject.NotSet
+        self._statuses = github.GithubObject.NotSet
+
+    def _useAttributes(self, attributes):
+        if "state" in attributes:  # pragma no branch
+            self._state = self._makeStringAttribute(attributes["state"])
+        if "sha" in attributes:  # pragma no branch
+            self._sha = self._makeStringAttribute(attributes["sha"])
+        if "total_count" in attributes:  # pragma no branch
+            self._total_count = self._makeIntAttribute(attributes["total_count"])
+        if "commit_url" in attributes:  # pragma no branch
+            self._commit_url = self._makeStringAttribute(attributes["commit_url"])
+        if "url" in attributes:  # pragma no branch
+            self._url = self._makeStringAttribute(attributes["url"])
+        if "repository" in attributes:  # pragma no branch
+            self._repository = self._makeClassAttribute(github.Repository.Repository, attributes["repository"])
+        if "statuses" in attributes:  # pragma no branch
+            self._statuses = self._makeListOfClassesAttribute(github.CommitStatus.CommitStatus, attributes["statuses"])
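
The new class follows PyGithub's usual attribute mapping: `_initAttributes` seeds every field with `NotSet`, `_useAttributes` copies only the keys present in the JSON payload, and the public properties return `.value`. A minimal standalone sketch of that pattern (illustrative only, not the library's actual base-class machinery):

```
class _NotSetType(object):
    def __repr__(self):
        return "NotSet"

NotSet = _NotSetType()

class CombinedStatusSketch(object):
    def __init__(self, attributes):
        self._state = NotSet            # seeded like _initAttributes
        self._total_count = NotSet
        if "state" in attributes:       # copied like _useAttributes
            self._state = attributes["state"]
        if "total_count" in attributes:
            self._total_count = attributes["total_count"]

s = CombinedStatusSketch({"state": "success"})
print(s._state)          # "success"
print(s._total_count)    # NotSet -- the key was absent from the payload
```
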
diff --git a/lib/github/CommitComment.py b/lib/github/CommitComment.py
index b5f4577d8fa4870e8b3971344558c4d7b041256f..c338100efe9637b47c5bc4a2761b71a8544c4082 100644
--- a/lib/github/CommitComment.py
+++ b/lib/github/CommitComment.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -34,6 +35,9 @@ class CommitComment(github.GithubObject.CompletableGithubObject):
     This class represents CommitComments as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "user": self.user})
+
     @property
     def body(self):
         """
diff --git a/lib/github/CommitStats.py b/lib/github/CommitStats.py
index af6ecba36d558786bffe98b0997305457e3f56f8..d8a2a3cefd16227eac766ef951b4810fb94731e1 100644
--- a/lib/github/CommitStats.py
+++ b/lib/github/CommitStats.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/CommitStatus.py b/lib/github/CommitStatus.py
index d6074ea8546e36f11cc8d7059e9577be904e5991..ff43d695f5106e39bf07ee16799249c226f96b4e 100644
--- a/lib/github/CommitStatus.py
+++ b/lib/github/CommitStatus.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -34,6 +35,13 @@ class CommitStatus(github.GithubObject.NonCompletableGithubObject):
     This class represents CommitStatuss as returned for example by https://developer.github.com/v3/repos/statuses/
     """
 
+    def __repr__(self):
+        return self.get__repr__({
+            "id": self._id.value,
+            "state": self._state.value,
+            "context": self._context.value
+        })
+
     @property
     def created_at(self):
         """
@@ -69,6 +77,13 @@ class CommitStatus(github.GithubObject.NonCompletableGithubObject):
         """
         return self._state.value
 
+    @property
+    def context(self):
+        """
+        :type: string
+        """
+        return self._context.value
+
     @property
     def target_url(self):
         """
@@ -96,6 +111,7 @@ class CommitStatus(github.GithubObject.NonCompletableGithubObject):
         self._description = github.GithubObject.NotSet
         self._id = github.GithubObject.NotSet
         self._state = github.GithubObject.NotSet
+        self._context = github.GithubObject.NotSet
         self._target_url = github.GithubObject.NotSet
         self._updated_at = github.GithubObject.NotSet
         self._url = github.GithubObject.NotSet
@@ -111,6 +127,8 @@ class CommitStatus(github.GithubObject.NonCompletableGithubObject):
             self._id = self._makeIntAttribute(attributes["id"])
         if "state" in attributes:  # pragma no branch
             self._state = self._makeStringAttribute(attributes["state"])
+        if "context" in attributes:  # pragma no branch
+            self._context = self._makeStringAttribute(attributes["context"])
         if "target_url" in attributes:  # pragma no branch
             self._target_url = self._makeStringAttribute(attributes["target_url"])
         if "updated_at" in attributes:  # pragma no branch
diff --git a/lib/github/Comparison.py b/lib/github/Comparison.py
index 9111a25baf50279cdc6b1ce1b3f356c57469e934..bee42c73b34e4a664de003b3d842c12de08bc624 100644
--- a/lib/github/Comparison.py
+++ b/lib/github/Comparison.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/Consts.py b/lib/github/Consts.py
index 62a10645039dcacbf286436f7687b16a6de0b764..2c1a169a139a3e9b55a0ccbbb7d38d25cfddd5e3 100644
--- a/lib/github/Consts.py
+++ b/lib/github/Consts.py
@@ -5,7 +5,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -41,4 +42,4 @@ REQ_IF_MODIFIED_SINCE = "If-Modified-Since"
 # (Lower Case)                                                                 #
 # ##############################################################################
 RES_ETAG = "etag"
-RES_LAST_MODIFED = "last-modified"
+RES_LAST_MODIFIED = "last-modified"
diff --git a/lib/github/ContentFile.py b/lib/github/ContentFile.py
index e2b0993bb1b477cec7ccdbafc4f33edf92c7b58f..2f69946be7605d2908d6ecec1d9ea572810c08ca 100644
--- a/lib/github/ContentFile.py
+++ b/lib/github/ContentFile.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -39,6 +40,9 @@ class ContentFile(github.GithubObject.CompletableGithubObject):
     This class represents ContentFiles as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"path": self._path.value})
+
     @property
     def content(self):
         """
diff --git a/lib/github/Download.py b/lib/github/Download.py
index af73f73ee2a1cc2dc78e9e7ec76b588e91880cea..49acc4897328d97f9508ab2a1abde9a8d9c5b65d 100644
--- a/lib/github/Download.py
+++ b/lib/github/Download.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class Download(github.GithubObject.CompletableGithubObject):
     This class represents Downloads as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value})
+
     @property
     def accesskeyid(self):
         """
diff --git a/lib/github/Event.py b/lib/github/Event.py
index 87af16236a049f89f806819b46bb8a4cd21a9a27..a769a230e2a2fd8f68cb2e904db31cb72e4d58a5 100644
--- a/lib/github/Event.py
+++ b/lib/github/Event.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -37,6 +38,9 @@ class Event(github.GithubObject.NonCompletableGithubObject):
     This class represents Events. The reference can be found here http://developer.github.com/v3/activity/events/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "type": self._type.value})
+
     @property
     def actor(self):
         """
diff --git a/lib/github/File.py b/lib/github/File.py
index d411b3b54752101f2f0beaf473ea960c0b587008..d03efd3ba9a4f5c1fb5b047d3bd46f73fb1a88d8 100644
--- a/lib/github/File.py
+++ b/lib/github/File.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class File(github.GithubObject.NonCompletableGithubObject):
     This class represents Files as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value, "filename": self._filename.value})
+
     @property
     def additions(self):
         """
@@ -81,6 +85,13 @@ class File(github.GithubObject.NonCompletableGithubObject):
         """
         return self._patch.value
 
+    @property
+    def previous_filename(self):
+        """
+        :type: string
+        """
+        return self._previous_filename.value
+
     @property
     def raw_url(self):
         """
@@ -110,6 +121,7 @@ class File(github.GithubObject.NonCompletableGithubObject):
         self._deletions = github.GithubObject.NotSet
         self._filename = github.GithubObject.NotSet
         self._patch = github.GithubObject.NotSet
+        self._previous_filename = github.GithubObject.NotSet
         self._raw_url = github.GithubObject.NotSet
         self._sha = github.GithubObject.NotSet
         self._status = github.GithubObject.NotSet
@@ -129,6 +141,8 @@ class File(github.GithubObject.NonCompletableGithubObject):
             self._filename = self._makeStringAttribute(attributes["filename"])
         if "patch" in attributes:  # pragma no branch
             self._patch = self._makeStringAttribute(attributes["patch"])
+        if "previous_filename" in attributes: # pragma no branch
+            self._previous_filename = self._makeStringAttribute(attributes["previous_filename"])
         if "raw_url" in attributes:  # pragma no branch
             self._raw_url = self._makeStringAttribute(attributes["raw_url"])
         if "sha" in attributes:  # pragma no branch
diff --git a/lib/github/Gist.py b/lib/github/Gist.py
index f93b79898061f973452090f6265d0c2701539745..8d75e1e10e5c8b8a59a725d6e4f84cc4a969d498 100644
--- a/lib/github/Gist.py
+++ b/lib/github/Gist.py
@@ -8,7 +8,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -39,6 +40,9 @@ class Gist(github.GithubObject.CompletableGithubObject):
     This class represents Gists as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value})
+
     @property
     def comments(self):
         """
diff --git a/lib/github/GistComment.py b/lib/github/GistComment.py
index 2e07b7a39b8fe2c6b7a60400fe93c546a6cc7756..be1d08f7432d1f29711887407dd4befba0ac1fc4 100644
--- a/lib/github/GistComment.py
+++ b/lib/github/GistComment.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -34,6 +35,9 @@ class GistComment(github.GithubObject.CompletableGithubObject):
     This class represents GistComments as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "user": self._user.value})
+
     @property
     def body(self):
         """
diff --git a/lib/github/GistFile.py b/lib/github/GistFile.py
index a90df90f0b9dcb1e7837f437c2daa089161c6083..5778acfcef2c1685d1b5ff651868b3e008bcd5ac 100644
--- a/lib/github/GistFile.py
+++ b/lib/github/GistFile.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class GistFile(github.GithubObject.NonCompletableGithubObject):
     This class represents GistFiles as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"filename": self._filename.value})
+
     @property
     def content(self):
         """
diff --git a/lib/github/GistHistoryState.py b/lib/github/GistHistoryState.py
index 3fcaf48ab36e45d6a1369e28ecc1536b18e447ca..137462a67913e33f0d5126a1755b75e93a399c2c 100644
--- a/lib/github/GistHistoryState.py
+++ b/lib/github/GistHistoryState.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/GitAuthor.py b/lib/github/GitAuthor.py
index 552f19ef30d3d14fa6aef2c677fdc97f24613801..324dcc14fba2eb0d33872a479448c779ce8d2746 100644
--- a/lib/github/GitAuthor.py
+++ b/lib/github/GitAuthor.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class GitAuthor(github.GithubObject.NonCompletableGithubObject):
     This class represents GitAuthors as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"name": self._name.value})
+
     @property
     def date(self):
         """
diff --git a/lib/github/GitBlob.py b/lib/github/GitBlob.py
index 9c59b94bfdc6a2c3aa882d18381142a88f1d8626..3de1c6fb799c1ed4c50d8a70ebc3a0a531235e04 100644
--- a/lib/github/GitBlob.py
+++ b/lib/github/GitBlob.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class GitBlob(github.GithubObject.CompletableGithubObject):
     This class represents GitBlobs as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value})
+
     @property
     def content(self):
         """
diff --git a/lib/github/GitCommit.py b/lib/github/GitCommit.py
index 3467eab5a5a166703b8c50387638ec8f62980293..9d43d51b64b06bdd0878a00e50f49e9871a46f0a 100644
--- a/lib/github/GitCommit.py
+++ b/lib/github/GitCommit.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class GitCommit(github.GithubObject.CompletableGithubObject):
     This class represents GitCommits as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value})
+
     @property
     def author(self):
         """
diff --git a/lib/github/GitObject.py b/lib/github/GitObject.py
index 1a169952bc23e9b2b53fe7f7d8b23646fe25bed2..0850061be1bebdb09a44bb1de29b687d400ade06 100644
--- a/lib/github/GitObject.py
+++ b/lib/github/GitObject.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class GitObject(github.GithubObject.NonCompletableGithubObject):
     This class represents GitObjects as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value})
+
     @property
     def sha(self):
         """
diff --git a/lib/github/GitRef.py b/lib/github/GitRef.py
index 0938fdb12d10be6464fd8267f1556125efc5fa3e..417982fe71e0122c8d98cf42d73dbc46f09295fb 100644
--- a/lib/github/GitRef.py
+++ b/lib/github/GitRef.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -34,6 +35,9 @@ class GitRef(github.GithubObject.CompletableGithubObject):
     This class represents GitRefs as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"ref": self._ref.value})
+
     @property
     def object(self):
         """
@@ -89,6 +93,22 @@ class GitRef(github.GithubObject.CompletableGithubObject):
         )
         self._useAttributes(data)
 
+    def get_statuses(self):
+        """
+        Not yet implemented; placeholder for listing the statuses of this ref.
+        :calls: `GET /repos/:owner/:repo/commits/:ref/statuses <https://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref>`_
+        :rtype: None
+        """
+        pass
+
+    def get_status(self):
+        """
+        Not yet implemented; placeholder for the combined status of this ref.
+        :calls: `GET /repos/:owner/:repo/commits/:ref/status <https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref>`_
+        :rtype: None
+        """
+        pass
+
     def _initAttributes(self):
         self._object = github.GithubObject.NotSet
         self._ref = github.GithubObject.NotSet
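
Because the two new GitRef helpers are still placeholders, the same data can be reached through the Commit helpers added elsewhere in this diff by resolving the ref's target sha first. A hedged sketch, assuming `repo` is an existing Repository object:

```
ref = repo.get_git_ref("heads/master")
commit = repo.get_commit(ref.object.sha)
print(commit.get_combined_status().state)     # combined status for the ref's head
for status in commit.get_statuses():          # individual statuses
    print(status.context, status.state)
```
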
diff --git a/lib/github/GitRelease.py b/lib/github/GitRelease.py
index 9e068020a0695e78a3328f111a00a5239acdc533..2c1ae524cb2147458642d275f6fff7d1fa15c3f4 100644
--- a/lib/github/GitRelease.py
+++ b/lib/github/GitRelease.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2015 Ed Holland <eholland@alertlogic.com>                          #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -30,6 +31,9 @@ class GitRelease(github.GithubObject.CompletableGithubObject):
     This class represents GitRelease as returned for example by https://developer.github.com/v3/repos/releases
     """
 
+    def __repr__(self):
+        return self.get__repr__({"title": self._title.value})
+
     @property
     def body(self):
         """
@@ -78,6 +82,14 @@ class GitRelease(github.GithubObject.CompletableGithubObject):
         self._completeIfNotSet(self._upload_url)
         return self._upload_url.value
 
+    @property
+    def html_url(self):
+        """
+        :type: string
+        """
+        self._completeIfNotSet(self._html_url)
+        return self._html_url.value
+
     def delete_release(self):
         headers, data = self._requester.requestJsonAndCheck(
             "DELETE",
@@ -111,6 +123,7 @@ class GitRelease(github.GithubObject.CompletableGithubObject):
         self._author = github.GithubObject.NotSet
         self._url = github.GithubObject.NotSet
         self._upload_url = github.GithubObject.NotSet
+        self._html_url = github.GithubObject.NotSet
 
     def _useAttributes(self, attributes):
         if "body" in attributes:
@@ -125,3 +138,5 @@ class GitRelease(github.GithubObject.CompletableGithubObject):
             self._url = self._makeStringAttribute(attributes["url"])
         if "upload_url" in attributes:
             self._upload_url = self._makeStringAttribute(attributes["upload_url"])
+        if "html_url" in attributes:
+            self._html_url = self._makeStringAttribute(attributes["html_url"])
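
Sketch for the new `html_url` property, which exposes the browser-facing release page; it assumes `repo` is an existing Repository object and uses `get_releases()` from the existing API:

```
for release in repo.get_releases():
    print(release.title, release.html_url)
```
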
diff --git a/lib/github/GitTag.py b/lib/github/GitTag.py
index 554dd2a746d7204ebd930d827cdd42c2451ce6a0..347fd2474cb9a08dee9ee6d7eb80858b4f91db70 100644
--- a/lib/github/GitTag.py
+++ b/lib/github/GitTag.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class GitTag(github.GithubObject.CompletableGithubObject):
     This class represents GitTags as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value, "tag": self._tag.value})
+
     @property
     def message(self):
         """
diff --git a/lib/github/GitTree.py b/lib/github/GitTree.py
index 9c78986dda0c188a87a70264613ea52cc6d0b8d4..8012bb77cf37156ed14b2bc2b8ee4ce5200ce2d7 100644
--- a/lib/github/GitTree.py
+++ b/lib/github/GitTree.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -34,6 +35,9 @@ class GitTree(github.GithubObject.CompletableGithubObject):
     This class represents GitTrees as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value})
+
     @property
     def sha(self):
         """
diff --git a/lib/github/GitTreeElement.py b/lib/github/GitTreeElement.py
index a9b4615de79fe6ee1d711e19bd47b1156e6fc059..907fb58ff007e9384500665aa7d1a6ee0c79c017 100644
--- a/lib/github/GitTreeElement.py
+++ b/lib/github/GitTreeElement.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class GitTreeElement(github.GithubObject.NonCompletableGithubObject):
     This class represents GitTreeElements as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value, "path": self._path.value})
+
     @property
     def mode(self):
         """
diff --git a/lib/github/GithubException.py b/lib/github/GithubException.py
index 594f20131527d4f752e974d117f81af6ac3945e7..3b724f1fa60f477f7fafe1b2cd0d428f98c8e9c9 100644
--- a/lib/github/GithubException.py
+++ b/lib/github/GithubException.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -36,6 +37,7 @@ class GithubException(Exception):
         Exception.__init__(self)
         self.__status = status
         self.__data = data
+        self.args = [status, data]
 
     @property
     def status(self):
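
Populating `self.args` means the status and payload now show up when the exception is printed (str() of an Exception is built from `args`) and can be unpacked directly. A sketch, assuming `gh` is an existing Github instance and the repository name is a deliberate miss:

```
from github import GithubException

try:
    gh.get_repo("octocat/definitely-not-a-repo")
except GithubException as exc:
    status, data = exc.args
    print(status, data.get("message"))    # e.g. 404 "Not Found"
    print(exc)                            # str() now includes both values
```
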
diff --git a/lib/github/GithubObject.py b/lib/github/GithubObject.py
index 35e4b5b57daa7035454b6b6072d2a826817c0168..cd8b7d02e0e324badbc1e81b232e55d30be75f14 100644
--- a/lib/github/GithubObject.py
+++ b/lib/github/GithubObject.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -23,12 +24,15 @@
 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
 #                                                                              #
 # ##############################################################################
-
+import sys
 import datetime
+from operator import itemgetter
 
 import GithubException
 import Consts
 
+atLeastPython3 = sys.hexversion >= 0x03000000
+
 
 class _NotSetType:
     def __repr__(self):
@@ -204,7 +208,26 @@ class GithubObject(object):
         '''
         :type: str
         '''
-        return self._headers.get(Consts.RES_LAST_MODIFED)
+        return self._headers.get(Consts.RES_LAST_MODIFIED)
+
+    def get__repr__(self, params):
+        """
+        Converts the object to a nicely printable string.
+        """
+        def format_params(params):
+            if atLeastPython3:
+                items = params.items()
+            else:
+                items = list(params.items())
+            for k, v in sorted(items, key=itemgetter(0), reverse=True):
+                isText = isinstance(v, str) if atLeastPython3 else isinstance(v, (str, unicode))
+                if isText and not atLeastPython3:
+                    v = v.encode('utf-8')
+                yield '{k}="{v}"'.format(k=k, v=v) if isText else '{k}={v}'.format(k=k, v=v)
+        return '{class_name}({params})'.format(
+            class_name=self.__class__.__name__,
+            params=", ".join(list(format_params(params)))
+        )
 
 
 class NonCompletableGithubObject(GithubObject):
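
For illustration, the shared `get__repr__` helper renders `ClassName(key="value", ...)`, quoting string values and emitting keys in reverse-sorted order; `repo` is assumed and the output values below are made up:

```
branch = repo.get_branch("master")
print(repr(branch))        # Branch(name="master")
combined = repo.get_commit("6dcb09b5b57875f334f61aebed695e2e4193db5e").get_combined_status()
print(repr(combined))      # CommitCombinedStatus(state="success", sha="6dcb09b5...")
```
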
diff --git a/lib/github/GitignoreTemplate.py b/lib/github/GitignoreTemplate.py
index 75b96b1bdccc60b8718dcd12690d145559019f8a..55353dc354b9e881e7bf2f5058540ea121df7c37 100644
--- a/lib/github/GitignoreTemplate.py
+++ b/lib/github/GitignoreTemplate.py
@@ -6,7 +6,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -31,6 +32,9 @@ class GitignoreTemplate(github.GithubObject.NonCompletableGithubObject):
     This class represents GitignoreTemplates as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"name": self._name.value})
+
     @property
     def source(self):
         """
diff --git a/lib/github/Hook.py b/lib/github/Hook.py
index b212b646d51a75f2e47276e89bf1bd0c1dbd820a..8e09005924fc9938fc9ae53f6937bf9d6f1960c1 100644
--- a/lib/github/Hook.py
+++ b/lib/github/Hook.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -34,6 +35,9 @@ class Hook(github.GithubObject.CompletableGithubObject):
     This class represents Hooks as returned for example by http://developer.github.com/v3/repos/hooks
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "url": self._url.value})
+
     @property
     def active(self):
         """
diff --git a/lib/github/HookDescription.py b/lib/github/HookDescription.py
index 749aed1540e7974cfa09a85a606c9b144d90fd65..d1058307d65ba0ff8d6dcf4d2c3e4d63508b702e 100644
--- a/lib/github/HookDescription.py
+++ b/lib/github/HookDescription.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class HookDescription(github.GithubObject.NonCompletableGithubObject):
     This class represents HookDescriptions as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"name": self._name.value})
+
     @property
     def events(self):
         """
diff --git a/lib/github/HookResponse.py b/lib/github/HookResponse.py
index e0945735f4b2647e52091c112e4540aadefd1709..bce05e398a440871881c8e67c28b8349a6636191 100644
--- a/lib/github/HookResponse.py
+++ b/lib/github/HookResponse.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class HookResponse(github.GithubObject.NonCompletableGithubObject):
     This class represents HookResponses as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"status": self._status.value})
+
     @property
     def code(self):
         """
diff --git a/lib/github/InputFileContent.py b/lib/github/InputFileContent.py
index d16556f104269061a44a88b64060c266479dc954..a133eb1e3833a6a8cd553c61ce9ace01c58bcbab 100644
--- a/lib/github/InputFileContent.py
+++ b/lib/github/InputFileContent.py
@@ -6,7 +6,8 @@
 # Copyright 2012 Zearin <zearin@gonk.net>                                      #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/InputGitAuthor.py b/lib/github/InputGitAuthor.py
index 4567f4177e6953cb1fa6a2269e6a2a14f59958f4..f7a9ff142be819ee9702a9bf9a51d465ef9596f1 100644
--- a/lib/github/InputGitAuthor.py
+++ b/lib/github/InputGitAuthor.py
@@ -6,7 +6,8 @@
 # Copyright 2012 Zearin <zearin@gonk.net>                                      #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -45,6 +46,9 @@ class InputGitAuthor(object):
         self.__email = email
         self.__date = date
 
+    def __repr__(self):
+        return 'InputGitAuthor(name="{}")'.format(self.__name)
+
     @property
     def _identity(self):
         identity = {
diff --git a/lib/github/InputGitTreeElement.py b/lib/github/InputGitTreeElement.py
index 94685c13505bca7f3e3d74de183c51fc7be40b62..ee55452c62e5452fa94c6fdf807d117ad4d8f732 100644
--- a/lib/github/InputGitTreeElement.py
+++ b/lib/github/InputGitTreeElement.py
@@ -6,7 +6,8 @@
 # Copyright 2012 Zearin <zearin@gonk.net>                                      #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/Installation.py b/lib/github/Installation.py
new file mode 100644
index 0000000000000000000000000000000000000000..67a086b1e4cc793964ca8fb49076ec209d4defcf
--- /dev/null
+++ b/lib/github/Installation.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+# ########################## Copyrights and license ############################
+#                                                                              #
+# Copyright 2017 Jannis Gebauer <ja.geb@me.com>                                #
+#                                                                              #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
+#                                                                              #
+# PyGithub is free software: you can redistribute it and/or modify it under    #
+# the terms of the GNU Lesser General Public License as published by the Free  #
+# Software Foundation, either version 3 of the License, or (at your option)    #
+# any later version.                                                           #
+#                                                                              #
+# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
+# details.                                                                     #
+#                                                                              #
+# You should have received a copy of the GNU Lesser General Public License     #
+# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
+#                                                                              #
+# ##############################################################################
+
+import github.GithubObject
+import github.PaginatedList
+
+import github.Gist
+import github.Repository
+import github.NamedUser
+import github.Plan
+import github.Organization
+import github.UserKey
+import github.Issue
+import github.Event
+import github.Authorization
+import github.Notification
+
+INTEGRATION_PREVIEW_HEADERS = {"Accept": "application/vnd.github.machine-man-preview+json"}
+
+
+class Installation(github.GithubObject.NonCompletableGithubObject):
+    """
+    This class represents Installations as in https://developer.github.com/v3/integrations/installations
+    """
+
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value})
+
+    @property
+    def id(self):
+        return self._id
+
+    def get_repos(self):
+        """
+        :calls: `GET /installation/repositories <https://developer.github.com/v3/integrations/installations/#list-repositories>`_
+        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
+        """
+        url_parameters = dict()
+
+        return github.PaginatedList.PaginatedList(
+            contentClass=github.Repository.Repository,
+            requester=self._requester,
+            firstUrl="/installation/repositories",
+            firstParams=url_parameters,
+            headers=INTEGRATION_PREVIEW_HEADERS,
+            list_item='repositories'
+        )
+
+    def _initAttributes(self):
+        self._id = github.GithubObject.NotSet
+
+    def _useAttributes(self, attributes):
+        if "id" in attributes:  # pragma no branch
+            self._id = self._makeIntAttribute(attributes["id"])
\ No newline at end of file
diff --git a/lib/github/InstallationAuthorization.py b/lib/github/InstallationAuthorization.py
new file mode 100644
index 0000000000000000000000000000000000000000..5dfaf8cbffcd1ba234d7793ddfa16c02824ac03b
--- /dev/null
+++ b/lib/github/InstallationAuthorization.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+# ########################## Copyrights and license ############################
+#                                                                              #
+# Copyright 2016 Jannis Gebauer <ja.geb@me.com>                                #
+#                                                                              #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
+#                                                                              #
+# PyGithub is free software: you can redistribute it and/or modify it under    #
+# the terms of the GNU Lesser General Public License as published by the Free  #
+# Software Foundation, either version 3 of the License, or (at your option)    #
+# any later version.                                                           #
+#                                                                              #
+# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
+# details.                                                                     #
+#                                                                              #
+# You should have received a copy of the GNU Lesser General Public License     #
+# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
+#                                                                              #
+# ##############################################################################
+
+import datetime
+
+import github.GithubObject
+import github.PaginatedList
+import github.NamedUser
+
+
+class InstallationAuthorization(github.GithubObject.NonCompletableGithubObject):
+    """
+    InstallationAuthorization as obtained from a GitHub integration.
+    """
+
+    def __repr__(self):
+        return self.get__repr__({"expires_at": self._expires_at.value})
+
+    @property
+    def token(self):
+        """
+        :type: string
+        """
+        return self._token.value
+
+    @property
+    def expires_at(self):
+        """
+        :type: datetime
+        """
+        return self._expires_at.value
+
+    @property
+    def on_behalf_of(self):
+        """
+        :type: :class:`github.NamedUser.NamedUser`
+        """
+        return self._on_behalf_of.value
+
+    def _initAttributes(self):
+        self._token = github.GithubObject.NotSet
+        self._expires_at = github.GithubObject.NotSet
+        self._on_behalf_of = github.GithubObject.NotSet
+
+    def _useAttributes(self, attributes):
+        if "token" in attributes:  # pragma no branch
+            self._token = self._makeStringAttribute(attributes["token"])
+        if "expires_at" in attributes:  # pragma no branch
+            self._expires_at = self._makeDatetimeAttribute(attributes["expires_at"])
+        if "on_behalf_of" in attributes:  # pragma no branch
+            self._on_behalf_of = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["on_behalf_of"])
\ No newline at end of file
diff --git a/lib/github/Issue.py b/lib/github/Issue.py
index f88e9519b89a2a29af30634d21115a924e95520f..9ef4df28e52f45f82aaaa738fbfe0bf518e5eb17 100644
--- a/lib/github/Issue.py
+++ b/lib/github/Issue.py
@@ -10,7 +10,8 @@
 # Copyright 2013 Stuart Glaser <stuglaser@gmail.com>                           #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -28,6 +29,7 @@
 # ##############################################################################
 
 import urllib
+import datetime
 import github.GithubObject
 import github.PaginatedList
 
@@ -45,6 +47,9 @@ class Issue(github.GithubObject.CompletableGithubObject):
     This class represents Issues as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"number": self._number.value, "title": self._title.value})
+
     @property
     def assignee(self):
         """
@@ -53,6 +58,14 @@ class Issue(github.GithubObject.CompletableGithubObject):
         self._completeIfNotSet(self._assignee)
         return self._assignee.value
 
+    @property
+    def assignees(self):
+        """
+        :type: list of :class:`github.NamedUser.NamedUser`
+        """
+        self._completeIfNotSet(self._assignees)
+        return self._assignees.value
+
     @property
     def body(self):
         """
@@ -217,6 +230,21 @@ class Issue(github.GithubObject.CompletableGithubObject):
         self._completeIfNotSet(self._user)
         return self._user.value
 
+    def add_to_assignees(self, *assignees):
+        """
+        :calls: `POST /repos/:owner/:repo/issues/:number/assignees <https://developer.github.com/v3/issues/assignees>`_
+        :param assignees: one or more :class:`github.NamedUser.NamedUser` or string
+        :rtype: None
+        """
+        assert all(isinstance(element, (github.NamedUser.NamedUser, str, unicode)) for element in assignees), assignees
+        post_parameters = {"assignees": [assignee.login if isinstance(assignee, github.NamedUser.NamedUser) else assignee for assignee in assignees]}
+        headers, data = self._requester.requestJsonAndCheck(
+            "POST",
+            self.url + "/assignees",
+            input=post_parameters
+        )
+        self._useAttributes(data)
+
     def add_to_labels(self, *labels):
         """
         :calls: `POST /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_
@@ -258,12 +286,13 @@ class Issue(github.GithubObject.CompletableGithubObject):
             self.url + "/labels"
         )
 
-    def edit(self, title=github.GithubObject.NotSet, body=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, state=github.GithubObject.NotSet, milestone=github.GithubObject.NotSet, labels=github.GithubObject.NotSet):
+    def edit(self, title=github.GithubObject.NotSet, body=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, state=github.GithubObject.NotSet, milestone=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, assignees=github.GithubObject.NotSet):
         """
         :calls: `PATCH /repos/:owner/:repo/issues/:number <http://developer.github.com/v3/issues>`_
         :param title: string
         :param body: string
         :param assignee: string or :class:`github.NamedUser.NamedUser` or None
+        :param assignees: list (of string or :class:`github.NamedUser.NamedUser`)
         :param state: string
         :param milestone: :class:`github.Milestone.Milestone` or None
         :param labels: list of string
@@ -272,6 +301,7 @@ class Issue(github.GithubObject.CompletableGithubObject):
         assert title is github.GithubObject.NotSet or isinstance(title, (str, unicode)), title
         assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
         assert assignee is github.GithubObject.NotSet or assignee is None or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
+        assert assignees is github.GithubObject.NotSet or all(isinstance(element, github.NamedUser.NamedUser) or isinstance(element, (str, unicode)) for element in assignees), assignees
         assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
         assert milestone is github.GithubObject.NotSet or milestone is None or isinstance(milestone, github.Milestone.Milestone), milestone
         assert labels is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in labels), labels
@@ -285,6 +315,8 @@ class Issue(github.GithubObject.CompletableGithubObject):
                 post_parameters["assignee"] = assignee
             else:
                 post_parameters["assignee"] = assignee._identity if assignee else ''
+        if assignees is not github.GithubObject.NotSet:
+            post_parameters["assignees"] = [element._identity if isinstance(element, github.NamedUser.NamedUser) else element for element in assignees]
         if state is not github.GithubObject.NotSet:
             post_parameters["state"] = state
         if milestone is not github.GithubObject.NotSet:
@@ -311,16 +343,21 @@ class Issue(github.GithubObject.CompletableGithubObject):
         )
         return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
 
-    def get_comments(self):
+    def get_comments(self, since=github.GithubObject.NotSet):
         """
         :calls: `GET /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
+        :param since: datetime.datetime format YYYY-MM-DDTHH:MM:SSZ
         :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
         """
+        assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
+        url_parameters = dict()
+        if since is not github.GithubObject.NotSet:
+            url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
         return github.PaginatedList.PaginatedList(
             github.IssueComment.IssueComment,
             self._requester,
             self.url + "/comments",
-            None
+            url_parameters
         )
 
     def get_events(self):
@@ -347,6 +384,21 @@ class Issue(github.GithubObject.CompletableGithubObject):
             None
         )
 
+    def remove_from_assignees(self, *assignees):
+        """
+        :calls: `DELETE /repos/:owner/:repo/issues/:number/assignees <https://developer.github.com/v3/issues/assignees>`_
+        :param assignees: one or more :class:`github.NamedUser.NamedUser` or string
+        :rtype: None
+        """
+        assert all(isinstance(element, (github.NamedUser.NamedUser, str, unicode)) for element in assignees), assignees
+        post_parameters = {"assignees": [assignee.login if isinstance(assignee, github.NamedUser.NamedUser) else assignee for assignee in assignees]}
+        headers, data = self._requester.requestJsonAndCheck(
+            "DELETE",
+            self.url + "/assignees",
+            input=post_parameters
+        )
+        self._useAttributes(data)
+
     def remove_from_labels(self, label):
         """
         :calls: `DELETE /repos/:owner/:repo/issues/:number/labels/:name <http://developer.github.com/v3/issues/labels>`_
@@ -383,6 +435,7 @@ class Issue(github.GithubObject.CompletableGithubObject):
 
     def _initAttributes(self):
         self._assignee = github.GithubObject.NotSet
+        self._assignees = github.GithubObject.NotSet
         self._body = github.GithubObject.NotSet
         self._closed_at = github.GithubObject.NotSet
         self._closed_by = github.GithubObject.NotSet
@@ -407,6 +460,13 @@ class Issue(github.GithubObject.CompletableGithubObject):
     def _useAttributes(self, attributes):
         if "assignee" in attributes:  # pragma no branch
             self._assignee = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["assignee"])
+        if "assignees" in attributes:  # pragma no branch
+            self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, attributes["assignees"])
+        elif "assignee" in attributes:
+            if attributes["assignee"] is not None:
+                self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, [attributes["assignee"]])
+            else:
+                self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, [])
         if "body" in attributes:  # pragma no branch
             self._body = self._makeStringAttribute(attributes["body"])
         if "closed_at" in attributes:  # pragma no branch
diff --git a/lib/github/IssueComment.py b/lib/github/IssueComment.py
index 0b0fc5a905a2b1c16b70dc8ba3330ba15d166ca7..8593a8cd5b32cefe18002a9f33a4277438360f6a 100644
--- a/lib/github/IssueComment.py
+++ b/lib/github/IssueComment.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Michael Stead <michael.stead@gmail.com>                       #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class IssueComment(github.GithubObject.CompletableGithubObject):
     This class represents IssueComments as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "user": self._user.value})
+
     @property
     def body(self):
         """
diff --git a/lib/github/IssueEvent.py b/lib/github/IssueEvent.py
index ea372978d9e90d3c05b33cc158ab933fa63a7af4..d55cd35346367e4251f2ec17da38a9a3c20d109c 100644
--- a/lib/github/IssueEvent.py
+++ b/lib/github/IssueEvent.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class IssueEvent(github.GithubObject.CompletableGithubObject):
     This class represents IssueEvents as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value})
+
     @property
     def actor(self):
         """
diff --git a/lib/github/IssuePullRequest.py b/lib/github/IssuePullRequest.py
index 6e5eec444f30b3dda1431309c9c1c1a997931993..18243d11db2ac6e516b2f26c3e1134abc066d16d 100644
--- a/lib/github/IssuePullRequest.py
+++ b/lib/github/IssuePullRequest.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/Label.py b/lib/github/Label.py
index 6fd413049e15bd8d61601a1690d0712e46fe6b54..56e6d083a9686bb825484711dbd4f966655f7db1 100644
--- a/lib/github/Label.py
+++ b/lib/github/Label.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class Label(github.GithubObject.CompletableGithubObject):
     This class represents Labels. The reference can be found here http://developer.github.com/v3/issues/labels/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"name": self._name.value})
+
     @property
     def color(self):
         """
diff --git a/lib/github/Legacy.py b/lib/github/Legacy.py
index 0cd5bc6c832f348a3d80257bea8d4f50f798a40c..7c8a7995a8fc46f817bd48b0331068afa315c2ae 100644
--- a/lib/github/Legacy.py
+++ b/lib/github/Legacy.py
@@ -8,7 +8,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/MainClass.py b/lib/github/MainClass.py
index f55be934c44d47dfdba0bb68a661f88b555510ff..2184547e542f93d3ea355e9342a8b903779e7291 100644
--- a/lib/github/MainClass.py
+++ b/lib/github/MainClass.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Peter Golm <golm.peter@gmail.com>                             #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -27,14 +28,19 @@
 
 import urllib
 import pickle
+import time
+import sys
+from httplib import HTTPSConnection
+import jwt
 
-from Requester import Requester
+from Requester import Requester, json
 import AuthenticatedUser
 import NamedUser
 import Organization
 import Gist
 import github.PaginatedList
 import Repository
+import Installation
 import Legacy
 import github.GithubObject
 import HookDescription
@@ -42,7 +48,10 @@ import GitignoreTemplate
 import Status
 import StatusMessage
 import RateLimit
+import InstallationAuthorization
+import GithubException
 
+atLeastPython3 = sys.hexversion >= 0x03000000
 
 DEFAULT_BASE_URL = "https://api.github.com"
 DEFAULT_TIMEOUT = 10
@@ -589,3 +598,94 @@ class Github(object):
             cnx="status"
         )
         return [StatusMessage.StatusMessage(self.__requester, headers, attributes, completed=True) for attributes in data]
+
+    def get_installation(self, id):
+        """
+
+        :param id: int
+        :rtype: :class:`github.Installation.Installation`
+        """
+        return Installation.Installation(self.__requester, headers={}, attributes={"id": id}, completed=True)
+
+
+class GithubIntegration(object):
+    """
+    Main class to obtain tokens for a GitHub integration.
+    """
+
+    def __init__(self, integration_id, private_key):
+        """
+        :param integration_id: int
+        :param private_key: string
+        """
+        self.integration_id = integration_id
+        self.private_key = private_key
+
+    def create_jwt(self):
+        """
+        Creates a signed JWT, valid for 60 seconds.
+        :rtype: string
+        """
+        now = int(time.time())
+        payload = {
+            "iat": now,
+            "exp": now + 60,
+            "iss": self.integration_id
+        }
+        return jwt.encode(
+            payload,
+            key=self.private_key,
+            algorithm="RS256"
+        )
+
+    def get_access_token(self, installation_id, user_id=None):
+        """
+        Get an access token for the given installation id.
+        POSTs https://api.github.com/installations/<installation_id>/access_tokens
+        :param user_id: int
+        :param installation_id: int
+        :return: :class:`github.InstallationAuthorization.InstallationAuthorization`
+        """
+        body = None
+        if user_id:
+            body = json.dumps({"user_id": user_id})
+        conn = HTTPSConnection("api.github.com")
+        conn.request(
+            method="POST",
+            url="/installations/{}/access_tokens".format(installation_id),
+            headers={
+                "Authorization": "Bearer {}".format(self.create_jwt()),
+                "Accept": "application/vnd.github.machine-man-preview+json",
+                "User-Agent": "PyGithub/Python"
+            },
+            body=body
+        )
+        response = conn.getresponse()
+        response_text = response.read()
+
+        if atLeastPython3:
+            response_text = response_text.decode('utf-8')
+
+        conn.close()
+        if response.status == 201:
+            data = json.loads(response_text)
+            return InstallationAuthorization.InstallationAuthorization(
+                requester=None,  # not required, this is a NonCompletableGithubObject
+                headers={},  # not required, this is a NonCompletableGithubObject
+                attributes=data,
+                completed=True
+            )
+        elif response.status == 403:
+            raise GithubException.BadCredentialsException(
+                status=response.status,
+                data=response_text
+            )
+        elif response.status == 404:
+            raise GithubException.UnknownObjectException(
+                status=response.status,
+                data=response_text
+            )
+        raise GithubException.GithubException(
+            status=response.status,
+            data=response_text
+        )
\ No newline at end of file
diff --git a/lib/github/Milestone.py b/lib/github/Milestone.py
index 7875ebccd8d462746cead792f1a95f057fa69a31..ebe59df14cbad75bb38c430ba5ae9f0a312040ff 100644
--- a/lib/github/Milestone.py
+++ b/lib/github/Milestone.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -39,6 +40,9 @@ class Milestone(github.GithubObject.CompletableGithubObject):
     This class represents Milestones. The reference can be found here http://developer.github.com/v3/issues/milestones/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"number": self._number.value})
+
     @property
     def closed_issues(self):
         """
diff --git a/lib/github/NamedUser.py b/lib/github/NamedUser.py
index 1b417546580ab02a7a3297850c4a77794842f319..2549e67500638b0b20b962cd557fa69415eb9708 100644
--- a/lib/github/NamedUser.py
+++ b/lib/github/NamedUser.py
@@ -8,7 +8,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -41,6 +42,9 @@ class NamedUser(github.GithubObject.CompletableGithubObject):
     This class represents NamedUsers as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"login": self._login.value})
+
     @property
     def avatar_url(self):
         """
diff --git a/lib/github/Notification.py b/lib/github/Notification.py
index 84533fbea14a0851d20b23776b1b7db936fd37bf..b4b0205a9f9241efb246df7d4fefaad344c1d626 100644
--- a/lib/github/Notification.py
+++ b/lib/github/Notification.py
@@ -7,7 +7,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class Notification(github.GithubObject.CompletableGithubObject):
     This class represents Notifications. The reference can be found here http://developer.github.com/v3/activity/notifications/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "subject": self._subject.value})
+
     @property
     def id(self):
         """
diff --git a/lib/github/NotificationSubject.py b/lib/github/NotificationSubject.py
index 62744a311b25e5ec9296bb9b3c4208601f1ef12c..c4289424efe55d2868acdb53afec0f3386fa39ec 100644
--- a/lib/github/NotificationSubject.py
+++ b/lib/github/NotificationSubject.py
@@ -5,7 +5,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -30,6 +31,9 @@ class NotificationSubject(github.GithubObject.NonCompletableGithubObject):
     This class represents Subjects of Notifications as returned for example by http://developer.github.com/v3/activity/notifications/#list-your-notifications
     """
 
+    def __repr__(self):
+        return self.get__repr__({"title": self._title.value})
+
     @property
     def title(self):
         """
diff --git a/lib/github/Organization.py b/lib/github/Organization.py
index e043cac17778b65374c176f9508b9f76412b494c..50e796f9ca846a83d8b650f458d803e912288266 100644
--- a/lib/github/Organization.py
+++ b/lib/github/Organization.py
@@ -9,7 +9,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -43,6 +44,9 @@ class Organization(github.GithubObject.CompletableGithubObject):
     This class represents Organizations. The reference can be found here http://developer.github.com/v3/orgs/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "name": self._name.value})
+
     @property
     def avatar_url(self):
         """
@@ -522,7 +526,7 @@ class Organization(github.GithubObject.CompletableGithubObject):
     def get_repos(self, type=github.GithubObject.NotSet):
         """
         :calls: `GET /orgs/:org/repos <http://developer.github.com/v3/repos>`_
-        :param type: string
+        :param type: string ('all', 'public', 'private', 'forks', 'sources', 'member')
         :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
         """
         assert type is github.GithubObject.NotSet or isinstance(type, (str, unicode)), type
@@ -572,6 +576,11 @@ class Organization(github.GithubObject.CompletableGithubObject):
             "GET",
             self.url + "/members/" + member._identity
         )
+        if status == 302:
+            status, headers, data = self._requester.requestJson(
+                "GET",
+                headers['location']
+            )
         return status == 204
 
     def has_in_public_members(self, public_member):
diff --git a/lib/github/PaginatedList.py b/lib/github/PaginatedList.py
index 12e1acc7e15f89a4b9f92dc9136e6822331053c7..81e21220a417455e110738bda62b120c236a9c03 100644
--- a/lib/github/PaginatedList.py
+++ b/lib/github/PaginatedList.py
@@ -9,7 +9,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 davidbrai <davidbrai@gmail.com>                               #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -106,7 +107,7 @@ class PaginatedList(PaginatedListBase):
         some_other_repos = user.get_repos().get_page(3)
     """
 
-    def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None):
+    def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item="items"):
         PaginatedListBase.__init__(self)
         self.__requester = requester
         self.__contentClass = contentClass
@@ -115,6 +116,7 @@ class PaginatedList(PaginatedListBase):
         self.__nextUrl = firstUrl
         self.__nextParams = firstParams or {}
         self.__headers = headers
+        self.__list_item = list_item
         if self.__requester.per_page != 30:
             self.__nextParams["per_page"] = self.__requester.per_page
         self._reversed = False
@@ -172,9 +174,9 @@ class PaginatedList(PaginatedListBase):
                 self.__nextUrl = links["next"]
         self.__nextParams = None
 
-        if 'items' in data:
+        if self.__list_item in data:
             self.__totalCount = data['total_count']
-            data = data["items"]
+            data = data[self.__list_item]
 
         content = [
             self.__contentClass(self.__requester, headers, element, completed=False)
@@ -208,9 +210,9 @@ class PaginatedList(PaginatedListBase):
             headers=self.__headers
         )
 
-        if 'items' in data:
+        if self.__list_item in data:
             self.__totalCount = data['total_count']
-            data = data["items"]
+            data = data[self.__list_item]
 
         return [
             self.__contentClass(self.__requester, headers, element, completed=False)
diff --git a/lib/github/Permissions.py b/lib/github/Permissions.py
index 7fdff591a5a52aec86412ca15f48c4746e53fbc2..3bb6faf8661eccb31c9a4d35edf3c71d677a7cf7 100644
--- a/lib/github/Permissions.py
+++ b/lib/github/Permissions.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,13 @@ class Permissions(github.GithubObject.NonCompletableGithubObject):
     This class represents Permissionss as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({
+            "admin": self._admin.value,
+            "pull": self._pull.value,
+            "push": self._push.value
+        })
+
     @property
     def admin(self):
         """
diff --git a/lib/github/Plan.py b/lib/github/Plan.py
index 7a3781dedcab101f6e32403c94fdceaf61bab4eb..0b6473ae665551c7d836bfcef8f08419bef13317 100644
--- a/lib/github/Plan.py
+++ b/lib/github/Plan.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -32,6 +33,9 @@ class Plan(github.GithubObject.NonCompletableGithubObject):
     This class represents Plans as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"name": self._name.value})
+
     @property
     def collaborators(self):
         """
diff --git a/lib/github/PullRequest.py b/lib/github/PullRequest.py
index ffd528845ec75735f049cb97351c268c1cab7a0d..7cee6f539ee002de6d6d4f40767677ab45402dc8 100644
--- a/lib/github/PullRequest.py
+++ b/lib/github/PullRequest.py
@@ -9,7 +9,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -36,6 +37,8 @@ import github.PullRequestComment
 import github.File
 import github.IssueComment
 import github.Commit
+import github.PullRequestReview
+import github.PullRequestReviewerRequest
 
 
 class PullRequest(github.GithubObject.CompletableGithubObject):
@@ -43,6 +46,9 @@ class PullRequest(github.GithubObject.CompletableGithubObject):
     This class represents PullRequests. The reference can be found here http://developer.github.com/v3/pulls/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"number": self._number.value, "title": self._title.value})
+
     @property
     def additions(self):
         """
@@ -59,6 +65,14 @@ class PullRequest(github.GithubObject.CompletableGithubObject):
         self._completeIfNotSet(self._assignee)
         return self._assignee.value
 
+    @property
+    def assignees(self):
+        """
+        :type: list of :class:`github.NamedUser.NamedUser`
+        """
+        self._completeIfNotSet(self._assignees)
+        return self._assignees.value
+
     @property
     def base(self):
         """
@@ -483,6 +497,46 @@ class PullRequest(github.GithubObject.CompletableGithubObject):
             None
         )
 
+    def get_review(self, id):
+        """
+        :calls: `GET /repos/:owner/:repo/pulls/:number/reviews/:id <https://developer.github.com/v3/pulls/reviews>`_
+        :param id: integer
+        :rtype: :class:`github.PullRequestReview.PullRequestReview`
+        """
+        assert isinstance(id, (int, long)), id
+        headers, data = self._requester.requestJsonAndCheck(
+            "GET",
+            self.url + "/reviews/" + str(id),
+            headers={'Accept': 'application/vnd.github.black-cat-preview+json'}
+        )
+        return github.PullRequestReview.PullRequestReview(self._requester, headers, data, completed=True)
+
+    def get_reviews(self):
+        """
+        :calls: `GET /repos/:owner/:repo/pulls/:number/reviews <https://developer.github.com/v3/pulls/reviews/>`_
+        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestReview.PullRequestReview`
+        """
+        return github.PaginatedList.PaginatedList(
+            github.PullRequestReview.PullRequestReview,
+            self._requester,
+            self.url + "/reviews",
+            None,
+            headers={'Accept': 'application/vnd.github.black-cat-preview+json'}
+        )
+
+    def get_reviewer_requests(self):
+        """
+        :calls: `GET /repos/:owner/:repo/pulls/:number/requested_reviewers <https://developer.github.com/v3/pulls/review_requests/>`_
+        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestReviewerRequest.PullRequestReviewerRequest`
+        """
+        return github.PaginatedList.PaginatedList(
+            github.PullRequestReviewerRequest.PullRequestReviewerRequest,
+            self._requester,
+            self.url + "/requested_reviewers",
+            None,
+            headers={'Accept': 'application/vnd.github.black-cat-preview+json'}
+        )
+
     def is_merged(self):
         """
         :calls: `GET /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
@@ -514,6 +568,7 @@ class PullRequest(github.GithubObject.CompletableGithubObject):
     def _initAttributes(self):
         self._additions = github.GithubObject.NotSet
         self._assignee = github.GithubObject.NotSet
+        self._assignees = github.GithubObject.NotSet
         self._base = github.GithubObject.NotSet
         self._body = github.GithubObject.NotSet
         self._changed_files = github.GithubObject.NotSet
@@ -552,6 +607,13 @@ class PullRequest(github.GithubObject.CompletableGithubObject):
             self._additions = self._makeIntAttribute(attributes["additions"])
         if "assignee" in attributes:  # pragma no branch
             self._assignee = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["assignee"])
+        if "assignees" in attributes:  # pragma no branch
+            self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, attributes["assignees"])
+        elif "assignee" in attributes:
+            if attributes["assignee"] is not None:
+                self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, [attributes["assignee"]])
+            else:
+                self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, [])
         if "base" in attributes:  # pragma no branch
             self._base = self._makeClassAttribute(github.PullRequestPart.PullRequestPart, attributes["base"])
         if "body" in attributes:  # pragma no branch
diff --git a/lib/github/PullRequestComment.py b/lib/github/PullRequestComment.py
index 4169afa2718af5c2323e1ab26e6eb09b2350c84d..1decabad9b3f284c356f3595323eb1ec706a895e 100644
--- a/lib/github/PullRequestComment.py
+++ b/lib/github/PullRequestComment.py
@@ -9,7 +9,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -36,6 +37,9 @@ class PullRequestComment(github.GithubObject.CompletableGithubObject):
     This class represents PullRequestComments. The reference can be found here http://developer.github.com/v3/pulls/comments/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "user": self._user.value})
+
     @property
     def body(self):
         """
diff --git a/lib/github/PullRequestMergeStatus.py b/lib/github/PullRequestMergeStatus.py
index 682472c58856f8fbf854621720dce6c84209c2d7..a59adfa9c58bd50624eeb79619f1a379485040ff 100644
--- a/lib/github/PullRequestMergeStatus.py
+++ b/lib/github/PullRequestMergeStatus.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -33,6 +34,9 @@ class PullRequestMergeStatus(github.GithubObject.NonCompletableGithubObject):
     This class represents PullRequestMergeStatuss. The reference can be found here http://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value, "merged": self._merged.value})
+
     @property
     def merged(self):
         """
diff --git a/lib/github/PullRequestPart.py b/lib/github/PullRequestPart.py
index d05948926b188897f8d2a8d3d2f54db848decaa2..8f03d6f101b93c51de33a2d4dc287d4018a9e8b2 100644
--- a/lib/github/PullRequestPart.py
+++ b/lib/github/PullRequestPart.py
@@ -7,7 +7,8 @@
 # Copyright 2013 AKFish <akfish@gmail.com>                                     #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,9 @@ class PullRequestPart(github.GithubObject.NonCompletableGithubObject):
     This class represents PullRequestParts as returned for example by http://developer.github.com/v3/todo
     """
 
+    def __repr__(self):
+        return self.get__repr__({"sha": self._sha.value})
+
     @property
     def label(self):
         """
diff --git a/lib/github/PullRequestReview.py b/lib/github/PullRequestReview.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c2189ee830731e17e596d8d6702972e6226e207
--- /dev/null
+++ b/lib/github/PullRequestReview.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+# ########################## Copyrights and license ############################
+#                                                                              #
+# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net>                 #
+# Copyright 2012 Zearin <zearin@gonk.net>                                      #
+# Copyright 2013 AKFish <akfish@gmail.com>                                     #
+# Copyright 2013 Michael Stead <michael.stead@gmail.com>                       #
+# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
+#                                                                              #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
+#                                                                              #
+# PyGithub is free software: you can redistribute it and/or modify it under    #
+# the terms of the GNU Lesser General Public License as published by the Free  #
+# Software Foundation, either version 3 of the License, or (at your option)    #
+# any later version.                                                           #
+#                                                                              #
+# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
+# details.                                                                     #
+#                                                                              #
+# You should have received a copy of the GNU Lesser General Public License     #
+# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
+#                                                                              #
+# ##############################################################################
+
+import github.GithubObject
+
+import github.NamedUser
+
+
+class PullRequestReview(github.GithubObject.CompletableGithubObject):
+    """
+    This class represents Pull Request Reviews as returned for example by https://developer.github.com/v3/pulls/reviews/
+    """
+
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "user": self._user.value})
+
+    @property
+    def id(self):
+        """
+        :type: integer
+        """
+        self._completeIfNotSet(self._id)
+        return self._id.value
+
+    @property
+    def user(self):
+        """
+        :type: :class:`github.NamedUser.NamedUser`
+        """
+        self._completeIfNotSet(self._user)
+        return self._user.value
+
+    @property
+    def body(self):
+        """
+        :type: string
+        """
+        self._completeIfNotSet(self._body)
+        return self._body.value
+
+    @property
+    def commit_id(self):
+        """
+        :type: string
+        """
+        self._completeIfNotSet(self._commit_id)
+        return self._commit_id.value
+
+    @property
+    def state(self):
+        """
+        :type: string
+        """
+        self._completeIfNotSet(self._state)
+        return self._state.value
+
+    @property
+    def html_url(self):
+        """
+        :type: string
+        """
+        self._completeIfNotSet(self._html_url)
+        return self._html_url.value
+
+    @property
+    def pull_request_url(self):
+        """
+        :type: string
+        """
+        self._completeIfNotSet(self._pull_request_url)
+        return self._pull_request_url.value
+
+    def _initAttributes(self):
+        self._id = github.GithubObject.NotSet
+        self._user = github.GithubObject.NotSet
+        self._body = github.GithubObject.NotSet
+        self._commit_id = github.GithubObject.NotSet
+        self._state = github.GithubObject.NotSet
+        self._html_url = github.GithubObject.NotSet
+        self._pull_request_url = github.GithubObject.NotSet
+
+    def _useAttributes(self, attributes):
+        if "id" in attributes:  # pragma no branch
+            self._id = self._makeIntAttribute(attributes["id"])
+        if "user" in attributes:  # pragma no branch
+            self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
+        if "body" in attributes:  # pragma no branch
+            self._body = self._makeStringAttribute(attributes["body"])
+        if "commit_id" in attributes:  # pragma no branch
+            self._commit_id = self._makeStringAttribute(attributes["commit_id"])
+        if "state" in attributes:  # pragma no branch
+            self._state = self._makeStringAttribute(attributes["state"])
+        if "html_url" in attributes:  # pragma no branch
+            self._html_url = self._makeStringAttribute(attributes["html_url"])
+        if "pull_request_url" in attributes:  # pragma no branch
+            self._pull_request_url = self._makeStringAttribute(attributes["pull_request_url"])
diff --git a/lib/github/PullRequestReviewerRequest.py b/lib/github/PullRequestReviewerRequest.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f3ea688a276ffe646d1dc16ac0121345cc037ee
--- /dev/null
+++ b/lib/github/PullRequestReviewerRequest.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+# ########################## Copyrights and license ############################
+#                                                                              #
+# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net>                 #
+# Copyright 2012 Zearin <zearin@gonk.net>                                      #
+# Copyright 2013 AKFish <akfish@gmail.com>                                     #
+# Copyright 2013 Michael Stead <michael.stead@gmail.com>                       #
+# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
+#                                                                              #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
+#                                                                              #
+# PyGithub is free software: you can redistribute it and/or modify it under    #
+# the terms of the GNU Lesser General Public License as published by the Free  #
+# Software Foundation, either version 3 of the License, or (at your option)    #
+# any later version.                                                           #
+#                                                                              #
+# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
+# details.                                                                     #
+#                                                                              #
+# You should have received a copy of the GNU Lesser General Public License     #
+# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
+#                                                                              #
+# ##############################################################################
+
+import github.GithubObject
+
+import github.NamedUser
+
+
+class PullRequestReviewerRequest(github.GithubObject.CompletableGithubObject):
+    """
+    This class represents Pull Request Reviewer Requests as returned for example by https://developer.github.com/v3/pulls/review_requests/
+    """
+
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "login": self._login.value})
+
+    @property
+    def login(self):
+        """
+        :type: string
+        """
+        self._completeIfNotSet(self._login)
+        return self._login.value
+
+    @property
+    def id(self):
+        """
+        :type: integer
+        """
+        self._completeIfNotSet(self._id)
+        return self._id.value
+
+    def _initAttributes(self):
+        self._login = github.GithubObject.NotSet
+        self._id = github.GithubObject.NotSet
+
+    def _useAttributes(self, attributes):
+        if "login" in attributes:  # pragma no branch
+            self._login = self._makeStringAttribute(attributes["login"])
+        if "id" in attributes:  # pragma no branch
+            self._id = self._makeIntAttribute(attributes["id"])
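+
+# Illustrative sketch (editor's note, not part of PyGithub): instances behave
+# like a thin user reference. Assuming ``requests`` is an iterable of
+# PullRequestReviewerRequest objects obtained from a pull request:
+#
+#     for request in requests:
+#         print(request.id, request.login)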
diff --git a/lib/github/Rate.py b/lib/github/Rate.py
index 60f2f2b3a4ff09010efacc0a45f2aad7aa2efaa5..5c7e4f5d14745b97648b9b2e8dbe2f76f0119b0b 100644
--- a/lib/github/Rate.py
+++ b/lib/github/Rate.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -30,6 +31,9 @@ class Rate(github.GithubObject.NonCompletableGithubObject):
     This class represents rate limits as defined in http://developer.github.com/v3/rate_limit
     """
 
+    def __repr__(self):
+        return self.get__repr__({"limit": self._limit.value, "remaining": self._remaining.value})
+
     @property
     def limit(self):
         """
diff --git a/lib/github/RateLimit.py b/lib/github/RateLimit.py
index f0cffc65c1ee9b5976d5afc2af701d39be8af530..9ff0f08cd8b8cf740680c3c36818ddbcbbd0d8f5 100644
--- a/lib/github/RateLimit.py
+++ b/lib/github/RateLimit.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -30,6 +31,9 @@ class RateLimit(github.GithubObject.NonCompletableGithubObject):
     This class represents rate limits as defined in http://developer.github.com/v3/rate_limit
     """
 
+    def __repr__(self):
+        return self.get__repr__({"rate": self._rate.value})
+
     @property
     def rate(self):
         """
diff --git a/lib/github/Repository.py b/lib/github/Repository.py
index 64c96e3eabac67388688cdd70b1e2775565878ef..b079bc88192942b31793753676a5e5fecc2347b6 100644
--- a/lib/github/Repository.py
+++ b/lib/github/Repository.py
@@ -13,7 +13,8 @@
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 # Copyright 2015 Jannis Gebauer <ja.geb@me.com>                                #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -29,9 +30,10 @@
 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
 #                                                                              #
 # ##############################################################################
-
+import sys
 import urllib
 import datetime
+from base64 import b64encode
 
 import github.GithubObject
 import github.PaginatedList
@@ -70,12 +72,18 @@ import github.StatsParticipation
 import github.StatsPunchCard
 import github.Stargazer
 
+atLeastPython26 = sys.hexversion >= 0x02060000
+atLeastPython3 = sys.hexversion >= 0x03000000
+
 
 class Repository(github.GithubObject.CompletableGithubObject):
     """
-    This class represents Repositorys. The reference can be found here http://developer.github.com/v3/repos/
+    This class represents Repositories. The reference can be found here http://developer.github.com/v3/repos/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"full_name": self._full_name.value})
+
     @property
     def archive_url(self):
         """
@@ -571,6 +579,14 @@ class Repository(github.GithubObject.CompletableGithubObject):
         """
         self._completeIfNotSet(self._subscribers_url)
         return self._subscribers_url.value
+
+    @property
+    def subscribers_count(self):
+        """
+        :type: integer
+        """
+        self._completeIfNotSet(self._subscribers_count)
+        return self._subscribers_count.value
 
     @property
     def subscription_url(self):
@@ -849,12 +865,13 @@ class Repository(github.GithubObject.CompletableGithubObject):
         )
         return github.Hook.Hook(self._requester, headers, data, completed=True)
 
-    def create_issue(self, title, body=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, milestone=github.GithubObject.NotSet, labels=github.GithubObject.NotSet):
+    def create_issue(self, title, body=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, milestone=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, assignees=github.GithubObject.NotSet):
         """
         :calls: `POST /repos/:owner/:repo/issues <http://developer.github.com/v3/issues>`_
         :param title: string
         :param body: string
         :param assignee: string or :class:`github.NamedUser.NamedUser`
+        :param assignees: list (of string or :class:`github.NamedUser.NamedUser`)
         :param milestone: :class:`github.Milestone.Milestone`
         :param labels: list of :class:`github.Label.Label`
         :rtype: :class:`github.Issue.Issue`
@@ -862,8 +879,9 @@ class Repository(github.GithubObject.CompletableGithubObject):
         assert isinstance(title, (str, unicode)), title
         assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
         assert assignee is github.GithubObject.NotSet or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
+        assert assignees is github.GithubObject.NotSet or all(isinstance(element, github.NamedUser.NamedUser) or isinstance(element, (str, unicode)) for element in assignees), assignees
         assert milestone is github.GithubObject.NotSet or isinstance(milestone, github.Milestone.Milestone), milestone
-        assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) or isinstance(element, str) for element in labels), labels
+        assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) or isinstance(element, (str, unicode)) for element in labels), labels
 
         post_parameters = {
             "title": title,
@@ -875,6 +893,8 @@ class Repository(github.GithubObject.CompletableGithubObject):
                 post_parameters["assignee"] = assignee
             else:
                 post_parameters["assignee"] = assignee._identity
+        if assignees is not github.GithubObject.NotSet:
+            post_parameters["assignees"] = [element._identity if isinstance(element, github.NamedUser.NamedUser) else element for element in assignees]
         if milestone is not github.GithubObject.NotSet:
             post_parameters["milestone"] = milestone._identity
         if labels is not github.GithubObject.NotSet:
@@ -1002,7 +1022,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
             self.url
         )
 
-    def edit(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, default_branch=github.GithubObject.NotSet):
+    def edit(self, name=None, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, default_branch=github.GithubObject.NotSet):
         """
         :calls: `PATCH /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
         :param name: string
@@ -1015,6 +1035,8 @@ class Repository(github.GithubObject.CompletableGithubObject):
         :param default_branch: string
         :rtype: None
         """
+        if name is None:
+            name = self.name
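+        # Editor's note: ``name`` is now optional, so metadata-only calls such as
+        # ``repo.edit(description="New description")`` no longer have to re-send
+        # the repository name.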
         assert isinstance(name, (str, unicode)), name
         assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
         assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
@@ -1090,6 +1112,20 @@ class Repository(github.GithubObject.CompletableGithubObject):
         )
         return github.Branch.Branch(self._requester, headers, data, completed=True)
 
+    def get_protected_branch(self, branch):
+        """
+        :calls: `GET /repos/:owner/:repo/branches/:branch <https://developer.github.com/v3/repos/#response-10>`_
+        :param branch: string
+        :rtype: :class:`github.Branch.Branch`
+        """
+        assert isinstance(branch, (str, unicode)), branch
+        headers, data = self._requester.requestJsonAndCheck(
+            "GET",
+            self.url + "/branches/" + branch,
+            headers={'Accept': 'application/vnd.github.loki-preview+json'}
+        )
+        return github.Branch.Branch(self._requester, headers, data, completed=True)
+
     def get_branches(self):
         """
         :calls: `GET /repos/:owner/:repo/branches <http://developer.github.com/v3/repos>`_
@@ -1214,8 +1250,163 @@ class Repository(github.GithubObject.CompletableGithubObject):
             self.url + "/contents" + path,
             parameters=url_parameters
         )
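+        # A directory path makes the API return a JSON array; wrap every entry in
+        # a ContentFile instead of assuming a single file object.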
+        if isinstance(data, list):
+            return [
+                github.ContentFile.ContentFile(self._requester, headers, item, completed=False)
+                for item in data
+            ]
         return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
 
+    def create_file(self, path, message, content,
+                    branch=github.GithubObject.NotSet,
+                    committer=github.GithubObject.NotSet,
+                    author=github.GithubObject.NotSet):
+        """Create a file in this repository.
+        :calls: `PUT /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents#create-a-file>`_
+        :param path: string, (required), path of the file in the repository
+        :param message: string, (required), commit message
+        :param content: string, (required), the actual data in the file
+        :param branch: string, (optional), branch to create the commit on. Defaults to the default branch of the repository
+        :param committer: :class:`github.InputGitAuthor.InputGitAuthor`, (optional), if no information is given the authenticated user's information will be used. You must specify both a name and email.
+        :param author: :class:`github.InputGitAuthor.InputGitAuthor`, (optional), if omitted this will be filled in with committer information. If passed, you must specify both a name and email.
+        :rtype: {
+            'content': :class:`ContentFile <github.ContentFile.ContentFile>`,
+            'commit': :class:`Commit <github.Commit.Commit>`}
+        """
+        assert isinstance(path, (str, unicode)),                   \
+            'path must be str/unicode object'
+        assert isinstance(message, (str, unicode)),                \
+            'message must be str/unicode object'
+        assert isinstance(content, (str, unicode)),                \
+            'content must be a str/unicode object'
+        assert branch is github.GithubObject.NotSet                \
+            or isinstance(branch, (str, unicode)),                 \
+            'branch must be a str/unicode object'
+        assert author is github.GithubObject.NotSet                \
+            or isinstance(author, github.InputGitAuthor),          \
+            'author must be a github.InputGitAuthor object'
+        assert committer is github.GithubObject.NotSet             \
+            or isinstance(committer, github.InputGitAuthor),       \
+            'committer must be a github.InputGitAuthor object'
+
+        if atLeastPython3:
+            content = b64encode(content.encode('utf-8')).decode('utf-8')
+        else:
+            if isinstance(content, unicode):
+                content = content.encode('utf-8')
+            content = b64encode(content)
+        put_parameters = {'message': message, 'content': content}
+
+        if branch is not github.GithubObject.NotSet:
+            put_parameters['branch'] = branch
+        if author is not github.GithubObject.NotSet:
+            put_parameters["author"] = author._identity
+        if committer is not github.GithubObject.NotSet:
+            put_parameters["committer"] = committer._identity
+
+        headers, data = self._requester.requestJsonAndCheck(
+            "PUT",
+            self.url + "/contents" + path,
+            input=put_parameters
+        )
+
+        return {'content': github.ContentFile.ContentFile(self._requester, headers, data["content"], completed=False),
+                'commit': github.Commit.Commit(self._requester, headers, data["commit"], completed=True)}
+
+    def update_file(self, path, message, content, sha,
+                    branch=github.GithubObject.NotSet,
+                    committer=github.GithubObject.NotSet,
+                    author=github.GithubObject.NotSet):
+        """This method updates a file in a repository
+        :calls: `PUT /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents#update-a-file>`_
+        :param path: string, Required. The content path.
+        :param message: string, Required. The commit message.
+        :param content: string, Required. The updated file content, Base64 encoded.
+        :param sha: string, Required. The blob SHA of the file being replaced.
+        :param branch: string. The branch name. Default: the repository’s default branch (usually master)
+        :rtype: {
+            'content': :class:`ContentFile <github.ContentFile.ContentFile>`:,
+            'commit': :class:`Commit <github.Commit.Commit>`}
+        """
+        assert isinstance(path, (str, unicode)),                   \
+            'path must be str/unicode object'
+        assert isinstance(message, (str, unicode)),                \
+            'message must be str/unicode object'
+        assert isinstance(content, (str, unicode)),                \
+            'content must be a str/unicode object'
+        assert isinstance(sha, (str, unicode)),                    \
+            'sha must be a str/unicode object'
+        assert branch is github.GithubObject.NotSet                \
+            or isinstance(branch, (str, unicode)),                 \
+            'branch must be a str/unicode object'
+        assert author is github.GithubObject.NotSet                \
+            or isinstance(author, github.InputGitAuthor),          \
+            'author must be a github.InputGitAuthor object'
+        assert committer is github.GithubObject.NotSet             \
+            or isinstance(committer, github.InputGitAuthor),       \
+            'committer must be a github.InputGitAuthor object'
+
+        if atLeastPython3:
+            content = b64encode(content.encode('utf-8')).decode('utf-8')
+        else:
+            if isinstance(content, unicode):
+                content = content.encode('utf-8')
+            content = b64encode(content)
+
+        put_parameters = {'message': message, 'content': content,
+                          'sha': sha}
+
+        if branch is not github.GithubObject.NotSet:
+            put_parameters['branch'] = branch
+        if author is not github.GithubObject.NotSet:
+            put_parameters["author"] = author._identity
+        if committer is not github.GithubObject.NotSet:
+            put_parameters["committer"] = committer._identity
+
+        headers, data = self._requester.requestJsonAndCheck(
+            "PUT",
+            self.url + "/contents" + path,
+            input=put_parameters
+        )
+
+        return {'commit': github.Commit.Commit(self._requester, headers, data["commit"], completed=True),
+                'content': github.ContentFile.ContentFile(self._requester, headers, data["content"], completed=False)}
+
+    def delete_file(self, path, message, sha,
+                    branch=github.GithubObject.NotSet):
+        """This method delete a file in a repository
+        :calls: `DELETE /repos/:owner/:repo/contents/:path <https://developer.github.com/v3/repos/contents/#delete-a-file>`_
+        :param path: string, Required. The content path.
+        :param message: string, Required. The commit message.
+        :param sha: string, Required. The blob SHA of the file being replaced.
+        :param branch: string. The branch name. Default: the repository’s default branch (usually master)
+        :rtype: {
+            'content': :class:`null <github.GithubObject.NotSet>`:,
+            'commit': :class:`Commit <github.Commit.Commit>`}
+        """
+        assert isinstance(path, (str, unicode)),                   \
+            'path must be str/unicode object'
+        assert isinstance(message, (str, unicode)),                \
+            'message must be str/unicode object'
+        assert isinstance(sha, (str, unicode)),                    \
+            'sha must be a str/unicode object'
+        assert branch is github.GithubObject.NotSet                \
+            or isinstance(branch, (str, unicode)),                 \
+            'branch must be a str/unicode object'
+
+        url_parameters = {'message': message, 'sha': sha}
+        if branch is not github.GithubObject.NotSet:
+            url_parameters['branch'] = branch
+
+        headers, data = self._requester.requestJsonAndCheck(
+            "DELETE",
+            self.url + "/contents" + path,
+            input=url_parameters
+        )
+
+        return {'commit': github.Commit.Commit(self._requester, headers, data["commit"], completed=True),
+                'content': github.GithubObject.NotSet}
+
     def get_dir_contents(self, path, ref=github.GithubObject.NotSet):
         """
         :calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
@@ -1457,14 +1648,14 @@ class Repository(github.GithubObject.CompletableGithubObject):
         assert creator is github.GithubObject.NotSet or isinstance(creator, github.NamedUser.NamedUser) or isinstance(creator, (str, unicode)), creator
         url_parameters = dict()
         if milestone is not github.GithubObject.NotSet:
-            if isinstance(milestone, str):
+            if isinstance(milestone, (str, unicode)):
                 url_parameters["milestone"] = milestone
             else:
                 url_parameters["milestone"] = milestone._identity
         if state is not github.GithubObject.NotSet:
             url_parameters["state"] = state
         if assignee is not github.GithubObject.NotSet:
-            if isinstance(assignee, str):
+            if isinstance(assignee, (str, unicode)):
                 url_parameters["assignee"] = assignee
             else:
                 url_parameters["assignee"] = assignee._identity
@@ -1479,7 +1670,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
         if since is not github.GithubObject.NotSet:
             url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
         if creator is not github.GithubObject.NotSet:
-            if isinstance(creator, str):
+            if isinstance(creator, (str, unicode)):
                 url_parameters["creator"] = creator
             else:
                 url_parameters["creator"] = creator._identity
@@ -1664,19 +1855,21 @@ class Repository(github.GithubObject.CompletableGithubObject):
         )
         return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
 
-    def get_pulls(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, base=github.GithubObject.NotSet):
+    def get_pulls(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, base=github.GithubObject.NotSet, head=github.GithubObject.NotSet):
         """
         :calls: `GET /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
         :param state: string
         :param sort: string
         :param direction: string
         :param base: string
+        :param head: string
         :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequest.PullRequest`
         """
         assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
         assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
         assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
         assert base is github.GithubObject.NotSet or isinstance(base, (str, unicode)), base
+        assert head is github.GithubObject.NotSet or isinstance(head, (str, unicode)), head
         url_parameters = dict()
         if state is not github.GithubObject.NotSet:
             url_parameters["state"] = state
@@ -1686,6 +1879,8 @@ class Repository(github.GithubObject.CompletableGithubObject):
             url_parameters["direction"] = direction
         if base is not github.GithubObject.NotSet:
             url_parameters["base"] = base
+        if head is not github.GithubObject.NotSet:
+            url_parameters["head"] = head
         return github.PaginatedList.PaginatedList(
             github.PullRequest.PullRequest,
             self._requester,
@@ -1779,7 +1974,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
             "GET",
             self.url + "/stats/contributors"
         )
-        if data == {}:
+        if not data:
             return None
         else:
             return [
@@ -1796,7 +1991,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
             "GET",
             self.url + "/stats/commit_activity"
         )
-        if data == {}:
+        if not data:
             return None
         else:
             return [
@@ -1813,7 +2008,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
             "GET",
             self.url + "/stats/code_frequency"
         )
-        if data == {}:
+        if not data:
             return None
         else:
             return [
@@ -1830,7 +2025,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
             "GET",
             self.url + "/stats/participation"
         )
-        if data == {}:
+        if not data:
             return None
         else:
             return github.StatsParticipation.StatsParticipation(self._requester, headers, data, completed=True)
@@ -1844,7 +2039,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
             "GET",
             self.url + "/stats/punch_card"
         )
-        if data == {}:
+        if not data:
             return None
         else:
             return github.StatsPunchCard.StatsPunchCard(self._requester, headers, data, completed=True)
@@ -1897,7 +2092,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
                 self.url + "/releases/" + str(id)
             )
             return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
-        elif isinstance(id, str):
+        elif isinstance(id, (str, unicode)):
             headers, data = self._requester.requestJsonAndCheck(
                 "GET",
                 self.url + "/releases/tags/" + id
@@ -2007,6 +2202,38 @@ class Repository(github.GithubObject.CompletableGithubObject):
         else:
             return github.Commit.Commit(self._requester, headers, data, completed=True)
 
+    def protect_branch(self, branch, enabled, enforcement_level=github.GithubObject.NotSet, contexts=github.GithubObject.NotSet):
+        """
+        :calls: `PATCH /repos/:owner/:repo/branches/:branch <https://developer.github.com/v3/repos/#enabling-and-disabling-branch-protection>`_
+        :param branch: string
+        :param enabled: boolean
+        :param enforcement_level: string
+        :param contexts: list of strings
+        :rtype: None
+        """
+
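+        # Illustrative sketch (editor's note): enforcement_level and contexts
+        # follow the loki-preview branch-protection API; the values below are
+        # placeholders rather than a definitive configuration.
+        #
+        #     repo.protect_branch("master", True, enforcement_level="everyone",
+        #                         contexts=["continuous-integration/travis-ci"])
+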
+        assert isinstance(branch, (str, unicode))
+        assert isinstance(enabled, bool)
+        assert enforcement_level is github.GithubObject.NotSet or isinstance(enforcement_level, (str, unicode)), enforcement_level
+        assert contexts is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in contexts), contexts
+
+        post_parameters = {
+            "protection": {}
+        }
+        if enabled is not github.GithubObject.NotSet:
+            post_parameters["protection"]["enabled"] = enabled
+        if enforcement_level is not github.GithubObject.NotSet:
+            post_parameters["protection"]["required_status_checks"] = {}
+            post_parameters["protection"]["required_status_checks"]["enforcement_level"] = enforcement_level
+        if contexts is not github.GithubObject.NotSet:
+            # Guard against a missing required_status_checks dict when contexts
+            # is given without enforcement_level.
+            post_parameters["protection"].setdefault("required_status_checks", {})["contexts"] = contexts
+        headers, data = self._requester.requestJsonAndCheck(
+            "PATCH",
+            self.url + "/branches/" + branch,
+            input=post_parameters,
+            headers={'Accept': 'application/vnd.github.loki-preview+json'}
+        )
+
     def remove_from_collaborators(self, collaborator):
         """
         :calls: `DELETE /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
@@ -2130,6 +2357,7 @@ class Repository(github.GithubObject.CompletableGithubObject):
         self._stargazers_url = github.GithubObject.NotSet
         self._statuses_url = github.GithubObject.NotSet
         self._subscribers_url = github.GithubObject.NotSet
+        self._subscribers_count = github.GithubObject.NotSet
         self._subscription_url = github.GithubObject.NotSet
         self._svn_url = github.GithubObject.NotSet
         self._tags_url = github.GithubObject.NotSet
@@ -2265,6 +2493,8 @@ class Repository(github.GithubObject.CompletableGithubObject):
             self._statuses_url = self._makeStringAttribute(attributes["statuses_url"])
         if "subscribers_url" in attributes:  # pragma no branch
             self._subscribers_url = self._makeStringAttribute(attributes["subscribers_url"])
+        if "subscribers_count" in attributes:  # pragma no branch
+            self._subscribers_count = self._makeIntAttribute(attributes["subscribers_count"])
         if "subscription_url" in attributes:  # pragma no branch
             self._subscription_url = self._makeStringAttribute(attributes["subscription_url"])
         if "svn_url" in attributes:  # pragma no branch
diff --git a/lib/github/RepositoryKey.py b/lib/github/RepositoryKey.py
index 261bbfd3bb73229928b3d2f23533d5fc463010f0..847b7f82b86d79a01684dfd3f5380f645fe708de 100644
--- a/lib/github/RepositoryKey.py
+++ b/lib/github/RepositoryKey.py
@@ -9,7 +9,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -38,6 +39,9 @@ class RepositoryKey(github.GithubObject.CompletableGithubObject):
         github.GithubObject.CompletableGithubObject.__init__(self, requester, headers, attributes, completed)
         self.__repoUrl = repoUrl
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value})
+
     @property
     def __customUrl(self):
         return self.__repoUrl + "/keys/" + str(self.id)
diff --git a/lib/github/Requester.py b/lib/github/Requester.py
index 720d9d653c999de46352935dc0be9ea2d3268efc..9fd77b163b379834ea5b9f7fd0cbd46190e3451f 100644
--- a/lib/github/Requester.py
+++ b/lib/github/Requester.py
@@ -15,7 +15,8 @@
 # Copyright 2013 Mark Roddy <markroddy@gmail.com>                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -186,7 +187,7 @@ class Requester:
             cls = GithubException.TwoFactorException  # pragma no cover (Should be covered)
         elif status == 403 and output.get("message").startswith("Missing or invalid User Agent string"):
             cls = GithubException.BadUserAgentException
-        elif status == 403 and output.get("message").startswith("API Rate Limit Exceeded"):
+        elif status == 403 and output.get("message").lower().startswith("api rate limit exceeded"):
             cls = GithubException.RateLimitExceededException
         elif status == 404 and output.get("message") == "Not Found":
             cls = GithubException.UnknownObjectException
@@ -263,6 +264,7 @@ class Requester:
         return status, responseHeaders, output
 
     def __requestRaw(self, cnx, verb, url, requestHeaders, input):
+        original_cnx = cnx
         if cnx is None:
             cnx = self.__createConnection()
         else:
@@ -284,6 +286,9 @@ class Requester:
 
         self.__log(verb, url, requestHeaders, input, status, responseHeaders, output)
 
+        if status == 301 and 'location' in responseHeaders:
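+            # GitHub answers 301 for moved resources (for example a renamed
+            # repository); replay the same request against the new location,
+            # starting from the connection we were originally handed.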
+            return self.__requestRaw(original_cnx, verb, responseHeaders['location'], requestHeaders, input)
+
         return status, responseHeaders, output
 
     def __authenticate(self, url, requestHeaders, parameters):
@@ -333,7 +338,10 @@ class Requester:
             headers = {}
             if url.username and url.password:
                 auth = '%s:%s' % (url.username, url.password)
-                headers['Proxy-Authorization'] = 'Basic ' + base64.b64encode(auth)
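+                # base64.b64encode works on bytes under Python 3, so encode the
+                # credentials first and decode the result back to str.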
+                if atLeastPython3 and isinstance(auth, str):
+                    headers['Proxy-Authorization'] = 'Basic ' + base64.b64encode(auth.encode()).decode()
+                else:
+                    headers['Proxy-Authorization'] = 'Basic ' + base64.b64encode(auth)
             conn.set_tunnel(self.__hostname, self.__port, headers)
         else:
             conn = self.__connectionClass(self.__hostname, self.__port, **kwds)
diff --git a/lib/github/Stargazer.py b/lib/github/Stargazer.py
index 4f261c64cd8538457c165bced148a495d62bd1a7..6fca6a29138017e1265e0cae174a71b36c0ffc47 100644
--- a/lib/github/Stargazer.py
+++ b/lib/github/Stargazer.py
@@ -13,7 +13,8 @@
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 # Copyright 2015 Dan Vanderkam <danvdk@gmail.com>                              #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -38,6 +39,10 @@ class Stargazer(github.GithubObject.NonCompletableGithubObject):
     This class represents Stargazers with the date of starring as returned by
     https://developer.github.com/v3/activity/starring/#alternative-response-with-star-creation-timestamps
     """
+
+    def __repr__(self):
+        return self.get__repr__({"user": self._user.value._login.value})
+
     @property
     def starred_at(self):
         """
diff --git a/lib/github/StatsCodeFrequency.py b/lib/github/StatsCodeFrequency.py
index 0a19810402529f66b835298249b2128246c3d136..0635c0720c25ff31ee97fde466f58d85b1ffeae3 100755
--- a/lib/github/StatsCodeFrequency.py
+++ b/lib/github/StatsCodeFrequency.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/StatsCommitActivity.py b/lib/github/StatsCommitActivity.py
index 14bd75376e0ba4dcd6928f76abab0b9e336d26d0..ded0b93c706e512281766714b148a195b4deb238 100755
--- a/lib/github/StatsCommitActivity.py
+++ b/lib/github/StatsCommitActivity.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/StatsContributor.py b/lib/github/StatsContributor.py
index b869b0504b2675f7bc54b8f22e42bfde464bc9ff..fda45b03b622af03aa29c7a34b84c4f0d1cba383 100755
--- a/lib/github/StatsContributor.py
+++ b/lib/github/StatsContributor.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/StatsParticipation.py b/lib/github/StatsParticipation.py
index 356974eedfe16bd8eb839f2169fb90429701a10e..b4acd449820f3c36c27da7d48ba892adbac3059d 100755
--- a/lib/github/StatsParticipation.py
+++ b/lib/github/StatsParticipation.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/StatsPunchCard.py b/lib/github/StatsPunchCard.py
index 4c5de41e270728db717ce592cd823b52ac5454fb..733d4a81b5521305fff41d63c59d4e1235d0f84f 100755
--- a/lib/github/StatsPunchCard.py
+++ b/lib/github/StatsPunchCard.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
diff --git a/lib/github/Status.py b/lib/github/Status.py
index 46e3921263a811cc4a03339ae051568384366ab7..67a6b80483718e4559c182f101650671bac17d48 100644
--- a/lib/github/Status.py
+++ b/lib/github/Status.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -29,6 +30,9 @@ class Status(github.GithubObject.NonCompletableGithubObject):
     This class represents status as defined in https://status.github.com/api
     """
 
+    def __repr__(self):
+        return self.get__repr__({"status": self._status.value})
+
     @property
     def status(self):
         """
diff --git a/lib/github/StatusMessage.py b/lib/github/StatusMessage.py
index c0c99903ce74f6fb59381408e43670f51cd50e8b..ec081e393f56a6bfe46ad4ab14845854159dbeca 100644
--- a/lib/github/StatusMessage.py
+++ b/lib/github/StatusMessage.py
@@ -4,7 +4,8 @@
 #                                                                              #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -29,6 +30,9 @@ class StatusMessage(github.GithubObject.NonCompletableGithubObject):
     This class represents status messages as defined in https://status.github.com/api
     """
 
+    def __repr__(self):
+        return self.get__repr__({"body": self._body.value})
+
     @property
     def body(self):
         """
diff --git a/lib/github/Tag.py b/lib/github/Tag.py
index 5f19363fdb86f0eca3f89bda518a95c4517fa529..c837c74e27915a68ffcc48109f0c8ec97b59edd8 100644
--- a/lib/github/Tag.py
+++ b/lib/github/Tag.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -35,6 +36,12 @@ class Tag(github.GithubObject.NonCompletableGithubObject):
     This class represents Tags. The reference can be found here http://developer.github.com/v3/git/tags/
     """
 
+    def __repr__(self):
+        return self.get__repr__({
+            "name": self._name.value,
+            "commit": self._commit.value
+        })
+
     @property
     def commit(self):
         """
diff --git a/lib/github/Team.py b/lib/github/Team.py
index e1971f5f2cdf8e9d2b6359996b4db55cb0ba4008..9491020ba24b63cc76ec32a97b020ddad90d4bef 100644
--- a/lib/github/Team.py
+++ b/lib/github/Team.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -37,6 +38,9 @@ class Team(github.GithubObject.CompletableGithubObject):
     This class represents Teams. The reference can be found here http://developer.github.com/v3/orgs/teams/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "name": self._name.value})
+
     @property
     def id(self):
         """
@@ -128,7 +132,7 @@ class Team(github.GithubObject.CompletableGithubObject):
         :rtype: None
         """
         assert isinstance(member, github.NamedUser.NamedUser), member
-        headers, data = self._requester.requestjsonandcheck(
+        headers, data = self._requester.requestJsonAndCheck(
             "PUT",
             self.url + "/memberships/" + member._identity
         )
@@ -145,6 +149,23 @@ class Team(github.GithubObject.CompletableGithubObject):
             self.url + "/repos/" + repo._identity
         )
 
+    def set_repo_permission(self, repo, permission):
+        """
+        :calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
+        :param repo: :class:`github.Repository.Repository`
+        :param permission: string
+        :rtype: None
+        """
+        assert isinstance(repo, github.Repository.Repository), repo
+        assert isinstance(permission, (str, unicode)), permission
+        put_parameters = {
+            "permission": permission,
+        }
+        headers, data = self._requester.requestJsonAndCheck(
+            "PUT",
+            self.url + "/repos/" + repo._identity,
+            input=put_parameters
+        )
+
     def delete(self):
         """
         :calls: `DELETE /teams/:id <http://developer.github.com/v3/orgs/teams>`_
diff --git a/lib/github/UserKey.py b/lib/github/UserKey.py
index cfc45823a9151d59ec117db527ee45888e88f22b..0f47cf086f1a2d2856910e489b5e5e579b8271ff 100644
--- a/lib/github/UserKey.py
+++ b/lib/github/UserKey.py
@@ -8,7 +8,8 @@
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 # Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -33,6 +34,9 @@ class UserKey(github.GithubObject.CompletableGithubObject):
     This class represents UserKeys. The reference can be found here http://developer.github.com/v3/users/keys/
     """
 
+    def __repr__(self):
+        return self.get__repr__({"id": self._id.value, "title": self._title.value})
+
     @property
     def id(self):
         """
diff --git a/lib/github/__init__.py b/lib/github/__init__.py
index 18a0e6127fd42fc7fad92cd7a45752d21be4b3b6..550c9afbf7160177cd4c8b7258a107ba0d509b67 100644
--- a/lib/github/__init__.py
+++ b/lib/github/__init__.py
@@ -6,7 +6,8 @@
 # Copyright 2012 Zearin <zearin@gonk.net>                                      #
 # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
 #                                                                              #
-# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
+# This file is part of PyGithub.                                               #
+# http://pygithub.github.io/PyGithub/v1/index.html                             #
 #                                                                              #
 # PyGithub is free software: you can redistribute it and/or modify it under    #
 # the terms of the GNU Lesser General Public License as published by the Free  #
@@ -33,8 +34,8 @@ All classes inherit from :class:`github.GithubObject.GithubObject`.
 
 import logging
 
-from MainClass import Github
-from GithubException import GithubException, BadCredentialsException, UnknownObjectException, BadUserAgentException, RateLimitExceededException, BadAttributeException, TwoFactorException
+from MainClass import Github, GithubIntegration
+from GithubException import GithubException, BadCredentialsException, UnknownObjectException, BadUserAgentException, RateLimitExceededException, BadAttributeException
 from InputFileContent import InputFileContent
 from InputGitAuthor import InputGitAuthor
 from InputGitTreeElement import InputGitTreeElement
diff --git a/lib/imdb/Character.py b/lib/imdb/Character.py
index bc225d6c49e57faf9f0e88dfe3eeb130ebb8a791..5a5239af79f7066a34c05e3f4ff3ce8805bab61d 100644
--- a/lib/imdb/Character.py
+++ b/lib/imdb/Character.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from copy import deepcopy
diff --git a/lib/imdb/Company.py b/lib/imdb/Company.py
index 64c434f7a7e39971243f21983aed2fd490d55e8b..5e05c840d3787d0cb8cbefc3cd40443f346c54d7 100644
--- a/lib/imdb/Company.py
+++ b/lib/imdb/Company.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from copy import deepcopy
diff --git a/lib/imdb/Movie.py b/lib/imdb/Movie.py
index b248bf8c2b2dc659b5e2cbd89312ffed886d5a48..5cdcde6504accb88356ad1260833878fc8ddf7c2 100644
--- a/lib/imdb/Movie.py
+++ b/lib/imdb/Movie.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from copy import deepcopy
diff --git a/lib/imdb/Person.py b/lib/imdb/Person.py
index 0dfdf20c13a48dd9790698744bba73af77ad6542..6e3e46231f795c41b34ab36170446f33d50d298a 100644
--- a/lib/imdb/Person.py
+++ b/lib/imdb/Person.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from copy import deepcopy
diff --git a/lib/imdb/__init__.py b/lib/imdb/__init__.py
index fd63921f10d575b124a8ad7a05693301dfe86bc1..92c40ec3ca2def986939ed817861a7b1b381e258 100644
--- a/lib/imdb/__init__.py
+++ b/lib/imdb/__init__.py
@@ -6,7 +6,7 @@ a person from the IMDb database.
 It can fetch data through different media (e.g.: the IMDb web pages,
 a SQL database, etc.)
 
-Copyright 2004-2014 Davide Alberani <da@erlug.linux.it>
+Copyright 2004-2016 Davide Alberani <da@erlug.linux.it>
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -20,12 +20,12 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 __all__ = ['IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
             'available_access_systems']
-__version__ = VERSION = '5.1dev20150705'
+__version__ = VERSION = '5.2dev20161118'
 
 # Import compatibility module (importing it is enough).
 import _compat
diff --git a/lib/imdb/_compat.py b/lib/imdb/_compat.py
index 4625ec0c4394846e671c97e71c9d3f4c1b5f6ae8..03b11564ea934a43ebb89fe01333d23acba851ff 100644
--- a/lib/imdb/_compat.py
+++ b/lib/imdb/_compat.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 # TODO: now we're heavily using the 'logging' module, which was not
diff --git a/lib/imdb/_exceptions.py b/lib/imdb/_exceptions.py
index 92f09245c7a0f066a55032de91a6166596dac1bc..55788a207d1e802b4d56a19cd88e7c40f6be39cf 100644
--- a/lib/imdb/_exceptions.py
+++ b/lib/imdb/_exceptions.py
@@ -17,7 +17,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import logging
diff --git a/lib/imdb/_logging.py b/lib/imdb/_logging.py
index 5c9b1957d21856b3365879404a7a9dd41e928986..2b8a286a0d74a60c3fff536dbc49d440615d03f5 100644
--- a/lib/imdb/_logging.py
+++ b/lib/imdb/_logging.py
@@ -17,7 +17,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import logging
diff --git a/lib/imdb/helpers.py b/lib/imdb/helpers.py
index 44b454ef25748af3845c2bdbac17d8d99d7c8a40..f22061429d8a7038ed4b1e43df5a8584cffe9600 100644
--- a/lib/imdb/helpers.py
+++ b/lib/imdb/helpers.py
@@ -19,7 +19,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 # XXX: find better names for the functions in this modules.
diff --git a/lib/imdb/linguistics.py b/lib/imdb/linguistics.py
index 9e39cc3262df57e7fb4ab9583eaba1677cbbda4a..d53597b2eb53525a172c256c91817af514fd4b08 100644
--- a/lib/imdb/linguistics.py
+++ b/lib/imdb/linguistics.py
@@ -20,7 +20,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 # List of generic articles used when the language of the title is unknown (or
diff --git a/lib/imdb/locale/__init__.py b/lib/imdb/locale/__init__.py
index 6c83abc229589852ed313d76217337a639df0696..9bc2e4668e8cf7d2545193f0f87812b6257f14b2 100644
--- a/lib/imdb/locale/__init__.py
+++ b/lib/imdb/locale/__init__.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import gettext
diff --git a/lib/imdb/locale/ar/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/ar/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..c3687cfaacbea039fd89fa4d096562df7a070a97
Binary files /dev/null and b/lib/imdb/locale/ar/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/bg/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/bg/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..07eef0df19533305eba19066af159daf88022775
Binary files /dev/null and b/lib/imdb/locale/bg/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/de/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/de/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..879ff6efaf37bdf00d4d2923a3113e7d63d9376b
Binary files /dev/null and b/lib/imdb/locale/de/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/en/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/en/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..be2d4a29a3b50cfe4f3b5ddab221823bdc558d11
Binary files /dev/null and b/lib/imdb/locale/en/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/es/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/es/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..814ffc5def499463296e86de27d542f1b7655d70
Binary files /dev/null and b/lib/imdb/locale/es/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/fr/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/fr/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..800543323911b2c8d03bcc7e2d0d1f4126ffa1cc
Binary files /dev/null and b/lib/imdb/locale/fr/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/generatepot.py b/lib/imdb/locale/generatepot.py
index 52ea2d785ea685d887c17d5ae2acb04340a67894..282f7d41e37390d208017235496fd489ee1999ec 100755
--- a/lib/imdb/locale/generatepot.py
+++ b/lib/imdb/locale/generatepot.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
diff --git a/lib/imdb/locale/imdbpy-ar.po b/lib/imdb/locale/imdbpy-ar.po
index 31489dbc40c6c0eb9b473dadb3d374665a29e45b..a9ed06e25d890cbe56bb963b04b5ce3848a6dcae 100644
--- a/lib/imdb/locale/imdbpy-ar.po
+++ b/lib/imdb/locale/imdbpy-ar.po
@@ -1,13 +1,13 @@
 # Gettext message file for imdbpy
 # Translators:
-# RainDropR <rajaa@hilltx.com>, 2013
+# Rajaa Jalil <rajaa@hilltx.com>, 2013
 msgid ""
 msgstr ""
 "Project-Id-Version: IMDbPY\n"
 "POT-Creation-Date: 2010-03-18 14:35+0000\n"
-"PO-Revision-Date: 2013-11-20 11:07+0000\n"
-"Last-Translator: RainDropR <rajaa@hilltx.com>\n"
-"Language-Team: Arabic (http://www.transifex.com/projects/p/imdbpy/language/ar/)\n"
+"PO-Revision-Date: 2016-03-28 20:40+0000\n"
+"Last-Translator: Rajaa Jalil <rajaa@hilltx.com>\n"
+"Language-Team: Arabic (http://www.transifex.com/davide_alberani/imdbpy/language/ar/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
diff --git a/lib/imdb/locale/imdbpy-bg.po b/lib/imdb/locale/imdbpy-bg.po
index 41c4ce6ad91abe2504d47be43b7a54431031035a..9cbf2a5a7dfd895421992098d1224ac49edf77a1 100644
--- a/lib/imdb/locale/imdbpy-bg.po
+++ b/lib/imdb/locale/imdbpy-bg.po
@@ -1,13 +1,13 @@
 # Gettext message file for imdbpy
 # Translators:
-# Niko Kovach <crashdeburn@gmail.com>, 2014
+# Atanas Kovachki <crashdeburn@gmail.com>, 2014
 msgid ""
 msgstr ""
 "Project-Id-Version: IMDbPY\n"
 "POT-Creation-Date: 2010-03-18 14:35+0000\n"
-"PO-Revision-Date: 2014-03-16 10:46+0000\n"
-"Last-Translator: Niko Kovach <crashdeburn@gmail.com>\n"
-"Language-Team: Bulgarian (http://www.transifex.com/projects/p/imdbpy/language/bg/)\n"
+"PO-Revision-Date: 2016-03-28 20:40+0000\n"
+"Last-Translator: Atanas Kovachki <crashdeburn@gmail.com>\n"
+"Language-Team: Bulgarian (http://www.transifex.com/davide_alberani/imdbpy/language/bg/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
diff --git a/lib/imdb/locale/imdbpy-de.po b/lib/imdb/locale/imdbpy-de.po
index ee3112b107c1b43c02aa696fbae5a88293d65873..aa8b6ef2a22a8795bb28f2250beee16147560e78 100644
--- a/lib/imdb/locale/imdbpy-de.po
+++ b/lib/imdb/locale/imdbpy-de.po
@@ -6,9 +6,9 @@ msgid ""
 msgstr ""
 "Project-Id-Version: IMDbPY\n"
 "POT-Creation-Date: 2010-03-18 14:35+0000\n"
-"PO-Revision-Date: 2014-10-21 15:24+0000\n"
+"PO-Revision-Date: 2016-03-28 20:40+0000\n"
 "Last-Translator: Raphael\n"
-"Language-Team: German (http://www.transifex.com/projects/p/imdbpy/language/de/)\n"
+"Language-Team: German (http://www.transifex.com/davide_alberani/imdbpy/language/de/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
diff --git a/lib/imdb/locale/imdbpy-es.po b/lib/imdb/locale/imdbpy-es.po
index 729903df08eb79861136866c51f49ca05236bd5b..a7ad01a103cf8a70f5dacca4030b28831a2d8f90 100644
--- a/lib/imdb/locale/imdbpy-es.po
+++ b/lib/imdb/locale/imdbpy-es.po
@@ -1,14 +1,13 @@
 # Gettext message file for imdbpy
 # Translators:
-# strel <strelnic@gmail.com>, 2013
+# strel, 2013
 msgid ""
 msgstr ""
 "Project-Id-Version: IMDbPY\n"
-"Report-Msgid-Bugs-To: http://sourceforge.net/tracker/?group_id=105998&atid=642794\n"
 "POT-Creation-Date: 2010-03-18 14:35+0000\n"
-"PO-Revision-Date: 2013-03-11 17:18+0000\n"
-"Last-Translator: strel <strelnic@gmail.com>\n"
-"Language-Team: Spanish (http://www.transifex.com/projects/p/imdbpy/language/es/)\n"
+"PO-Revision-Date: 2016-03-28 20:40+0000\n"
+"Last-Translator: strel\n"
+"Language-Team: Spanish (http://www.transifex.com/davide_alberani/imdbpy/language/es/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
diff --git a/lib/imdb/locale/imdbpy-fr.po b/lib/imdb/locale/imdbpy-fr.po
index f40012c12dc982782c72ef6fdce63d740c9b5e12..0fbed611def043c8bace45547919adb940b43b6e 100644
--- a/lib/imdb/locale/imdbpy-fr.po
+++ b/lib/imdb/locale/imdbpy-fr.po
@@ -1,15 +1,15 @@
 # Gettext message file for imdbpy
 # Translators:
-# lukophron, 2014
-# Rajaa Gutknecht <rajaa@hilltx.com>, 2013
+# lukophron, 2014-2016
+# Rajaa Jalil <rajaa@hilltx.com>, 2013
 # lkppo, 2012
 msgid ""
 msgstr ""
 "Project-Id-Version: IMDbPY\n"
 "POT-Creation-Date: 2010-03-18 14:35+0000\n"
-"PO-Revision-Date: 2014-10-08 02:52+0000\n"
+"PO-Revision-Date: 2016-03-20 05:27+0000\n"
 "Last-Translator: lukophron\n"
-"Language-Team: French (http://www.transifex.com/projects/p/imdbpy/language/fr/)\n"
+"Language-Team: French (http://www.transifex.com/davide_alberani/imdbpy/language/fr/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -38,7 +38,7 @@ msgstr "information-additionnelle"
 
 # Default: Admissions
 msgid "admissions"
-msgstr "admissions"
+msgstr "entrées"
 
 # Default: Agent address
 msgid "agent-address"
@@ -46,15 +46,15 @@ msgstr ""
 
 # Default: Airing
 msgid "airing"
-msgstr ""
+msgstr "en-diffusion"
 
 # Default: Akas
 msgid "akas"
-msgstr ""
+msgstr "alias"
 
 # Default: Akas from release info
 msgid "akas-from-release-info"
-msgstr ""
+msgstr "alias-depuis-info-sortie"
 
 # Default: All products
 msgid "all-products"
@@ -70,7 +70,7 @@ msgstr ""
 
 # Default: Amazon reviews
 msgid "amazon-reviews"
-msgstr ""
+msgstr "critiques-amazon"
 
 # Default: Analog left
 msgid "analog-left"
@@ -82,7 +82,7 @@ msgstr ""
 
 # Default: Animation department
 msgid "animation-department"
-msgstr ""
+msgstr "département-animation"
 
 # Default: Archive footage
 msgid "archive-footage"
@@ -178,7 +178,7 @@ msgstr "livre"
 
 # Default: Books
 msgid "books"
-msgstr "vres"
+msgstr "livres"
 
 # Default: Bottom 100 rank
 msgid "bottom-100-rank"
diff --git a/lib/imdb/locale/imdbpy-pt_BR.po b/lib/imdb/locale/imdbpy-pt_BR.po
new file mode 100644
index 0000000000000000000000000000000000000000..51a7a82e7ed609720c6ffce76b60ea1686905791
--- /dev/null
+++ b/lib/imdb/locale/imdbpy-pt_BR.po
@@ -0,0 +1,1303 @@
+# Gettext message file for imdbpy
+# Translators:
+# Wagner Marques Oliveira <wagnermarques00@hotmail.com>, 2015
+msgid ""
+msgstr ""
+"Project-Id-Version: IMDbPY\n"
+"POT-Creation-Date: 2010-03-18 14:35+0000\n"
+"PO-Revision-Date: 2016-03-28 20:40+0000\n"
+"Last-Translator: Wagner Marques Oliveira <wagnermarques00@hotmail.com>\n"
+"Language-Team: Portuguese (Brazil) (http://www.transifex.com/davide_alberani/imdbpy/language/pt_BR/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Domain: imdbpy\n"
+"Language: pt_BR\n"
+"Language-Code: en\n"
+"Language-Name: English\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+"Preferred-Encodings: utf-8\n"
+
+# Default: Actor
+msgid "actor"
+msgstr "ator"
+
+# Default: Actress
+msgid "actress"
+msgstr "atriz"
+
+# Default: Adaption
+msgid "adaption"
+msgstr "adaptação"
+
+# Default: Additional information
+msgid "additional-information"
+msgstr "informação-adicional"
+
+# Default: Admissions
+msgid "admissions"
+msgstr "admissões"
+
+# Default: Agent address
+msgid "agent-address"
+msgstr "endereço-de-agente"
+
+# Default: Airing
+msgid "airing"
+msgstr "no ar"
+
+# Default: Akas
+msgid "akas"
+msgstr "mais conhecido como"
+
+# Default: Akas from release info
+msgid "akas-from-release-info"
+msgstr "mais conhecido como-para-lançamento-informação"
+
+# Default: All products
+msgid "all-products"
+msgstr "todos-produtos"
+
+# Default: Alternate language version of
+msgid "alternate-language-version-of"
+msgstr ""
+
+# Default: Alternate versions
+msgid "alternate-versions"
+msgstr ""
+
+# Default: Amazon reviews
+msgid "amazon-reviews"
+msgstr ""
+
+# Default: Analog left
+msgid "analog-left"
+msgstr ""
+
+# Default: Analog right
+msgid "analog-right"
+msgstr ""
+
+# Default: Animation department
+msgid "animation-department"
+msgstr ""
+
+# Default: Archive footage
+msgid "archive-footage"
+msgstr ""
+
+# Default: Arithmetic mean
+msgid "arithmetic-mean"
+msgstr ""
+
+# Default: Art department
+msgid "art-department"
+msgstr ""
+
+# Default: Art direction
+msgid "art-direction"
+msgstr ""
+
+# Default: Art director
+msgid "art-director"
+msgstr ""
+
+# Default: Article
+msgid "article"
+msgstr ""
+
+# Default: Asin
+msgid "asin"
+msgstr ""
+
+# Default: Aspect ratio
+msgid "aspect-ratio"
+msgstr ""
+
+# Default: Assigner
+msgid "assigner"
+msgstr ""
+
+# Default: Assistant director
+msgid "assistant-director"
+msgstr ""
+
+# Default: Auctions
+msgid "auctions"
+msgstr ""
+
+# Default: Audio noise
+msgid "audio-noise"
+msgstr ""
+
+# Default: Audio quality
+msgid "audio-quality"
+msgstr ""
+
+# Default: Award
+msgid "award"
+msgstr ""
+
+# Default: Awards
+msgid "awards"
+msgstr ""
+
+# Default: Biographical movies
+msgid "biographical-movies"
+msgstr ""
+
+# Default: Biography
+msgid "biography"
+msgstr ""
+
+# Default: Biography print
+msgid "biography-print"
+msgstr ""
+
+# Default: Birth date
+msgid "birth-date"
+msgstr ""
+
+# Default: Birth name
+msgid "birth-name"
+msgstr ""
+
+# Default: Birth notes
+msgid "birth-notes"
+msgstr ""
+
+# Default: Body
+msgid "body"
+msgstr ""
+
+# Default: Book
+msgid "book"
+msgstr ""
+
+# Default: Books
+msgid "books"
+msgstr ""
+
+# Default: Bottom 100 rank
+msgid "bottom-100-rank"
+msgstr ""
+
+# Default: Budget
+msgid "budget"
+msgstr ""
+
+# Default: Business
+msgid "business"
+msgstr ""
+
+# Default: By arrangement with
+msgid "by-arrangement-with"
+msgstr ""
+
+# Default: Camera
+msgid "camera"
+msgstr ""
+
+# Default: Camera and electrical department
+msgid "camera-and-electrical-department"
+msgstr ""
+
+# Default: Canonical episode title
+msgid "canonical-episode-title"
+msgstr ""
+
+# Default: Canonical name
+msgid "canonical-name"
+msgstr ""
+
+# Default: Canonical series title
+msgid "canonical-series-title"
+msgstr ""
+
+# Default: Canonical title
+msgid "canonical-title"
+msgstr ""
+
+# Default: Cast
+msgid "cast"
+msgstr ""
+
+# Default: Casting department
+msgid "casting-department"
+msgstr ""
+
+# Default: Casting director
+msgid "casting-director"
+msgstr ""
+
+# Default: Catalog number
+msgid "catalog-number"
+msgstr ""
+
+# Default: Category
+msgid "category"
+msgstr ""
+
+# Default: Certificate
+msgid "certificate"
+msgstr ""
+
+# Default: Certificates
+msgid "certificates"
+msgstr ""
+
+# Default: Certification
+msgid "certification"
+msgstr ""
+
+# Default: Channel
+msgid "channel"
+msgstr ""
+
+# Default: Character
+msgid "character"
+msgstr ""
+
+# Default: Cinematographer
+msgid "cinematographer"
+msgstr ""
+
+# Default: Cinematographic process
+msgid "cinematographic-process"
+msgstr ""
+
+# Default: Close captions teletext ld g
+msgid "close-captions-teletext-ld-g"
+msgstr ""
+
+# Default: Color info
+msgid "color-info"
+msgstr ""
+
+# Default: Color information
+msgid "color-information"
+msgstr ""
+
+# Default: Color rendition
+msgid "color-rendition"
+msgstr ""
+
+# Default: Company
+msgid "company"
+msgstr ""
+
+# Default: Complete cast
+msgid "complete-cast"
+msgstr ""
+
+# Default: Complete crew
+msgid "complete-crew"
+msgstr ""
+
+# Default: Composer
+msgid "composer"
+msgstr ""
+
+# Default: Connections
+msgid "connections"
+msgstr ""
+
+# Default: Contrast
+msgid "contrast"
+msgstr ""
+
+# Default: Copyright holder
+msgid "copyright-holder"
+msgstr ""
+
+# Default: Costume department
+msgid "costume-department"
+msgstr ""
+
+# Default: Costume designer
+msgid "costume-designer"
+msgstr ""
+
+# Default: Countries
+msgid "countries"
+msgstr ""
+
+# Default: Country
+msgid "country"
+msgstr ""
+
+# Default: Courtesy of
+msgid "courtesy-of"
+msgstr ""
+
+# Default: Cover
+msgid "cover"
+msgstr ""
+
+# Default: Cover url
+msgid "cover-url"
+msgstr ""
+
+# Default: Crazy credits
+msgid "crazy-credits"
+msgstr ""
+
+# Default: Creator
+msgid "creator"
+msgstr ""
+
+# Default: Current role
+msgid "current-role"
+msgstr ""
+
+# Default: Database
+msgid "database"
+msgstr ""
+
+# Default: Date
+msgid "date"
+msgstr ""
+
+# Default: Death date
+msgid "death-date"
+msgstr ""
+
+# Default: Death notes
+msgid "death-notes"
+msgstr ""
+
+# Default: Demographic
+msgid "demographic"
+msgstr ""
+
+# Default: Description
+msgid "description"
+msgstr ""
+
+# Default: Dialogue intellegibility
+msgid "dialogue-intellegibility"
+msgstr ""
+
+# Default: Digital sound
+msgid "digital-sound"
+msgstr ""
+
+# Default: Director
+msgid "director"
+msgstr ""
+
+# Default: Disc format
+msgid "disc-format"
+msgstr ""
+
+# Default: Disc size
+msgid "disc-size"
+msgstr ""
+
+# Default: Distributors
+msgid "distributors"
+msgstr ""
+
+# Default: Dvd
+msgid "dvd"
+msgstr ""
+
+# Default: Dvd features
+msgid "dvd-features"
+msgstr ""
+
+# Default: Dvd format
+msgid "dvd-format"
+msgstr ""
+
+# Default: Dvds
+msgid "dvds"
+msgstr ""
+
+# Default: Dynamic range
+msgid "dynamic-range"
+msgstr ""
+
+# Default: Edited from
+msgid "edited-from"
+msgstr ""
+
+# Default: Edited into
+msgid "edited-into"
+msgstr ""
+
+# Default: Editor
+msgid "editor"
+msgstr ""
+
+# Default: Editorial department
+msgid "editorial-department"
+msgstr ""
+
+# Default: Episode
+msgid "episode"
+msgstr ""
+
+# Default: Episode of
+msgid "episode-of"
+msgstr ""
+
+# Default: Episode title
+msgid "episode-title"
+msgstr ""
+
+# Default: Episodes
+msgid "episodes"
+msgstr ""
+
+# Default: Episodes rating
+msgid "episodes-rating"
+msgstr ""
+
+# Default: Essays
+msgid "essays"
+msgstr ""
+
+# Default: External reviews
+msgid "external-reviews"
+msgstr ""
+
+# Default: Faqs
+msgid "faqs"
+msgstr ""
+
+# Default: Feature
+msgid "feature"
+msgstr ""
+
+# Default: Featured in
+msgid "featured-in"
+msgstr ""
+
+# Default: Features
+msgid "features"
+msgstr ""
+
+# Default: Film negative format
+msgid "film-negative-format"
+msgstr ""
+
+# Default: Filming dates
+msgid "filming-dates"
+msgstr ""
+
+# Default: Filmography
+msgid "filmography"
+msgstr ""
+
+# Default: Followed by
+msgid "followed-by"
+msgstr ""
+
+# Default: Follows
+msgid "follows"
+msgstr ""
+
+# Default: For
+msgid "for"
+msgstr ""
+
+# Default: Frequency response
+msgid "frequency-response"
+msgstr ""
+
+# Default: From
+msgid "from"
+msgstr ""
+
+# Default: Full article link
+msgid "full-article-link"
+msgstr ""
+
+# Default: Full size cover url
+msgid "full-size-cover-url"
+msgstr ""
+
+# Default: Full size headshot
+msgid "full-size-headshot"
+msgstr ""
+
+# Default: Genres
+msgid "genres"
+msgstr ""
+
+# Default: Goofs
+msgid "goofs"
+msgstr ""
+
+# Default: Gross
+msgid "gross"
+msgstr ""
+
+# Default: Group genre
+msgid "group-genre"
+msgstr ""
+
+# Default: Headshot
+msgid "headshot"
+msgstr ""
+
+# Default: Height
+msgid "height"
+msgstr ""
+
+# Default: Imdbindex
+msgid "imdbindex"
+msgstr ""
+
+# Default: In development
+msgid "in-development"
+msgstr ""
+
+# Default: Interview
+msgid "interview"
+msgstr ""
+
+# Default: Interviews
+msgid "interviews"
+msgstr ""
+
+# Default: Introduction
+msgid "introduction"
+msgstr ""
+
+# Default: Item
+msgid "item"
+msgstr ""
+
+# Default: Keywords
+msgid "keywords"
+msgstr ""
+
+# Default: Kind
+msgid "kind"
+msgstr ""
+
+# Default: Label
+msgid "label"
+msgstr ""
+
+# Default: Laboratory
+msgid "laboratory"
+msgstr ""
+
+# Default: Language
+msgid "language"
+msgstr ""
+
+# Default: Languages
+msgid "languages"
+msgstr ""
+
+# Default: Laserdisc
+msgid "laserdisc"
+msgstr ""
+
+# Default: Laserdisc title
+msgid "laserdisc-title"
+msgstr ""
+
+# Default: Length
+msgid "length"
+msgstr ""
+
+# Default: Line
+msgid "line"
+msgstr ""
+
+# Default: Link
+msgid "link"
+msgstr ""
+
+# Default: Link text
+msgid "link-text"
+msgstr ""
+
+# Default: Literature
+msgid "literature"
+msgstr ""
+
+# Default: Locations
+msgid "locations"
+msgstr ""
+
+# Default: Long imdb canonical name
+msgid "long-imdb-canonical-name"
+msgstr ""
+
+# Default: Long imdb canonical title
+msgid "long-imdb-canonical-title"
+msgstr ""
+
+# Default: Long imdb episode title
+msgid "long-imdb-episode-title"
+msgstr ""
+
+# Default: Long imdb name
+msgid "long-imdb-name"
+msgstr ""
+
+# Default: Long imdb title
+msgid "long-imdb-title"
+msgstr ""
+
+# Default: Magazine cover photo
+msgid "magazine-cover-photo"
+msgstr ""
+
+# Default: Make up
+msgid "make-up"
+msgstr ""
+
+# Default: Master format
+msgid "master-format"
+msgstr ""
+
+# Default: Median
+msgid "median"
+msgstr ""
+
+# Default: Merchandising links
+msgid "merchandising-links"
+msgstr ""
+
+# Default: Mini biography
+msgid "mini-biography"
+msgstr ""
+
+# Default: Misc links
+msgid "misc-links"
+msgstr ""
+
+# Default: Miscellaneous companies
+msgid "miscellaneous-companies"
+msgstr ""
+
+# Default: Miscellaneous crew
+msgid "miscellaneous-crew"
+msgstr ""
+
+# Default: Movie
+msgid "movie"
+msgstr ""
+
+# Default: Mpaa
+msgid "mpaa"
+msgstr ""
+
+# Default: Music department
+msgid "music-department"
+msgstr ""
+
+# Default: Name
+msgid "name"
+msgstr ""
+
+# Default: News
+msgid "news"
+msgstr ""
+
+# Default: Newsgroup reviews
+msgid "newsgroup-reviews"
+msgstr ""
+
+# Default: Nick names
+msgid "nick-names"
+msgstr ""
+
+# Default: Notes
+msgid "notes"
+msgstr ""
+
+# Default: Novel
+msgid "novel"
+msgstr ""
+
+# Default: Number
+msgid "number"
+msgstr ""
+
+# Default: Number of chapter stops
+msgid "number-of-chapter-stops"
+msgstr ""
+
+# Default: Number of episodes
+msgid "number-of-episodes"
+msgstr ""
+
+# Default: Number of seasons
+msgid "number-of-seasons"
+msgstr ""
+
+# Default: Number of sides
+msgid "number-of-sides"
+msgstr ""
+
+# Default: Number of votes
+msgid "number-of-votes"
+msgstr ""
+
+# Default: Official retail price
+msgid "official-retail-price"
+msgstr ""
+
+# Default: Official sites
+msgid "official-sites"
+msgstr ""
+
+# Default: Opening weekend
+msgid "opening-weekend"
+msgstr ""
+
+# Default: Original air date
+msgid "original-air-date"
+msgstr ""
+
+# Default: Original music
+msgid "original-music"
+msgstr ""
+
+# Default: Original title
+msgid "original-title"
+msgstr ""
+
+# Default: Other literature
+msgid "other-literature"
+msgstr ""
+
+# Default: Other works
+msgid "other-works"
+msgstr ""
+
+# Default: Parents guide
+msgid "parents-guide"
+msgstr ""
+
+# Default: Performed by
+msgid "performed-by"
+msgstr ""
+
+# Default: Person
+msgid "person"
+msgstr ""
+
+# Default: Photo sites
+msgid "photo-sites"
+msgstr ""
+
+# Default: Pictorial
+msgid "pictorial"
+msgstr ""
+
+# Default: Picture format
+msgid "picture-format"
+msgstr ""
+
+# Default: Plot
+msgid "plot"
+msgstr ""
+
+# Default: Plot outline
+msgid "plot-outline"
+msgstr ""
+
+# Default: Portrayed in
+msgid "portrayed-in"
+msgstr ""
+
+# Default: Pressing plant
+msgid "pressing-plant"
+msgstr ""
+
+# Default: Printed film format
+msgid "printed-film-format"
+msgstr ""
+
+# Default: Printed media reviews
+msgid "printed-media-reviews"
+msgstr ""
+
+# Default: Producer
+msgid "producer"
+msgstr ""
+
+# Default: Production companies
+msgid "production-companies"
+msgstr ""
+
+# Default: Production country
+msgid "production-country"
+msgstr ""
+
+# Default: Production dates
+msgid "production-dates"
+msgstr ""
+
+# Default: Production design
+msgid "production-design"
+msgstr ""
+
+# Default: Production designer
+msgid "production-designer"
+msgstr ""
+
+# Default: Production manager
+msgid "production-manager"
+msgstr ""
+
+# Default: Production process protocol
+msgid "production-process-protocol"
+msgstr ""
+
+# Default: Quality of source
+msgid "quality-of-source"
+msgstr ""
+
+# Default: Quality program
+msgid "quality-program"
+msgstr ""
+
+# Default: Quote
+msgid "quote"
+msgstr ""
+
+# Default: Quotes
+msgid "quotes"
+msgstr ""
+
+# Default: Rating
+msgid "rating"
+msgstr ""
+
+# Default: Recommendations
+msgid "recommendations"
+msgstr ""
+
+# Default: Referenced in
+msgid "referenced-in"
+msgstr ""
+
+# Default: References
+msgid "references"
+msgstr ""
+
+# Default: Region
+msgid "region"
+msgstr ""
+
+# Default: Release country
+msgid "release-country"
+msgstr ""
+
+# Default: Release date
+msgid "release-date"
+msgstr ""
+
+# Default: Release dates
+msgid "release-dates"
+msgstr ""
+
+# Default: Remade as
+msgid "remade-as"
+msgstr ""
+
+# Default: Remake of
+msgid "remake-of"
+msgstr ""
+
+# Default: Rentals
+msgid "rentals"
+msgstr ""
+
+# Default: Result
+msgid "result"
+msgstr ""
+
+# Default: Review
+msgid "review"
+msgstr ""
+
+# Default: Review author
+msgid "review-author"
+msgstr ""
+
+# Default: Review kind
+msgid "review-kind"
+msgstr ""
+
+# Default: Runtime
+msgid "runtime"
+msgstr ""
+
+# Default: Runtimes
+msgid "runtimes"
+msgstr ""
+
+# Default: Salary history
+msgid "salary-history"
+msgstr ""
+
+# Default: Screenplay teleplay
+msgid "screenplay-teleplay"
+msgstr ""
+
+# Default: Season
+msgid "season"
+msgstr ""
+
+# Default: Second unit director or assistant director
+msgid "second-unit-director-or-assistant-director"
+msgstr ""
+
+# Default: Self
+msgid "self"
+msgstr ""
+
+# Default: Series animation department
+msgid "series-animation-department"
+msgstr ""
+
+# Default: Series art department
+msgid "series-art-department"
+msgstr ""
+
+# Default: Series assistant directors
+msgid "series-assistant-directors"
+msgstr ""
+
+# Default: Series camera department
+msgid "series-camera-department"
+msgstr ""
+
+# Default: Series casting department
+msgid "series-casting-department"
+msgstr ""
+
+# Default: Series cinematographers
+msgid "series-cinematographers"
+msgstr ""
+
+# Default: Series costume department
+msgid "series-costume-department"
+msgstr ""
+
+# Default: Series editorial department
+msgid "series-editorial-department"
+msgstr ""
+
+# Default: Series editors
+msgid "series-editors"
+msgstr ""
+
+# Default: Series make up department
+msgid "series-make-up-department"
+msgstr ""
+
+# Default: Series miscellaneous
+msgid "series-miscellaneous"
+msgstr ""
+
+# Default: Series music department
+msgid "series-music-department"
+msgstr ""
+
+# Default: Series producers
+msgid "series-producers"
+msgstr ""
+
+# Default: Series production designers
+msgid "series-production-designers"
+msgstr ""
+
+# Default: Series production managers
+msgid "series-production-managers"
+msgstr ""
+
+# Default: Series sound department
+msgid "series-sound-department"
+msgstr ""
+
+# Default: Series special effects department
+msgid "series-special-effects-department"
+msgstr ""
+
+# Default: Series stunts
+msgid "series-stunts"
+msgstr ""
+
+# Default: Series title
+msgid "series-title"
+msgstr ""
+
+# Default: Series transportation department
+msgid "series-transportation-department"
+msgstr ""
+
+# Default: Series visual effects department
+msgid "series-visual-effects-department"
+msgstr ""
+
+# Default: Series writers
+msgid "series-writers"
+msgstr ""
+
+# Default: Series years
+msgid "series-years"
+msgstr ""
+
+# Default: Set decoration
+msgid "set-decoration"
+msgstr ""
+
+# Default: Sharpness
+msgid "sharpness"
+msgstr ""
+
+# Default: Similar to
+msgid "similar-to"
+msgstr ""
+
+# Default: Smart canonical episode title
+msgid "smart-canonical-episode-title"
+msgstr ""
+
+# Default: Smart canonical series title
+msgid "smart-canonical-series-title"
+msgstr ""
+
+# Default: Smart canonical title
+msgid "smart-canonical-title"
+msgstr ""
+
+# Default: Smart long imdb canonical title
+msgid "smart-long-imdb-canonical-title"
+msgstr ""
+
+# Default: Sound clips
+msgid "sound-clips"
+msgstr ""
+
+# Default: Sound crew
+msgid "sound-crew"
+msgstr ""
+
+# Default: Sound encoding
+msgid "sound-encoding"
+msgstr ""
+
+# Default: Sound mix
+msgid "sound-mix"
+msgstr ""
+
+# Default: Soundtrack
+msgid "soundtrack"
+msgstr ""
+
+# Default: Spaciality
+msgid "spaciality"
+msgstr ""
+
+# Default: Special effects
+msgid "special-effects"
+msgstr ""
+
+# Default: Special effects companies
+msgid "special-effects-companies"
+msgstr ""
+
+# Default: Special effects department
+msgid "special-effects-department"
+msgstr ""
+
+# Default: Spin off
+msgid "spin-off"
+msgstr ""
+
+# Default: Spin off from
+msgid "spin-off-from"
+msgstr ""
+
+# Default: Spoofed in
+msgid "spoofed-in"
+msgstr ""
+
+# Default: Spoofs
+msgid "spoofs"
+msgstr ""
+
+# Default: Spouse
+msgid "spouse"
+msgstr ""
+
+# Default: Status of availablility
+msgid "status-of-availablility"
+msgstr ""
+
+# Default: Studio
+msgid "studio"
+msgstr ""
+
+# Default: Studios
+msgid "studios"
+msgstr ""
+
+# Default: Stunt performer
+msgid "stunt-performer"
+msgstr ""
+
+# Default: Stunts
+msgid "stunts"
+msgstr ""
+
+# Default: Subtitles
+msgid "subtitles"
+msgstr ""
+
+# Default: Supplement
+msgid "supplement"
+msgstr ""
+
+# Default: Supplements
+msgid "supplements"
+msgstr ""
+
+# Default: Synopsis
+msgid "synopsis"
+msgstr ""
+
+# Default: Taglines
+msgid "taglines"
+msgstr ""
+
+# Default: Tech info
+msgid "tech-info"
+msgstr ""
+
+# Default: Thanks
+msgid "thanks"
+msgstr ""
+
+# Default: Time
+msgid "time"
+msgstr ""
+
+# Default: Title
+msgid "title"
+msgstr ""
+
+# Default: Titles in this product
+msgid "titles-in-this-product"
+msgstr ""
+
+# Default: To
+msgid "to"
+msgstr ""
+
+# Default: Top 250 rank
+msgid "top-250-rank"
+msgstr ""
+
+# Default: Trade mark
+msgid "trade-mark"
+msgstr ""
+
+# Default: Transportation department
+msgid "transportation-department"
+msgstr ""
+
+# Default: Trivia
+msgid "trivia"
+msgstr ""
+
+# Default: Tv
+msgid "tv"
+msgstr ""
+
+# Default: Under license from
+msgid "under-license-from"
+msgstr ""
+
+# Default: Unknown link
+msgid "unknown-link"
+msgstr ""
+
+# Default: Upc
+msgid "upc"
+msgstr ""
+
+# Default: Version of
+msgid "version-of"
+msgstr ""
+
+# Default: Vhs
+msgid "vhs"
+msgstr ""
+
+# Default: Video
+msgid "video"
+msgstr ""
+
+# Default: Video artifacts
+msgid "video-artifacts"
+msgstr ""
+
+# Default: Video clips
+msgid "video-clips"
+msgstr ""
+
+# Default: Video noise
+msgid "video-noise"
+msgstr ""
+
+# Default: Video quality
+msgid "video-quality"
+msgstr ""
+
+# Default: Video standard
+msgid "video-standard"
+msgstr ""
+
+# Default: Visual effects
+msgid "visual-effects"
+msgstr ""
+
+# Default: Votes
+msgid "votes"
+msgstr ""
+
+# Default: Votes distribution
+msgid "votes-distribution"
+msgstr ""
+
+# Default: Weekend gross
+msgid "weekend-gross"
+msgstr ""
+
+# Default: Where now
+msgid "where-now"
+msgstr ""
+
+# Default: With
+msgid "with"
+msgstr ""
+
+# Default: Writer
+msgid "writer"
+msgstr ""
+
+# Default: Written by
+msgid "written-by"
+msgstr ""
+
+# Default: Year
+msgid "year"
+msgstr ""
+
+# Default: Zshops
+msgid "zshops"
+msgstr ""
diff --git a/lib/imdb/locale/it/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/it/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..4d06b657bd2aa4ca9fc1e7c7d73876e59a6bd8f7
Binary files /dev/null and b/lib/imdb/locale/it/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/pt_BR/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/pt_BR/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..73549cef0ec3a3270be38ed77feb71906c015a3c
Binary files /dev/null and b/lib/imdb/locale/pt_BR/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/locale/rebuildmo.py b/lib/imdb/locale/rebuildmo.py
index 14fe17f1f91aa4495c74eff65cfd813e47e3f7ed..b72a74c3691fd3cb2a43f4ca2355a13921fa543b 100755
--- a/lib/imdb/locale/rebuildmo.py
+++ b/lib/imdb/locale/rebuildmo.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import glob
diff --git a/lib/imdb/locale/tr/LC_MESSAGES/imdbpy.mo b/lib/imdb/locale/tr/LC_MESSAGES/imdbpy.mo
new file mode 100644
index 0000000000000000000000000000000000000000..8ce4e3acad9ad9594e24949db3cd39bc3d965d6a
Binary files /dev/null and b/lib/imdb/locale/tr/LC_MESSAGES/imdbpy.mo differ
diff --git a/lib/imdb/parser/__init__.py b/lib/imdb/parser/__init__.py
index 7126af5f2089110f96d5aa4ab938770cbe95ef95..4c3c90a85187cbb573d4992f3e3315aa1568a31d 100644
--- a/lib/imdb/parser/__init__.py
+++ b/lib/imdb/parser/__init__.py
@@ -20,7 +20,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 __all__ = ['http', 'mobile', 'sql']
diff --git a/lib/imdb/parser/http/__init__.py b/lib/imdb/parser/http/__init__.py
index 2bfeeb7474fd96821aaa3b5907353d3e36ca124e..a3001a08daccc68b5454642fedfda1b13d11b2bf 100644
--- a/lib/imdb/parser/http/__init__.py
+++ b/lib/imdb/parser/http/__init__.py
@@ -22,7 +22,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import sys
diff --git a/lib/imdb/parser/http/bsouplxml/bsoupxpath.py b/lib/imdb/parser/http/bsouplxml/bsoupxpath.py
index 4b7930746579766c1e5a94d3ef62c85b8efadcee..c5c489db7fc5b46500a79ef1a985485a29d49223 100644
--- a/lib/imdb/parser/http/bsouplxml/bsoupxpath.py
+++ b/lib/imdb/parser/http/bsouplxml/bsoupxpath.py
@@ -17,7 +17,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 __author__ = 'H. Turgut Uyar <uyar@tekir.org>'
diff --git a/lib/imdb/parser/http/bsouplxml/etree.py b/lib/imdb/parser/http/bsouplxml/etree.py
index 72a8ba1cfe6e926a2fc84fda0835badce5816ad6..28465f5c246a065895d79354ea1702002491cb67 100644
--- a/lib/imdb/parser/http/bsouplxml/etree.py
+++ b/lib/imdb/parser/http/bsouplxml/etree.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import _bsoup as BeautifulSoup
diff --git a/lib/imdb/parser/http/bsouplxml/html.py b/lib/imdb/parser/http/bsouplxml/html.py
index dca08fe3c53934373033e533dac22a6bf7645b04..bbf13bd642a34f3c2eeb3468c911921bac00163a 100644
--- a/lib/imdb/parser/http/bsouplxml/html.py
+++ b/lib/imdb/parser/http/bsouplxml/html.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import _bsoup as BeautifulSoup
diff --git a/lib/imdb/parser/http/characterParser.py b/lib/imdb/parser/http/characterParser.py
index dcbaa9c5eb3e47ca89d808df21f2401ac22a4716..ff5ea09bc2872b6aa227fe5324227d248f5aeefb 100644
--- a/lib/imdb/parser/http/characterParser.py
+++ b/lib/imdb/parser/http/characterParser.py
@@ -23,7 +23,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
diff --git a/lib/imdb/parser/http/companyParser.py b/lib/imdb/parser/http/companyParser.py
index e058e25ae8e76160a4c4b3a47bb2fb1da4fa48b3..843379169cc595c59f611a635500753a9ea827f2 100644
--- a/lib/imdb/parser/http/companyParser.py
+++ b/lib/imdb/parser/http/companyParser.py
@@ -21,7 +21,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
diff --git a/lib/imdb/parser/http/movieParser.py b/lib/imdb/parser/http/movieParser.py
index 790e07d3c3518c0f0c9723e3117db08c81d68fca..f4589d7a0cfa729b280898d7201fbac896b8b25f 100644
--- a/lib/imdb/parser/http/movieParser.py
+++ b/lib/imdb/parser/http/movieParser.py
@@ -9,7 +9,7 @@ pages would be:
     plot summary:       http://akas.imdb.com/title/tt0094226/plotsummary
     ...and so on...
 
-Copyright 2004-2013 Davide Alberani <da@erlug.linux.it>
+Copyright 2004-2016 Davide Alberani <da@erlug.linux.it>
                2008 H. Turgut Uyar <uyar@tekir.org>
 
 This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
@@ -207,6 +207,11 @@ class DOMHTMLMovieParser(DOMParserBase):
                             multi=True,
                             path="./text()")),
 
+                Extractor(label='myrating',
+                        path="//span[@id='voteuser']",
+                        attrs=Attribute(key='myrating',
+                                        path=".//text()")),
+
                 Extractor(label='h5sections',
                         path="//div[@class='info']/h5/..",
                         attrs=[
@@ -226,7 +231,7 @@ class DOMHTMLMovieParser(DOMParserBase):
                             Attribute(key="countries",
                                 path="./h5[starts-with(text(), " \
                             "'Countr')]/../div[@class='info-content']//text()",
-                            postprocess=makeSplitter('|')),
+                                postprocess=makeSplitter('|')),
                             Attribute(key="language",
                                 path="./h5[starts-with(text(), " \
                                         "'Language')]/..//text()",
@@ -234,7 +239,7 @@ class DOMHTMLMovieParser(DOMParserBase):
                             Attribute(key='color info',
                                 path="./h5[starts-with(text(), " \
                                         "'Color')]/..//text()",
-                                postprocess=makeSplitter('Color:')),
+                                postprocess=makeSplitter('|')),
                             Attribute(key='sound mix',
                                 path="./h5[starts-with(text(), " \
                                         "'Sound Mix')]/..//text()",
@@ -462,6 +467,8 @@ class DOMHTMLMovieParser(DOMParserBase):
                 del data['other akas']
             if nakas:
                 data['akas'] = nakas
+        if 'color info' in data:
+            data['color info'] = [x.replace('Color:', '', 1) for x in data['color info']]
         if 'runtimes' in data:
             data['runtimes'] = [x.replace(' min', u'')
                                 for x in data['runtimes']]
@@ -552,10 +559,10 @@ class DOMHTMLPlotParser(DOMParserBase):
     # Notice that recently IMDb started to put the email of the
     # author only in the link, that we're not collecting, here.
     extractors = [Extractor(label='plot',
-                            path="//ul[@class='zebraList']//p",
+                            path="//p[@class='plotSummary']",
                             attrs=Attribute(key='plot',
                                             multi=True,
-                                            path={'plot': './text()[1]',
+                                            path={'plot': './/text()',
                                                   'author': './span/em/a/text()'},
                                             postprocess=_process_plotsummary))]
 
@@ -783,17 +790,20 @@ class DOMHTMLTriviaParser(DOMParserBase):
 
 
 
-class DOMHTMLSoundtrackParser(DOMHTMLAlternateVersionsParser):
-    kind = 'soundtrack'
-
-    preprocessors = [
-        ('<br>', '\n')
-        ]
+class DOMHTMLSoundtrackParser(DOMParserBase):
+    _defGetRefs = True
+    preprocessors = [('<br />', '\n'), ('<br>', '\n')]
+    extractors = [Extractor(label='soundtrack',
+                            path="//div[@class='list']//div",
+                            attrs=Attribute(key='soundtrack',
+                                            multi=True,
+                                            path=".//text()",
+                                            postprocess=lambda x: x.strip()))]
 
     def postprocess_data(self, data):
-        if 'alternate versions' in data:
+        if 'soundtrack' in data:
             nd = []
-            for x in data['alternate versions']:
+            for x in data['soundtrack']:
                 ds = x.split('\n')
                 title = ds[0]
                 if title[0] == '"' and title[-1] == '"':
@@ -1177,7 +1187,7 @@ class DOMHTMLCriticReviewsParser(DOMParserBase):
                 path="//div[@class='article']/div[@class='see-more']/a",
                 attrs=Attribute(key='metacritic url',
                                 path="./@href")) ]
-    
+
 class DOMHTMLOfficialsitesParser(DOMParserBase):
     """Parser for the "official sites", "external reviews", "newsgroup
     reviews", "miscellaneous links", "sound clips", "video clips" and
@@ -1289,16 +1299,17 @@ class DOMHTMLTechParser(DOMParserBase):
         result = tparser.parse(technical_html_string)
     """
     kind = 'tech'
+    re_space = re.compile(r'\s+')
 
     extractors = [Extractor(label='tech',
-                        group="//h5",
+                        group="//table//tr/td[@class='label']",
                         group_key="./text()",
-                        group_key_normalize=lambda x: x.lower(),
-                        path="./following-sibling::div[1]",
+                        group_key_normalize=lambda x: x.lower().strip(),
+                        path=".",
                         attrs=Attribute(key=None,
-                                    path=".//text()",
+                                        path="..//td[2]//text()",
                                     postprocess=lambda x: [t.strip()
-                                        for t in x.split('\n') if t.strip()]))]
+                                                           for t in x.split(':::') if t.strip()]))]
 
     preprocessors = [
         (re.compile('(<h5>.*?</h5>)', re.I), r'</div>\1<div class="_imdbpy">'),
@@ -1308,12 +1319,15 @@ class DOMHTMLTechParser(DOMParserBase):
         (re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
         (re.compile('(</td><td valign="top">)', re.I), r'\1::'),
         (re.compile('(</tr><tr>)', re.I), r'\n\1'),
+        (re.compile('<span class="ghost">\|</span>', re.I), r':::'),
+        (re.compile('<br/?>', re.I), r':::'),
         # this is for splitting individual entries
-        (re.compile('<br/>', re.I), r'\n'),
         ]
 
     def postprocess_data(self, data):
         for key in data:
+            data[key] = filter(lambda x: x != '|', data[key])
+            data[key] = [self.re_space.sub(' ', x).strip() for x in data[key]]
             data[key] = filter(None, data[key])
         if self.kind in ('literature', 'business', 'contacts') and data:
             if 'screenplay/teleplay' in data:
@@ -1905,7 +1919,7 @@ _OBJECTS = {
     'goofs_parser':  ((DOMHTMLGoofsParser,), None),
     'alternateversions_parser':  ((DOMHTMLAlternateVersionsParser,), None),
     'trivia_parser':  ((DOMHTMLTriviaParser,), None),
-    'soundtrack_parser':  ((DOMHTMLSoundtrackParser,), {'kind': 'soundtrack'}),
+    'soundtrack_parser':  ((DOMHTMLSoundtrackParser,), None),
     'quotes_parser':  ((DOMHTMLQuotesParser,), None),
     'releasedates_parser':  ((DOMHTMLReleaseinfoParser,), None),
     'ratings_parser':  ((DOMHTMLRatingsParser,), None),
diff --git a/lib/imdb/parser/http/personParser.py b/lib/imdb/parser/http/personParser.py
index fbaf5571d4e87034d4397fbdccc866fd6d94e1f1..caf8b2ef873fb08330074b2451622292abee5702 100644
--- a/lib/imdb/parser/http/personParser.py
+++ b/lib/imdb/parser/http/personParser.py
@@ -23,7 +23,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
@@ -204,7 +204,7 @@ class DOMHTMLBioParser(DOMParserBase):
     _birth_attrs = [Attribute(key='birth date',
                         path={
                             'day': "./a[starts-with(@href, " \
-                                    "'/date/')]/text()",
+                                    "'/search/name?birth_monthday=')]/text()",
                             'year': "./a[starts-with(@href, " \
                                     "'/search/name?birth_year=')]/text()"
                             },
@@ -215,7 +215,7 @@ class DOMHTMLBioParser(DOMParserBase):
     _death_attrs = [Attribute(key='death date',
                         path={
                             'day': "./a[starts-with(@href, " \
-                                    "'/date/')]/text()",
+                                    "'/search/name?death_monthday=')]/text()",
                             'year': "./a[starts-with(@href, " \
                                     "'/search/name?death_date=')]/text()"
                             },
@@ -396,7 +396,7 @@ class DOMHTMLResumeParser(DOMParserBase):
         ]
 
     def postprocess_data(self, data):
-        
+
         for key in data.keys():
             if data[key] == '':
                 del data[key]
diff --git a/lib/imdb/parser/http/searchCharacterParser.py b/lib/imdb/parser/http/searchCharacterParser.py
index 038e6b44304244345109b9869c19a9075b5eb237..5f281fa0c3bbef2fcc47d9b36865e488c193ea5a 100644
--- a/lib/imdb/parser/http/searchCharacterParser.py
+++ b/lib/imdb/parser/http/searchCharacterParser.py
@@ -22,7 +22,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from imdb.utils import analyze_name, build_name
diff --git a/lib/imdb/parser/http/searchCompanyParser.py b/lib/imdb/parser/http/searchCompanyParser.py
index ffd5bb6bba00526f7b2b730fc4c1f40e8751d5bf..40ea8a7226ef21f62d6316c7b1d4791b5a8f9903 100644
--- a/lib/imdb/parser/http/searchCompanyParser.py
+++ b/lib/imdb/parser/http/searchCompanyParser.py
@@ -22,7 +22,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from imdb.utils import analyze_company_name, build_company_name
diff --git a/lib/imdb/parser/http/searchKeywordParser.py b/lib/imdb/parser/http/searchKeywordParser.py
index c0343365a8ac176c6c5fe1b70aa1a8ead968d2ed..4161fa484f34b0f4ef0e007be993272595209558 100644
--- a/lib/imdb/parser/http/searchKeywordParser.py
+++ b/lib/imdb/parser/http/searchKeywordParser.py
@@ -21,7 +21,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from utils import Extractor, Attribute, analyze_imdbid
@@ -47,7 +47,7 @@ class DOMHTMLSearchKeywordParser(DOMHTMLSearchMovieParser):
     the one given."""
 
     _BaseParser = DOMBasicKeywordParser
-    _notDirectHitTitle = '<title>imdb keyword'
+    _notDirectHitTitle = '<title>find - imdb'
     _titleBuilder = lambda self, x: x
     _linkPrefix = '/keyword/'
 
@@ -56,7 +56,7 @@ class DOMHTMLSearchKeywordParser(DOMHTMLSearchMovieParser):
                         path="./a[1]/text()"
                             )]
     extractors = [Extractor(label='search',
-                            path="//td[3]/a[starts-with(@href, " \
+                            path="//a[starts-with(@href, " \
                                     "'/keyword/')]/..",
                             attrs=_attrs)]
 
@@ -80,7 +80,7 @@ class DOMHTMLSearchMovieKeywordParser(DOMHTMLSearchMovieParser):
     "new search system" is used, searching for movies with the given
     keyword."""
 
-    _notDirectHitTitle = '<title>best'
+    _notDirectHitTitle = '<title>most'
 
     _attrs = [Attribute(key='data',
                         multi=True,
@@ -98,7 +98,7 @@ class DOMHTMLSearchMovieKeywordParser(DOMHTMLSearchMovieParser):
                         ))]
 
     extractors = [Extractor(label='search',
-                            path="//td[3]/a[starts-with(@href, " \
+                            path="//div[@class='lister-list']//h3//a[starts-with(@href, " \
                                     "'/title/tt')]/..",
                             attrs=_attrs)]
 
diff --git a/lib/imdb/parser/http/searchMovieParser.py b/lib/imdb/parser/http/searchMovieParser.py
index 5d53318094acd271c0cd68913595ba85546da741..781610cf7e49135ddecb3d2ec64507f6767c9a0b 100644
--- a/lib/imdb/parser/http/searchMovieParser.py
+++ b/lib/imdb/parser/http/searchMovieParser.py
@@ -23,7 +23,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
@@ -118,6 +118,7 @@ class DOMHTMLSearchMovieParser(DOMParserBase):
         self.url = u''
 
     def preprocess_string(self, html_string):
+
         if self._notDirectHitTitle in html_string[:10240].lower():
             if self._linkPrefix == '/title/tt':
                 # Only for movies.
diff --git a/lib/imdb/parser/http/searchPersonParser.py b/lib/imdb/parser/http/searchPersonParser.py
index 1e5f7f375a817c19f2ea9fadb76377600339a555..2dd2694105dc5c81fcb9ec47b3b6ee57841c53bd 100644
--- a/lib/imdb/parser/http/searchPersonParser.py
+++ b/lib/imdb/parser/http/searchPersonParser.py
@@ -22,7 +22,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
diff --git a/lib/imdb/parser/http/topBottomParser.py b/lib/imdb/parser/http/topBottomParser.py
index 75d06f57c5be657d4e386201a9660e5ebcf6bf1f..1b8bb9f0befd6119f8d5045f34f2f44c573b9466 100644
--- a/lib/imdb/parser/http/topBottomParser.py
+++ b/lib/imdb/parser/http/topBottomParser.py
@@ -7,7 +7,7 @@ E.g.:
     http://akas.imdb.com/chart/top
     http://akas.imdb.com/chart/bottom
 
-Copyright 2009 Davide Alberani <da@erlug.linux.it>
+Copyright 2009-2015 Davide Alberani <da@erlug.linux.it>
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from imdb.utils import analyze_title
@@ -43,14 +43,15 @@ class DOMHTMLTop250Parser(DOMParserBase):
 
     def _init(self):
         self.extractors = [Extractor(label=self.label,
-                        path="//div[@id='main']//table//tr",
+                        path="//div[@id='main']//div[1]//div//table//tbody//tr",
                         attrs=Attribute(key=None,
                                 multi=True,
-                                path={self.ranktext: "./td[1]//text()",
-                                        'rating': "./td[2]//text()",
-                                        'title': "./td[3]//text()",
-                                        'movieID': "./td[3]//a/@href",
-                                        'votes': "./td[4]//text()"
+                                path={self.ranktext: "./td[2]//text()",
+                                        'rating': "./td[3]//strong//text()",
+                                        'title': "./td[2]//a//text()",
+                                        'year': "./td[2]//span//text()",
+                                        'movieID': "./td[2]//a/@href",
+                                        'votes': "./td[3]//strong/@title"
                                         }))]
 
     def postprocess_data(self, data):
@@ -72,12 +73,16 @@ class DOMHTMLTop250Parser(DOMParserBase):
             if theID in seenIDs:
                 continue
             seenIDs.append(theID)
-            minfo = analyze_title(d['title'])
+            minfo = analyze_title(d['title']+" "+d['year'])
             try: minfo[self.ranktext] = int(d[self.ranktext].replace('.', ''))
             except: pass
             if 'votes' in d:
-                try: minfo['votes'] = int(d['votes'].replace(',', ''))
-                except: pass
+                try:
+                    votes = d['votes'].replace(' votes','')
+                    votes = votes.split(' based on ')[1]
+                    minfo['votes'] = int(votes.replace(',', ''))
+                except:
+                    pass
             if 'rating' in d:
                 try: minfo['rating'] = float(d['rating'])
                 except: pass
diff --git a/lib/imdb/parser/http/utils.py b/lib/imdb/parser/http/utils.py
index 9b059ebfd79d2b44204aff39fb2506b5ca74b4bc..5aefb3ce3cdbd6cbf07396b5847876b8ac591d58 100644
--- a/lib/imdb/parser/http/utils.py
+++ b/lib/imdb/parser/http/utils.py
@@ -19,7 +19,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
@@ -404,6 +404,15 @@ def build_movie(txt, movieID=None, roleID=None, status=None,
     m = Movie(title=title, movieID=movieID, notes=notes, currentRole=role,
                 roleID=roleID, roleIsPerson=_parsingCharacter,
                 modFunct=modFunct, accessSystem=accessSystem)
+    if additionalNotes:
+        if '(TV Series)' in additionalNotes:
+            m['kind'] = u'tv series'
+        elif '(Video Game)' in additionalNotes:
+            m['kind'] = u'video game'
+        elif '(TV Movie)' in additionalNotes:
+            m['kind'] = u'tv movie'
+        elif '(TV Short)' in additionalNotes:
+            m['kind'] = u'tv short'
     if roleNotes and len(roleNotes) == len(roleID):
         for idx, role in enumerate(m.currentRole):
             try:
diff --git a/lib/imdb/parser/mobile/__init__.py b/lib/imdb/parser/mobile/__init__.py
index 371c809cce9e28ad6b9d59ea85792b4385185043..e7ab589d36f7b0d6db0293dba73fe5a8f6b00263 100644
--- a/lib/imdb/parser/mobile/__init__.py
+++ b/lib/imdb/parser/mobile/__init__.py
@@ -20,7 +20,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 import re
diff --git a/lib/imdb/parser/sql/__init__.py b/lib/imdb/parser/sql/__init__.py
deleted file mode 100644
index bc0319d82de1e4dec261da07cba657b24bea190f..0000000000000000000000000000000000000000
--- a/lib/imdb/parser/sql/__init__.py
+++ /dev/null
@@ -1,1595 +0,0 @@
-"""
-parser.sql package (imdb package).
-
-This package provides the IMDbSqlAccessSystem class used to access
-IMDb's data through a SQL database.  Every database supported by
-the SQLObject _AND_ SQLAlchemy Object Relational Managers is available.
-the imdb.IMDb function will return an instance of this class when
-called with the 'accessSystem' argument set to "sql", "database" or "db".
-
-Copyright 2005-2012 Davide Alberani <da@erlug.linux.it>
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
-"""
-
-# FIXME: this whole module was written in a veeery short amount of time.
-#        The code should be commented, rewritten and cleaned. :-)
-
-import re
-import logging
-from difflib import SequenceMatcher
-from codecs import lookup
-
-from imdb import IMDbBase
-from imdb.utils import normalizeName, normalizeTitle, build_title, \
-                        build_name, analyze_name, analyze_title, \
-                        canonicalTitle, canonicalName, re_titleRef, \
-                        build_company_name, re_episodes, _unicodeArticles, \
-                        analyze_company_name, re_year_index, re_nameRef
-from imdb.Person import Person
-from imdb.Movie import Movie
-from imdb.Company import Company
-from imdb._exceptions import IMDbDataAccessError, IMDbError
-
-
-# Logger for miscellaneous functions.
-_aux_logger = logging.getLogger('imdbpy.parser.sql.aux')
-
-# =============================
-# Things that once upon a time were in imdb.parser.common.locsql.
-
-def titleVariations(title, fromPtdf=0):
-    """Build title variations useful for searches; if fromPtdf is true,
-    the input is assumed to be in the plain text data files format."""
-    if fromPtdf: title1 = u''
-    else: title1 = title
-    title2 = title3 = u''
-    if fromPtdf or re_year_index.search(title):
-        # If it appears to have a (year[/imdbIndex]) indication,
-        # assume that a long imdb canonical name was provided.
-        titldict = analyze_title(title, canonical=1)
-        # title1: the canonical name.
-        title1 = titldict['title']
-        if titldict['kind'] != 'episode':
-            # title3: the long imdb canonical name.
-            if fromPtdf: title3 = title
-            else: title3 = build_title(titldict, canonical=1, ptdf=1)
-        else:
-            title1 = normalizeTitle(title1)
-            title3 = build_title(titldict, canonical=1, ptdf=1)
-    else:
-        # Just a title.
-        # title1: the canonical title.
-        title1 = canonicalTitle(title)
-        title3 = u''
-    # title2 is title1 without the article, or title1 unchanged.
-    if title1:
-        title2 = title1
-        t2s = title2.split(u', ')
-        if t2s[-1].lower() in _unicodeArticles:
-            title2 = u', '.join(t2s[:-1])
-    _aux_logger.debug('title variations: 1:[%s] 2:[%s] 3:[%s]',
-                        title1, title2, title3)
-    return title1, title2, title3
-
-
-re_nameIndex = re.compile(r'\(([IVXLCDM]+)\)')
-
-def nameVariations(name, fromPtdf=0):
-    """Build name variations useful for searches; if fromPtdf is true,
-    the input is assumed to be in the plain text data files format."""
-    name1 = name2 = name3 = u''
-    if fromPtdf or re_nameIndex.search(name):
-        # We've a name with an (imdbIndex)
-        namedict = analyze_name(name, canonical=1)
-        # name1 is the name in the canonical format.
-        name1 = namedict['name']
-        # name3 is the canonical name with the imdbIndex.
-        if fromPtdf:
-            if namedict.has_key('imdbIndex'):
-                name3 = name
-        else:
-            name3 = build_name(namedict, canonical=1)
-    else:
-        # name1 is the name in the canonical format.
-        name1 = canonicalName(name)
-        name3 = u''
-    # name2 is the name in the normal format, if it differs from name1.
-    name2 = normalizeName(name1)
-    if name1 == name2: name2 = u''
-    _aux_logger.debug('name variations: 1:[%s] 2:[%s] 3:[%s]',
-                        name1, name2, name3)
-    return name1, name2, name3
-
-
-try:
-    from cutils import ratcliff as _ratcliff
-    def ratcliff(s1, s2, sm):
-        """Return the Ratcliff-Obershelp value between the two strings,
-        using the C implementation."""
-        return _ratcliff(s1.encode('latin_1', 'replace'),
-                        s2.encode('latin_1', 'replace'))
-except ImportError:
-    _aux_logger.warn('Unable to import the cutils.ratcliff function.'
-                    '  Searching names and titles using the "sql"'
-                    ' data access system will be slower.')
-
-    def ratcliff(s1, s2, sm):
-        """Ratcliff-Obershelp similarity."""
-        STRING_MAXLENDIFFER = 0.7
-        s1len = len(s1)
-        s2len = len(s2)
-        if s1len < s2len:
-            threshold = float(s1len) / s2len
-        else:
-            threshold = float(s2len) / s1len
-        if threshold < STRING_MAXLENDIFFER:
-            return 0.0
-        sm.set_seq2(s2.lower())
-        return sm.ratio()
-
-
-def merge_roles(mop):
-    """Merge multiple roles."""
-    new_list = []
-    for m in mop:
-        if m in new_list:
-            keep_this = new_list[new_list.index(m)]
-            if not isinstance(keep_this.currentRole, list):
-                keep_this.currentRole = [keep_this.currentRole]
-            keep_this.currentRole.append(m.currentRole)
-        else:
-            new_list.append(m)
-    return new_list
-
-
-def scan_names(name_list, name1, name2, name3, results=0, ro_thresold=None,
-                _scan_character=False):
-    """Scan a list of names, searching for best matches against
-    the given variations."""
-    if ro_thresold is not None: RO_THRESHOLD = ro_thresold
-    else: RO_THRESHOLD = 0.6
-    sm1 = SequenceMatcher()
-    sm2 = SequenceMatcher()
-    sm3 = SequenceMatcher()
-    sm1.set_seq1(name1.lower())
-    if name2: sm2.set_seq1(name2.lower())
-    if name3: sm3.set_seq1(name3.lower())
-    resd = {}
-    for i, n_data in name_list:
-        nil = n_data['name']
-        # XXX: on Symbian, here we get a str; not sure this is the
-        #      right place to fix it.
-        if isinstance(nil, str):
-            nil = unicode(nil, 'latin1', 'ignore')
-        # Distance with the canonical name.
-        ratios = [ratcliff(name1, nil, sm1) + 0.05]
-        namesurname = u''
-        if not _scan_character:
-            nils = nil.split(', ', 1)
-            surname = nils[0]
-            if len(nils) == 2: namesurname = '%s %s' % (nils[1], surname)
-        else:
-            nils = nil.split(' ', 1)
-            surname = nils[-1]
-            namesurname = nil
-        if surname != nil:
-            # Distance with the "Surname" in the database.
-            ratios.append(ratcliff(name1, surname, sm1))
-            if not _scan_character:
-                ratios.append(ratcliff(name1, namesurname, sm1))
-            if name2:
-                ratios.append(ratcliff(name2, surname, sm2))
-                # Distance with the "Name Surname" in the database.
-                if namesurname:
-                    ratios.append(ratcliff(name2, namesurname, sm2))
-        if name3:
-            # Distance with the long imdb canonical name.
-            ratios.append(ratcliff(name3,
-                        build_name(n_data, canonical=1), sm3) + 0.1)
-        ratio = max(ratios)
-        if ratio >= RO_THRESHOLD:
-            if resd.has_key(i):
-                if ratio > resd[i][0]: resd[i] = (ratio, (i, n_data))
-            else: resd[i] = (ratio, (i, n_data))
-    res = resd.values()
-    res.sort()
-    res.reverse()
-    if results > 0: res[:] = res[:results]
-    return res
-
-
-def scan_titles(titles_list, title1, title2, title3, results=0,
-                searchingEpisode=0, onlyEpisodes=0, ro_thresold=None):
-    """Scan a list of titles, searching for best matches against
-    the given variations."""
-    if ro_thresold is not None: RO_THRESHOLD = ro_thresold
-    else: RO_THRESHOLD = 0.6
-    sm1 = SequenceMatcher()
-    sm2 = SequenceMatcher()
-    sm3 = SequenceMatcher()
-    sm1.set_seq1(title1.lower())
-    sm2.set_seq2(title2.lower())
-    if title3:
-        sm3.set_seq1(title3.lower())
-        if title3[-1] == '}': searchingEpisode = 1
-    hasArt = 0
-    if title2 != title1: hasArt = 1
-    resd = {}
-    for i, t_data in titles_list:
-        if onlyEpisodes:
-            if t_data.get('kind') != 'episode':
-                continue
-            til = t_data['title']
-            if til[-1] == ')':
-                dateIdx = til.rfind('(')
-                if dateIdx != -1:
-                    til = til[:dateIdx].rstrip()
-            if not til:
-                continue
-            ratio = ratcliff(title1, til, sm1)
-            if ratio >= RO_THRESHOLD:
-                resd[i] = (ratio, (i, t_data))
-            continue
-        if searchingEpisode:
-            if t_data.get('kind') != 'episode': continue
-        elif t_data.get('kind') == 'episode': continue
-        til = t_data['title']
-        # XXX: on Symbian, here we get a str; not sure this is the
-        #      right place to fix it.
-        if isinstance(til, str):
-            til = unicode(til, 'latin1', 'ignore')
-        # Distance with the canonical title (with or without article).
-        #   titleS      -> titleR
-        #   titleS, the -> titleR, the
-        if not searchingEpisode:
-            til = canonicalTitle(til)
-            ratios = [ratcliff(title1, til, sm1) + 0.05]
-            # til2 is til without the article, if present.
-            til2 = til
-            tils = til2.split(', ')
-            matchHasArt = 0
-            if tils[-1].lower() in _unicodeArticles:
-                til2 = ', '.join(tils[:-1])
-                matchHasArt = 1
-            if hasArt and not matchHasArt:
-                #   titleS[, the]  -> titleR
-                ratios.append(ratcliff(title2, til, sm2))
-            elif matchHasArt and not hasArt:
-                #   titleS  -> titleR[, the]
-                ratios.append(ratcliff(title1, til2, sm1))
-        else:
-            ratios = [0.0]
-        if title3:
-            # Distance with the long imdb canonical title.
-            ratios.append(ratcliff(title3,
-                        build_title(t_data, canonical=1, ptdf=1), sm3) + 0.1)
-        ratio = max(ratios)
-        if ratio >= RO_THRESHOLD:
-            if resd.has_key(i):
-                if ratio > resd[i][0]:
-                    resd[i] = (ratio, (i, t_data))
-            else: resd[i] = (ratio, (i, t_data))
-    res = resd.values()
-    res.sort()
-    res.reverse()
-    if results > 0: res[:] = res[:results]
-    return res
-
-
-def scan_company_names(name_list, name1, results=0, ro_thresold=None):
-    """Scan a list of company names, searching for best matches against
-    the given name.  Notice that this function takes a list of
-    strings, and not a list of dictionaries."""
-    if ro_thresold is not None: RO_THRESHOLD = ro_thresold
-    else: RO_THRESHOLD = 0.6
-    sm1 = SequenceMatcher()
-    sm1.set_seq1(name1.lower())
-    resd = {}
-    withoutCountry = not name1.endswith(']')
-    for i, n in name_list:
-        # XXX: on Symbian, here we get a str; not sure this is the
-        #      right place to fix it.
-        if isinstance(n, str):
-            n = unicode(n, 'latin1', 'ignore')
-        o_name = n
-        var = 0.0
-        if withoutCountry and n.endswith(']'):
-            cidx = n.rfind('[')
-            if cidx != -1:
-                n = n[:cidx].rstrip()
-                var = -0.05
-        # Distance with the company name.
-        ratio = ratcliff(name1, n, sm1) + var
-        if ratio >= RO_THRESHOLD:
-            if resd.has_key(i):
-                if ratio > resd[i][0]: resd[i] = (ratio,
-                                            (i, analyze_company_name(o_name)))
-            else:
-                resd[i] = (ratio, (i, analyze_company_name(o_name)))
-    res = resd.values()
-    res.sort()
-    res.reverse()
-    if results > 0: res[:] = res[:results]
-    return res
-
-
-try:
-    from cutils import soundex
-except ImportError:
-    _aux_logger.warn('Unable to import the cutils.soundex function.'
-                    '  Searches of movie titles and person names will be'
-                    ' a bit slower.')
-
-    _translate = dict(B='1', C='2', D='3', F='1', G='2', J='2', K='2', L='4',
-                      M='5', N='5', P='1', Q='2', R='6', S='2', T='3', V='1',
-                      X='2', Z='2')
-    _translateget = _translate.get
-    _re_non_ascii = re.compile(r'^[^a-z]*', re.I)
-    SOUNDEX_LEN = 5
-
-    def soundex(s):
-        """Return the soundex code for the given string."""
-        # Maximum length of the soundex code.
-        s = _re_non_ascii.sub('', s)
-        if not s: return None
-        s = s.upper()
-        soundCode =  s[0]
-        for c in s[1:]:
-            cw = _translateget(c, '0')
-            if cw != '0' and soundCode[-1] != cw:
-                soundCode += cw
-        return soundCode[:SOUNDEX_LEN] or None
-
-
-def _sortKeywords(keyword, kwds):
-    """Sort a list of keywords, based on the searched one."""
-    sm = SequenceMatcher()
-    sm.set_seq1(keyword.lower())
-    ratios = [(ratcliff(keyword, k, sm), k) for k in kwds]
-    checkContained = False
-    if len(keyword) > 4:
-        checkContained = True
-    for idx, data in enumerate(ratios):
-        ratio, key = data
-        if key.startswith(keyword):
-            ratios[idx] = (ratio+0.5, key)
-        elif checkContained and keyword in key:
-            ratios[idx] = (ratio+0.3, key)
-    ratios.sort()
-    ratios.reverse()
-    return [r[1] for r in ratios]
-
-
-def filterSimilarKeywords(keyword, kwdsIterator):
-    """Return a sorted list of keywords similar to the one given."""
-    seenDict = {}
-    kwdSndx = soundex(keyword.encode('ascii', 'ignore'))
-    matches = []
-    matchesappend = matches.append
-    checkContained = False
-    if len(keyword) > 4:
-        checkContained = True
-    for movieID, key in kwdsIterator:
-        if key in seenDict:
-            continue
-        seenDict[key] = None
-        if checkContained and keyword in key:
-            matchesappend(key)
-            continue
-        if kwdSndx == soundex(key.encode('ascii', 'ignore')):
-            matchesappend(key)
-    return _sortKeywords(keyword, matches)
-
-
-
-# =============================
-
-_litlist = ['screenplay/teleplay', 'novel', 'adaption', 'book',
-            'production process protocol', 'interviews',
-            'printed media reviews', 'essays', 'other literature']
-_litd = dict([(x, ('literature', x)) for x in _litlist])
-
-_buslist = ['budget', 'weekend gross', 'gross', 'opening weekend', 'rentals',
-            'admissions', 'filming dates', 'production dates', 'studios',
-            'copyright holder']
-_busd = dict([(x, ('business', x)) for x in _buslist])
-
-
-def _reGroupDict(d, newgr):
-    """Regroup keys in the d dictionary in subdictionaries, based on
-    the scheme in the newgr dictionary.
-    E.g.: in the newgr, an entry 'LD label': ('laserdisc', 'label')
-    tells the _reGroupDict() function to take the entry with
-    label 'LD label' (as received from the sql database)
-    and put it in the subsection (another dictionary) named
-    'laserdisc', using the key 'label'."""
-    r = {}
-    newgrks = newgr.keys()
-    for k, v in d.items():
-        if k in newgrks:
-            r.setdefault(newgr[k][0], {})[newgr[k][1]] = v
-            # A not-so-clearer version:
-            ##r.setdefault(newgr[k][0], {})
-            ##r[newgr[k][0]][newgr[k][1]] = v
-        else: r[k] = v
-    return r
-
-
-def _groupListBy(l, index):
-    """Regroup items in a list in a list of lists, grouped by
-    the value at the given index."""
-    tmpd = {}
-    for item in l:
-        tmpd.setdefault(item[index], []).append(item)
-    res = tmpd.values()
-    return res
-
-
-def sub_dict(d, keys):
-    """Return the subdictionary of 'd', with just the keys listed in 'keys'."""
-    return dict([(k, d[k]) for k in keys if k in d])
-
-
-def get_movie_data(movieID, kindDict, fromAka=0, _table=None):
-    """Return a dictionary containing data about the given movieID;
-    if fromAka is true, the AkaTitle table is searched; _table is
-    reserved for the imdbpy2sql.py script."""
-    if _table is not None:
-        Table = _table
-    else:
-        if not fromAka: Table = Title
-        else: Table = AkaTitle
-    try:
-        m = Table.get(movieID)
-    except Exception, e:
-        _aux_logger.warn('Unable to fetch information for movieID %s: %s', movieID, e)
-        mdict = {}
-        return mdict
-    mdict = {'title': m.title, 'kind': kindDict[m.kindID],
-            'year': m.productionYear, 'imdbIndex': m.imdbIndex,
-            'season': m.seasonNr, 'episode': m.episodeNr}
-    if not fromAka:
-        if m.seriesYears is not None:
-            mdict['series years'] = unicode(m.seriesYears)
-    if mdict['imdbIndex'] is None: del mdict['imdbIndex']
-    if mdict['year'] is None: del mdict['year']
-    else:
-        try:
-            mdict['year'] = int(mdict['year'])
-        except (TypeError, ValueError):
-            del mdict['year']
-    if mdict['season'] is None: del mdict['season']
-    else:
-        try: mdict['season'] = int(mdict['season'])
-        except: pass
-    if mdict['episode'] is None: del mdict['episode']
-    else:
-        try: mdict['episode'] = int(mdict['episode'])
-        except: pass
-    episodeOfID = m.episodeOfID
-    if episodeOfID is not None:
-        ser_dict = get_movie_data(episodeOfID, kindDict, fromAka)
-        mdict['episode of'] = Movie(data=ser_dict, movieID=episodeOfID,
-                                    accessSystem='sql')
-        if fromAka:
-            ser_note = AkaTitle.get(episodeOfID).note
-            if ser_note:
-                mdict['episode of'].notes = ser_note
-    return mdict
-
-
-def _iterKeywords(results):
-    """Iterate over (key.id, key.keyword) columns of a selection of
-    the Keyword table."""
-    for key in results:
-        yield key.id, key.keyword
-
-
-def getSingleInfo(table, movieID, infoType, notAList=False):
-    """Return a dictionary in the form {infoType: infoListOrString},
-    retrieving a single set of information about a given movie, from
-    the specified table."""
-    infoTypeID = InfoType.select(InfoType.q.info == infoType)
-    if infoTypeID.count() == 0:
-        return {}
-    res = table.select(AND(table.q.movieID == movieID,
-                        table.q.infoTypeID == infoTypeID[0].id))
-    retList = []
-    for r in res:
-        info = r.info
-        note = r.note
-        if note:
-            info += u'::%s' % note
-        retList.append(info)
-    if not retList:
-        return {}
-    if not notAList: return {infoType: retList}
-    else: return {infoType: retList[0]}
-
-
-def _cmpTop(a, b, what='top 250 rank'):
-    """Compare function used to sort top 250/bottom 10 rank."""
-    av = int(a[1].get(what))
-    bv = int(b[1].get(what))
-    if av == bv:
-        return 0
-    return (-1, 1)[av > bv]
-
-def _cmpBottom(a, b):
-    """Compare function used to sort top 250/bottom 10 rank."""
-    return _cmpTop(a, b, what='bottom 10 rank')
-
-
-class IMDbSqlAccessSystem(IMDbBase):
-    """The class used to access IMDb's data through a SQL database."""
-
-    accessSystem = 'sql'
-    _sql_logger = logging.getLogger('imdbpy.parser.sql')
-
-    def __init__(self, uri, adultSearch=1, useORM=None, *arguments, **keywords):
-        """Initialize the access system."""
-        IMDbBase.__init__(self, *arguments, **keywords)
-        if useORM is None:
-            useORM = ('sqlobject', 'sqlalchemy')
-        if not isinstance(useORM, (tuple, list)):
-            if ',' in useORM:
-                useORM = useORM.split(',')
-            else:
-                useORM = [useORM]
-        self.useORM = useORM
-        nrMods = len(useORM)
-        _gotError = False
-        DB_TABLES = []
-        for idx, mod in enumerate(useORM):
-            mod = mod.strip().lower()
-            try:
-                if mod == 'sqlalchemy':
-                    from alchemyadapter import getDBTables, NotFoundError, \
-                                                setConnection, AND, OR, IN, \
-                                                ISNULL, CONTAINSSTRING, toUTF8
-                elif mod == 'sqlobject':
-                    from objectadapter import getDBTables, NotFoundError, \
-                                                setConnection, AND, OR, IN, \
-                                                ISNULL, CONTAINSSTRING, toUTF8
-                else:
-                    self._sql_logger.warn('unknown module "%s"' % mod)
-                    continue
-                self._sql_logger.info('using %s ORM', mod)
-                # XXX: look ma'... black magic!  It's used to make
-                #      TableClasses and some functions accessible
-                #      through the whole module.
-                for k, v in [('NotFoundError', NotFoundError),
-                            ('AND', AND), ('OR', OR), ('IN', IN),
-                            ('ISNULL', ISNULL),
-                            ('CONTAINSSTRING', CONTAINSSTRING)]:
-                    globals()[k] = v
-                self.toUTF8 = toUTF8
-                DB_TABLES = getDBTables(uri)
-                for t in DB_TABLES:
-                    globals()[t._imdbpyName] = t
-                if _gotError:
-                    self._sql_logger.warn('falling back to "%s"' % mod)
-                break
-            except ImportError, e:
-                if idx+1 >= nrMods:
-                    raise IMDbError('unable to use any ORM in %s: %s' % (
-                                                    str(useORM), str(e)))
-                else:
-                    self._sql_logger.warn('unable to use "%s": %s' % (mod,
-                                                                    str(e)))
-                    _gotError = True
-                continue
-        else:
-            raise IMDbError('unable to use any ORM in %s' % str(useORM))
-        # Set the connection to the database.
-        self._sql_logger.debug('connecting to %s', uri)
-        try:
-            self._connection = setConnection(uri, DB_TABLES)
-        except AssertionError, e:
-            raise IMDbDataAccessError( \
-                    'unable to connect to the database server; ' + \
-                    'complete message: "%s"' % str(e))
-        self.Error = self._connection.module.Error
-        # Maps some IDs to the corresponding strings.
-        self._kind = {}
-        self._kindRev = {}
-        self._sql_logger.debug('reading constants from the database')
-        try:
-            for kt in KindType.select():
-                self._kind[kt.id] = kt.kind
-                self._kindRev[str(kt.kind)] = kt.id
-        except self.Error:
-            # NOTE: you can also get the error, but - at least with
-            #       MySQL - it also contains the password, and I don't
-            #       like the idea to print it out.
-            raise IMDbDataAccessError( \
-                    'unable to connect to the database server')
-        self._role = {}
-        for rl in RoleType.select():
-            self._role[rl.id] = str(rl.role)
-        self._info = {}
-        self._infoRev = {}
-        for inf in InfoType.select():
-            self._info[inf.id] = str(inf.info)
-            self._infoRev[str(inf.info)] = inf.id
-        self._compType = {}
-        for cType in CompanyType.select():
-            self._compType[cType.id] = cType.kind
-        info = [(it.id, it.info) for it in InfoType.select()]
-        self._compcast = {}
-        for cc in CompCastType.select():
-            self._compcast[cc.id] = str(cc.kind)
-        self._link = {}
-        for lt in LinkType.select():
-            self._link[lt.id] = str(lt.link)
-        self._moviesubs = {}
-        # Build self._moviesubs, a dictionary used to rearrange
-        # the data structure for a movie object.
-        for vid, vinfo in info:
-            if not vinfo.startswith('LD '): continue
-            self._moviesubs[vinfo] = ('laserdisc', vinfo[3:])
-        self._moviesubs.update(_litd)
-        self._moviesubs.update(_busd)
-        self.do_adult_search(adultSearch)
-
-    def _findRefs(self, o, trefs, nrefs):
-        """Find titles or names references in strings."""
-        if isinstance(o, (unicode, str)):
-            for title in re_titleRef.findall(o):
-                a_title = analyze_title(title, canonical=0)
-                rtitle = build_title(a_title, ptdf=1)
-                if trefs.has_key(rtitle): continue
-                movieID = self._getTitleID(rtitle)
-                if movieID is None:
-                    movieID = self._getTitleID(title)
-                if movieID is None:
-                    continue
-                m = Movie(title=rtitle, movieID=movieID,
-                            accessSystem=self.accessSystem)
-                trefs[rtitle] = m
-                rtitle2 = canonicalTitle(a_title.get('title', u''))
-                if rtitle2 and rtitle2 != rtitle and rtitle2 != title:
-                    trefs[rtitle2] = m
-                if title != rtitle:
-                    trefs[title] = m
-            for name in re_nameRef.findall(o):
-                a_name = analyze_name(name, canonical=1)
-                rname = build_name(a_name, canonical=1)
-                if nrefs.has_key(rname): continue
-                personID = self._getNameID(rname)
-                if personID is None:
-                    personID = self._getNameID(name)
-                if personID is None: continue
-                p = Person(name=rname, personID=personID,
-                            accessSystem=self.accessSystem)
-                nrefs[rname] = p
-                rname2 = normalizeName(a_name.get('name', u''))
-                if rname2 and rname2 != rname:
-                    nrefs[rname2] = p
-                if name != rname and name != rname2:
-                    nrefs[name] = p
-        elif isinstance(o, (list, tuple)):
-            for item in o:
-                self._findRefs(item, trefs, nrefs)
-        elif isinstance(o, dict):
-            for value in o.values():
-                self._findRefs(value, trefs, nrefs)
-        return (trefs, nrefs)
-
-    def _extractRefs(self, o):
-        """Scan for titles or names references in strings."""
-        trefs = {}
-        nrefs = {}
-        try:
-            return self._findRefs(o, trefs, nrefs)
-        except RuntimeError, e:
-            # Symbian/python 2.2 has a poor regexp implementation.
-            import warnings
-            warnings.warn('RuntimeError in '
-                    "imdb.parser.sql.IMDbSqlAccessSystem; "
-                    "if it's not a recursion limit exceeded and we're not "
-                    "running in a Symbian environment, it's a bug:\n%s" % e)
-            return (trefs, nrefs)
-
-    def _changeAKAencoding(self, akanotes, akatitle):
-        """Return akatitle in the correct charset, as specified in
-        the akanotes field; if akatitle doesn't need to be modified,
-        return None."""
-        oti = akanotes.find('(original ')
-        if oti == -1: return None
-        ote = akanotes[oti+10:].find(' title)')
-        if ote != -1:
-            cs_info = akanotes[oti+10:oti+10+ote].lower().split()
-            for e in cs_info:
-                # excludes some strings that clearly are not encoding.
-                if e in ('script', '', 'cyrillic', 'greek'): continue
-                if e.startswith('iso-') and e.find('latin') != -1:
-                    e = e[4:].replace('-', '')
-                try:
-                    lookup(e)
-                    lat1 = akatitle.encode('latin_1', 'replace')
-                    return unicode(lat1, e, 'replace')
-                except (LookupError, ValueError, TypeError):
-                    continue
-        return None
-
-    def _buildNULLCondition(self, col, val):
-        """Build a comparison for columns where values can be NULL."""
-        if val is None:
-            return ISNULL(col)
-        else:
-            if isinstance(val, (int, long)):
-                return col == val
-            else:
-                return col == self.toUTF8(val)
-
-    def _getTitleID(self, title):
-        """Given a long imdb canonical title, returns a movieID or
-        None if not found."""
-        td = analyze_title(title)
-        condition = None
-        if td['kind'] == 'episode':
-            epof = td['episode of']
-            seriesID = [s.id for s in Title.select(
-                        AND(Title.q.title == self.toUTF8(epof['title']),
-                            self._buildNULLCondition(Title.q.imdbIndex,
-                                                    epof.get('imdbIndex')),
-                           Title.q.kindID == self._kindRev[epof['kind']],
-                           self._buildNULLCondition(Title.q.productionYear,
-                                                    epof.get('year'))))]
-            if seriesID:
-                condition = AND(IN(Title.q.episodeOfID, seriesID),
-                                Title.q.title == self.toUTF8(td['title']),
-                                self._buildNULLCondition(Title.q.imdbIndex,
-                                                        td.get('imdbIndex')),
-                                Title.q.kindID == self._kindRev[td['kind']],
-                                self._buildNULLCondition(Title.q.productionYear,
-                                                        td.get('year')))
-        if condition is None:
-            condition = AND(Title.q.title == self.toUTF8(td['title']),
-                            self._buildNULLCondition(Title.q.imdbIndex,
-                                                    td.get('imdbIndex')),
-                            Title.q.kindID == self._kindRev[td['kind']],
-                            self._buildNULLCondition(Title.q.productionYear,
-                                                    td.get('year')))
-        res = Title.select(condition)
-        try:
-            if res.count() != 1:
-                return None
-        except (UnicodeDecodeError, TypeError):
-            return None
-        return res[0].id
-
-    def _getNameID(self, name):
-        """Given a long imdb canonical name, returns a personID or
-        None if not found."""
-        nd = analyze_name(name)
-        res = Name.select(AND(Name.q.name == self.toUTF8(nd['name']),
-                                self._buildNULLCondition(Name.q.imdbIndex,
-                                                        nd.get('imdbIndex'))))
-        try:
-            c = res.count()
-            if res.count() != 1:
-                return None
-        except (UnicodeDecodeError, TypeError):
-            return None
-        return res[0].id
-
-    def _normalize_movieID(self, movieID):
-        """Normalize the given movieID."""
-        try:
-            return int(movieID)
-        except (ValueError, OverflowError):
-            raise IMDbError('movieID "%s" can\'t be converted to integer' % \
-                            movieID)
-
-    def _normalize_personID(self, personID):
-        """Normalize the given personID."""
-        try:
-            return int(personID)
-        except (ValueError, OverflowError):
-            raise IMDbError('personID "%s" can\'t be converted to integer' % \
-                            personID)
-
-    def _normalize_characterID(self, characterID):
-        """Normalize the given characterID."""
-        try:
-            return int(characterID)
-        except (ValueError, OverflowError):
-            raise IMDbError('characterID "%s" can\'t be converted to integer' \
-                            % characterID)
-
-    def _normalize_companyID(self, companyID):
-        """Normalize the given companyID."""
-        try:
-            return int(companyID)
-        except (ValueError, OverflowError):
-            raise IMDbError('companyID "%s" can\'t be converted to integer' \
-                            % companyID)
-
-    def get_imdbMovieID(self, movieID):
-        """Translate a movieID in an imdbID.
-        If not in the database, try an Exact Primary Title search on IMDb;
-        return None if it's unable to get the imdbID.
-        """
-        try: movie = Title.get(movieID)
-        except NotFoundError: return None
-        imdbID = movie.imdbID
-        if imdbID is not None: return '%07d' % imdbID
-        m_dict = get_movie_data(movie.id, self._kind)
-        titline = build_title(m_dict, ptdf=0)
-        imdbID = self.title2imdbID(titline, m_dict['kind'])
-        # If the imdbID was retrieved from the web and was not in the
-        # database, update the database (ignoring errors, because it's
-        # possibile that the current user has not update privileges).
-        # There're times when I think I'm a genius; this one of
-        # those times... <g>
-        if imdbID is not None and not isinstance(imdbID, list):
-            try: movie.imdbID = int(imdbID)
-            except: pass
-        return imdbID
-
-    def get_imdbPersonID(self, personID):
-        """Translate a personID in an imdbID.
-        If not in the database, try an Exact Primary Name search on IMDb;
-        return None if it's unable to get the imdbID.
-        """
-        try: person = Name.get(personID)
-        except NotFoundError: return None
-        imdbID = person.imdbID
-        if imdbID is not None: return '%07d' % imdbID
-        n_dict = {'name': person.name, 'imdbIndex': person.imdbIndex}
-        namline = build_name(n_dict, canonical=False)
-        imdbID = self.name2imdbID(namline)
-        if imdbID is not None and not isinstance(imdbID, list):
-            try: person.imdbID = int(imdbID)
-            except: pass
-        return imdbID
-
-    def get_imdbCharacterID(self, characterID):
-        """Translate a characterID in an imdbID.
-        If not in the database, try an Exact Primary Name search on IMDb;
-        return None if it's unable to get the imdbID.
-        """
-        try: character = CharName.get(characterID)
-        except NotFoundError: return None
-        imdbID = character.imdbID
-        if imdbID is not None: return '%07d' % imdbID
-        n_dict = {'name': character.name, 'imdbIndex': character.imdbIndex}
-        namline = build_name(n_dict, canonical=False)
-        imdbID = self.character2imdbID(namline)
-        if imdbID is not None and not isinstance(imdbID, list):
-            try: character.imdbID = int(imdbID)
-            except: pass
-        return imdbID
-
-    def get_imdbCompanyID(self, companyID):
-        """Translate a companyID in an imdbID.
-        If not in the database, try an Exact Primary Name search on IMDb;
-        return None if it's unable to get the imdbID.
-        """
-        try: company = CompanyName.get(companyID)
-        except NotFoundError: return None
-        imdbID = company.imdbID
-        if imdbID is not None: return '%07d' % imdbID
-        n_dict = {'name': company.name, 'country': company.countryCode}
-        namline = build_company_name(n_dict)
-        imdbID = self.company2imdbID(namline)
-        if imdbID is not None and not isinstance(imdbID, list):
-            try: company.imdbID = int(imdbID)
-            except: pass
-        return imdbID
-
-    def do_adult_search(self, doAdult):
-        """If set to 0 or False, movies in the Adult category are not
-        shown in the results of a search."""
-        self.doAdult = doAdult
-
-    def _search_movie(self, title, results, _episodes=False):
-        title = title.strip()
-        if not title: return []
-        title_dict = analyze_title(title, canonical=1)
-        s_title = title_dict['title']
-        if not s_title: return []
-        episodeOf = title_dict.get('episode of')
-        if episodeOf:
-            _episodes = False
-        s_title_split = s_title.split(', ')
-        if len(s_title_split) > 1 and \
-                s_title_split[-1].lower() in _unicodeArticles:
-            s_title_rebuilt = ', '.join(s_title_split[:-1])
-            if s_title_rebuilt:
-                s_title = s_title_rebuilt
-        #if not episodeOf:
-        #    if not _episodes:
-        #        s_title_split = s_title.split(', ')
-        #        if len(s_title_split) > 1 and \
-        #                s_title_split[-1].lower() in _articles:
-        #            s_title_rebuilt = ', '.join(s_title_split[:-1])
-        #            if s_title_rebuilt:
-        #                s_title = s_title_rebuilt
-        #else:
-        #    _episodes = False
-        if isinstance(s_title, unicode):
-            s_title = s_title.encode('ascii', 'ignore')
-
-        soundexCode = soundex(s_title)
-
-        # XXX: improve the search restricting the kindID if the
-        #      "kind" of the input differs from "movie"?
-        condition = conditionAka = None
-        if _episodes:
-            condition = AND(Title.q.phoneticCode == soundexCode,
-                            Title.q.kindID == self._kindRev['episode'])
-            conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
-                            AkaTitle.q.kindID == self._kindRev['episode'])
-        elif title_dict['kind'] == 'episode' and episodeOf is not None:
-            # set canonical=0 ?  Should not make much difference.
-            series_title = build_title(episodeOf, canonical=1)
-            # XXX: is it safe to get "results" results?
-            #      Too many?  Too few?
-            serRes = results
-            if serRes < 3 or serRes > 10:
-                serRes = 10
-            searchSeries = self._search_movie(series_title, serRes)
-            seriesIDs = [result[0] for result in searchSeries]
-            if seriesIDs:
-                condition = AND(Title.q.phoneticCode == soundexCode,
-                                IN(Title.q.episodeOfID, seriesIDs),
-                                Title.q.kindID == self._kindRev['episode'])
-                conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
-                                IN(AkaTitle.q.episodeOfID, seriesIDs),
-                                AkaTitle.q.kindID == self._kindRev['episode'])
-            else:
-                # XXX: bad situation: we have found no matching series;
-                #      try searching everything (both episodes and
-                #      non-episodes) for the title.
-                condition = AND(Title.q.phoneticCode == soundexCode,
-                                IN(Title.q.episodeOfID, seriesIDs))
-                conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
-                                IN(AkaTitle.q.episodeOfID, seriesIDs))
-        if condition is None:
-            # XXX: excludes episodes?
-            condition = AND(Title.q.kindID != self._kindRev['episode'],
-                            Title.q.phoneticCode == soundexCode)
-            conditionAka = AND(AkaTitle.q.kindID != self._kindRev['episode'],
-                            AkaTitle.q.phoneticCode == soundexCode)
-
-        # Up to 3 variations of the title are searched, plus the
-        # long imdb canonical title, if provided.
-        if not _episodes:
-            title1, title2, title3 = titleVariations(title)
-        else:
-            title1 = title
-            title2 = ''
-            title3 = ''
-        try:
-            qr = [(q.id, get_movie_data(q.id, self._kind))
-                    for q in Title.select(condition)]
-            q2 = [(q.movieID, get_movie_data(q.id, self._kind, fromAka=1))
-                    for q in AkaTitle.select(conditionAka)]
-            qr += q2
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to search the database: "%s"' % str(e))
-
-        resultsST = results * 3
-        res = scan_titles(qr, title1, title2, title3, resultsST,
-                            searchingEpisode=episodeOf is not None,
-                            onlyEpisodes=_episodes,
-                            ro_thresold=0.0)
-        res[:] = [x[1] for x in res]
-
-        if res and not self.doAdult:
-            mids = [x[0] for x in res]
-            genreID = self._infoRev['genres']
-            adultlist = [al.movieID for al
-                        in MovieInfo.select(
-                            AND(MovieInfo.q.infoTypeID == genreID,
-                                MovieInfo.q.info == 'Adult',
-                                IN(MovieInfo.q.movieID, mids)))]
-            res[:] = [x for x in res if x[0] not in adultlist]
-
-        new_res = []
-        # XXX: can there be duplicates?
-        for r in res:
-            if r not in q2:
-                new_res.append(r)
-                continue
-            mdict = r[1]
-            aka_title = build_title(mdict, ptdf=1)
-            orig_dict = get_movie_data(r[0], self._kind)
-            orig_title = build_title(orig_dict, ptdf=1)
-            if aka_title == orig_title:
-                new_res.append(r)
-                continue
-            orig_dict['akas'] = [aka_title]
-            new_res.append((r[0], orig_dict))
-        if results > 0: new_res[:] = new_res[:results]
-        return new_res
-
-    def _search_episode(self, title, results):
-        return self._search_movie(title, results, _episodes=True)
-
-    def get_movie_main(self, movieID):
-        # Every movie information is retrieved from here.
-        infosets = self.get_movie_infoset()
-        try:
-            res = get_movie_data(movieID, self._kind)
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to get movieID "%s": "%s"' % (movieID, str(e)))
-        if not res:
-            raise IMDbDataAccessError('unable to get movieID "%s"' % movieID)
-        # Collect cast information.
-        castdata = [[cd.personID, cd.personRoleID, cd.note, cd.nrOrder,
-                    self._role[cd.roleID]]
-                    for cd in CastInfo.select(CastInfo.q.movieID == movieID)]
-        for p in castdata:
-            person = Name.get(p[0])
-            p += [person.name, person.imdbIndex]
-            if p[4] in ('actor', 'actress'):
-                p[4] = 'cast'
-        # Regroup by role/duty (cast, writer, director, ...)
-        castdata[:] =  _groupListBy(castdata, 4)
-        for group in castdata:
-            duty = group[0][4]
-            for pdata in group:
-                curRole = pdata[1]
-                curRoleID = None
-                if curRole is not None:
-                    robj = CharName.get(curRole)
-                    curRole = robj.name
-                    curRoleID = robj.id
-                p = Person(personID=pdata[0], name=pdata[5],
-                            currentRole=curRole or u'',
-                            roleID=curRoleID,
-                            notes=pdata[2] or u'',
-                            accessSystem='sql')
-                if pdata[6]: p['imdbIndex'] = pdata[6]
-                p.billingPos = pdata[3]
-                res.setdefault(duty, []).append(p)
-            if duty == 'cast':
-                res[duty] = merge_roles(res[duty])
-            res[duty].sort()
-        # Info about the movie.
-        minfo = [(self._info[m.infoTypeID], m.info, m.note)
-                for m in MovieInfo.select(MovieInfo.q.movieID == movieID)]
-        minfo += [(self._info[m.infoTypeID], m.info, m.note)
-                for m in MovieInfoIdx.select(MovieInfoIdx.q.movieID == movieID)]
-        minfo += [('keywords', Keyword.get(m.keywordID).keyword, None)
-                for m in MovieKeyword.select(MovieKeyword.q.movieID == movieID)]
-        minfo = _groupListBy(minfo, 0)
-        for group in minfo:
-            sect = group[0][0]
-            for mdata in group:
-                data = mdata[1]
-                if mdata[2]: data += '::%s' % mdata[2]
-                res.setdefault(sect, []).append(data)
-        # Companies info about a movie.
-        cinfo = [(self._compType[m.companyTypeID], m.companyID, m.note) for m
-                in MovieCompanies.select(MovieCompanies.q.movieID == movieID)]
-        cinfo = _groupListBy(cinfo, 0)
-        for group in cinfo:
-            sect = group[0][0]
-            for mdata in group:
-                cDb = CompanyName.get(mdata[1])
-                cDbTxt = cDb.name
-                if cDb.countryCode:
-                    cDbTxt += ' %s' % cDb.countryCode
-                company = Company(name=cDbTxt,
-                                companyID=mdata[1],
-                                notes=mdata[2] or u'',
-                                accessSystem=self.accessSystem)
-                res.setdefault(sect, []).append(company)
-        # AKA titles.
-        akat = [(get_movie_data(at.id, self._kind, fromAka=1), at.note)
-                for at in AkaTitle.select(AkaTitle.q.movieID == movieID)]
-        if akat:
-            res['akas'] = []
-            for td, note in akat:
-                nt = build_title(td, ptdf=1)
-                if note:
-                    net = self._changeAKAencoding(note, nt)
-                    if net is not None: nt = net
-                    nt += '::%s' % note
-                if nt not in res['akas']: res['akas'].append(nt)
-        # Complete cast/crew.
-        compcast = [(self._compcast[cc.subjectID], self._compcast[cc.statusID])
-            for cc in CompleteCast.select(CompleteCast.q.movieID == movieID)]
-        if compcast:
-            for entry in compcast:
-                val = unicode(entry[1])
-                res[u'complete %s' % entry[0]] = val
-        # Movie connections.
-        mlinks = [[ml.linkedMovieID, self._link[ml.linkTypeID]]
-                    for ml in MovieLink.select(MovieLink.q.movieID == movieID)]
-        if mlinks:
-            for ml in mlinks:
-                lmovieData = get_movie_data(ml[0], self._kind)
-                if lmovieData:
-                    m = Movie(movieID=ml[0], data=lmovieData, accessSystem='sql')
-                    ml[0] = m
-            res['connections'] = {}
-            mlinks[:] = _groupListBy(mlinks, 1)
-            for group in mlinks:
-                lt = group[0][1]
-                res['connections'][lt] = [i[0] for i in group]
-        # Episodes.
-        episodes = {}
-        eps_list = list(Title.select(Title.q.episodeOfID == movieID))
-        eps_list.sort()
-        if eps_list:
-            ps_data = {'title': res['title'], 'kind': res['kind'],
-                        'year': res.get('year'),
-                        'imdbIndex': res.get('imdbIndex')}
-            parentSeries = Movie(movieID=movieID, data=ps_data,
-                                accessSystem='sql')
-            for episode in eps_list:
-                episodeID = episode.id
-                episode_data = get_movie_data(episodeID, self._kind)
-                m = Movie(movieID=episodeID, data=episode_data,
-                            accessSystem='sql')
-                m['episode of'] = parentSeries
-                season = episode_data.get('season', 'UNKNOWN')
-                if season not in episodes: episodes[season] = {}
-                ep_number = episode_data.get('episode')
-                if ep_number is None:
-                    ep_number = max((episodes[season].keys() or [0])) + 1
-                episodes[season][ep_number] = m
-            res['episodes'] = episodes
-            res['number of episodes'] = sum([len(x) for x in episodes.values()])
-            res['number of seasons'] = len(episodes.keys())
-        # Regroup laserdisc information.
-        res = _reGroupDict(res, self._moviesubs)
-        # Do some transformation to preserve consistency with other
-        # data access systems.
-        if 'quotes' in res:
-            for idx, quote in enumerate(res['quotes']):
-                res['quotes'][idx] = quote.split('::')
-        if 'runtimes' in res and len(res['runtimes']) > 0:
-            rt = res['runtimes'][0]
-            episodes = re_episodes.findall(rt)
-            if episodes:
-                res['runtimes'][0] = re_episodes.sub('', rt)
-                if res['runtimes'][0][-2:] == '::':
-                    res['runtimes'][0] = res['runtimes'][0][:-2]
-        if 'votes' in res:
-            res['votes'] = int(res['votes'][0])
-        if 'rating' in res:
-            res['rating'] = float(res['rating'][0])
-        if 'votes distribution' in res:
-            res['votes distribution'] = res['votes distribution'][0]
-        if 'mpaa' in res:
-            res['mpaa'] = res['mpaa'][0]
-        if 'top 250 rank' in res:
-            try: res['top 250 rank'] = int(res['top 250 rank'])
-            except: pass
-        if 'bottom 10 rank' in res:
-            try: res['bottom 100 rank'] = int(res['bottom 10 rank'])
-            except: pass
-            del res['bottom 10 rank']
-        for old, new in [('guest', 'guests'), ('trademarks', 'trade-mark'),
-                        ('articles', 'article'), ('pictorials', 'pictorial'),
-                        ('magazine-covers', 'magazine-cover-photo')]:
-            if old in res:
-                res[new] = res[old]
-                del res[old]
-        trefs,nrefs = {}, {}
-        trefs,nrefs = self._extractRefs(sub_dict(res,Movie.keys_tomodify_list))
-        return {'data': res, 'titlesRefs': trefs, 'namesRefs': nrefs,
-                'info sets': infosets}
-
-    # Just to know what kind of information are available.
-    get_movie_alternate_versions = get_movie_main
-    get_movie_business = get_movie_main
-    get_movie_connections = get_movie_main
-    get_movie_crazy_credits = get_movie_main
-    get_movie_goofs = get_movie_main
-    get_movie_keywords = get_movie_main
-    get_movie_literature = get_movie_main
-    get_movie_locations = get_movie_main
-    get_movie_plot = get_movie_main
-    get_movie_quotes = get_movie_main
-    get_movie_release_dates = get_movie_main
-    get_movie_soundtrack = get_movie_main
-    get_movie_taglines = get_movie_main
-    get_movie_technical = get_movie_main
-    get_movie_trivia = get_movie_main
-    get_movie_vote_details = get_movie_main
-    get_movie_episodes = get_movie_main
-
-    def _search_person(self, name, results):
-        name = name.strip()
-        if not name: return []
-        s_name = analyze_name(name)['name']
-        if not s_name: return []
-        if isinstance(s_name, unicode):
-            s_name = s_name.encode('ascii', 'ignore')
-        soundexCode = soundex(s_name)
-        name1, name2, name3 = nameVariations(name)
-
-        # If the soundex is None, compare only with the first
-        # phoneticCode column.
-        if soundexCode is not None:
-            condition = IN(soundexCode, [Name.q.namePcodeCf,
-                                        Name.q.namePcodeNf,
-                                        Name.q.surnamePcode])
-            conditionAka = IN(soundexCode, [AkaName.q.namePcodeCf,
-                                            AkaName.q.namePcodeNf,
-                                            AkaName.q.surnamePcode])
-        else:
-            condition = ISNULL(Name.q.namePcodeCf)
-            conditionAka = ISNULL(AkaName.q.namePcodeCf)
-
-        try:
-            qr = [(q.id, {'name': q.name, 'imdbIndex': q.imdbIndex})
-                    for q in Name.select(condition)]
-
-            q2 = [(q.personID, {'name': q.name, 'imdbIndex': q.imdbIndex})
-                    for q in AkaName.select(conditionAka)]
-            qr += q2
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to search the database: "%s"' % str(e))
-
-        res = scan_names(qr, name1, name2, name3, results)
-        res[:] = [x[1] for x in res]
-        # Purge empty imdbIndex.
-        returnl = []
-        for x in res:
-            tmpd = x[1]
-            if tmpd['imdbIndex'] is None:
-                del tmpd['imdbIndex']
-            returnl.append((x[0], tmpd))
-
-        new_res = []
-        # XXX: can there be duplicates?
-        for r in returnl:
-            if r not in q2:
-                new_res.append(r)
-                continue
-            pdict = r[1]
-            aka_name = build_name(pdict, canonical=1)
-            p = Name.get(r[0])
-            orig_dict = {'name': p.name, 'imdbIndex': p.imdbIndex}
-            if orig_dict['imdbIndex'] is None:
-                del orig_dict['imdbIndex']
-            orig_name = build_name(orig_dict, canonical=1)
-            if aka_name == orig_name:
-                new_res.append(r)
-                continue
-            orig_dict['akas'] = [aka_name]
-            new_res.append((r[0], orig_dict))
-        if results > 0: new_res[:] = new_res[:results]
-
-        return new_res
-
-    def get_person_main(self, personID):
-        # Every person information is retrieved from here.
-        infosets = self.get_person_infoset()
-        try:
-            p = Name.get(personID)
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to get personID "%s": "%s"' % (personID, str(e)))
-        res = {'name': p.name, 'imdbIndex': p.imdbIndex}
-        if res['imdbIndex'] is None: del res['imdbIndex']
-        if not res:
-            raise IMDbDataAccessError('unable to get personID "%s"' % personID)
-        # Collect cast information.
-        castdata = [(cd.movieID, cd.personRoleID, cd.note,
-                    self._role[cd.roleID],
-                    get_movie_data(cd.movieID, self._kind))
-                for cd in CastInfo.select(CastInfo.q.personID == personID)]
-        # Regroup by role/duty (cast, writer, director, ...)
-        castdata[:] =  _groupListBy(castdata, 3)
-        episodes = {}
-        seenDuties = []
-        for group in castdata:
-            for mdata in group:
-                duty = orig_duty = group[0][3]
-                if duty not in seenDuties: seenDuties.append(orig_duty)
-                note = mdata[2] or u''
-                if 'episode of' in mdata[4]:
-                    duty = 'episodes'
-                    if orig_duty not in ('actor', 'actress'):
-                        if note: note = ' %s' % note
-                        note = '[%s]%s' % (orig_duty, note)
-                curRole = mdata[1]
-                curRoleID = None
-                if curRole is not None:
-                    robj = CharName.get(curRole)
-                    curRole = robj.name
-                    curRoleID = robj.id
-                m = Movie(movieID=mdata[0], data=mdata[4],
-                            currentRole=curRole or u'',
-                            roleID=curRoleID,
-                            notes=note, accessSystem='sql')
-                if duty != 'episodes':
-                    res.setdefault(duty, []).append(m)
-                else:
-                    episodes.setdefault(m['episode of'], []).append(m)
-        if episodes:
-            for k in episodes:
-                episodes[k].sort()
-                episodes[k].reverse()
-            res['episodes'] = episodes
-        for duty in seenDuties:
-            if duty in res:
-                if duty in ('actor', 'actress', 'himself', 'herself',
-                            'themselves'):
-                    res[duty] = merge_roles(res[duty])
-                res[duty].sort()
-        # Info about the person.
-        pinfo = [(self._info[pi.infoTypeID], pi.info, pi.note)
-                for pi in PersonInfo.select(PersonInfo.q.personID == personID)]
-        # Regroup by duty.
-        pinfo = _groupListBy(pinfo, 0)
-        for group in pinfo:
-            sect = group[0][0]
-            for pdata in group:
-                data = pdata[1]
-                if pdata[2]: data += '::%s' % pdata[2]
-                res.setdefault(sect, []).append(data)
-        # AKA names.
-        akan = [(an.name, an.imdbIndex)
-                for an in AkaName.select(AkaName.q.personID == personID)]
-        if akan:
-            res['akas'] = []
-            for n in akan:
-                nd = {'name': n[0]}
-                if n[1]: nd['imdbIndex'] = n[1]
-                nt = build_name(nd, canonical=1)
-                res['akas'].append(nt)
-        # Do some transformation to preserve consistency with other
-        # data access systems.
-        for key in ('birth date', 'birth notes', 'death date', 'death notes',
-                        'birth name', 'height'):
-            if key in res:
-                res[key] = res[key][0]
-        if 'guest' in res:
-            res['notable tv guest appearances'] = res['guest']
-            del res['guest']
-        miscnames = res.get('nick names', [])
-        if 'birth name' in res: miscnames.append(res['birth name'])
-        if 'akas' in res:
-            for mname in miscnames:
-                if mname in res['akas']: res['akas'].remove(mname)
-            if not res['akas']: del res['akas']
-        trefs,nrefs = self._extractRefs(sub_dict(res,Person.keys_tomodify_list))
-        return {'data': res, 'titlesRefs': trefs, 'namesRefs': nrefs,
-                'info sets': infosets}
-
-    # Just to know what kinds of information are available.
-    get_person_filmography = get_person_main
-    get_person_biography = get_person_main
-    get_person_other_works = get_person_main
-    get_person_episodes = get_person_main
-
-    def _search_character(self, name, results):
-        name = name.strip()
-        if not name: return []
-        s_name = analyze_name(name)['name']
-        if not s_name: return []
-        if isinstance(s_name, unicode):
-            s_name = s_name.encode('ascii', 'ignore')
-        s_name = normalizeName(s_name)
-        soundexCode = soundex(s_name)
-        surname = s_name.split(' ')[-1]
-        surnameSoundex = soundex(surname)
-        name2 = ''
-        soundexName2 = None
-        nsplit = s_name.split()
-        if len(nsplit) > 1:
-            name2 = '%s %s' % (nsplit[-1], ' '.join(nsplit[:-1]))
-            if s_name == name2:
-                name2 = ''
-            else:
-                soundexName2 = soundex(name2)
-        # If the soundex is None, compare only with the first
-        # phoneticCode column.
-        if soundexCode is not None:
-            if soundexName2 is not None:
-                condition = OR(surnameSoundex == CharName.q.surnamePcode,
-                            IN(CharName.q.namePcodeNf, [soundexCode,
-                                                        soundexName2]),
-                            IN(CharName.q.surnamePcode, [soundexCode,
-                                                        soundexName2]))
-            else:
-                condition = OR(surnameSoundex == CharName.q.surnamePcode,
-                            IN(soundexCode, [CharName.q.namePcodeNf,
-                                            CharName.q.surnamePcode]))
-        else:
-            condition = ISNULL(Name.q.namePcodeNf)
-        try:
-            qr = [(q.id, {'name': q.name, 'imdbIndex': q.imdbIndex})
-                    for q in CharName.select(condition)]
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to search the database: "%s"' % str(e))
-        res = scan_names(qr, s_name, name2, '', results,
-                        _scan_character=True)
-        res[:] = [x[1] for x in res]
-        # Purge empty imdbIndex.
-        returnl = []
-        for x in res:
-            tmpd = x[1]
-            if tmpd['imdbIndex'] is None:
-                del tmpd['imdbIndex']
-            returnl.append((x[0], tmpd))
-        return returnl
-
-    def get_character_main(self, characterID, results=1000):
-        # All character information is retrieved from here.
-        infosets = self.get_character_infoset()
-        try:
-            c = CharName.get(characterID)
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to get characterID "%s": "%s"' % (characterID, e))
-        res = {'name': c.name, 'imdbIndex': c.imdbIndex}
-        if res['imdbIndex'] is None: del res['imdbIndex']
-        if not res:
-            raise IMDbDataAccessError('unable to get characterID "%s"' % \
-                                        characterID)
-        # Collect filmography information.
-        items = CastInfo.select(CastInfo.q.personRoleID == characterID)
-        if results > 0:
-            items = items[:results]
-        filmodata = [(cd.movieID, cd.personID, cd.note,
-                    get_movie_data(cd.movieID, self._kind)) for cd in items
-                    if self._role[cd.roleID] in ('actor', 'actress')]
-        fdata = []
-        for f in filmodata:
-            curRole = None
-            curRoleID = f[1]
-            note = f[2] or u''
-            if curRoleID is not None:
-                robj = Name.get(curRoleID)
-                curRole = robj.name
-            m = Movie(movieID=f[0], data=f[3],
-                        currentRole=curRole or u'',
-                        roleID=curRoleID, roleIsPerson=True,
-                        notes=note, accessSystem='sql')
-            fdata.append(m)
-        fdata = merge_roles(fdata)
-        fdata.sort()
-        if fdata:
-            res['filmography'] = fdata
-        return {'data': res, 'info sets': infosets}
-
-    get_character_filmography = get_character_main
-    get_character_biography = get_character_main
-
-    def _search_company(self, name, results):
-        name = name.strip()
-        if not name: return []
-        if isinstance(name, unicode):
-            name = name.encode('ascii', 'ignore')
-        soundexCode = soundex(name)
-        # If the soundex is None, compare only with the first
-        # phoneticCode column.
-        if soundexCode is None:
-            condition = ISNULL(CompanyName.q.namePcodeNf)
-        else:
-            if name.endswith(']'):
-                condition = CompanyName.q.namePcodeSf == soundexCode
-            else:
-                condition = CompanyName.q.namePcodeNf == soundexCode
-        try:
-            qr = [(q.id, {'name': q.name, 'country': q.countryCode})
-                    for q in CompanyName.select(condition)]
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to search the database: "%s"' % str(e))
-        qr[:] = [(x[0], build_company_name(x[1])) for x in qr]
-        res = scan_company_names(qr, name, results)
-        res[:] = [x[1] for x in res]
-        # Purge empty country keys.
-        returnl = []
-        for x in res:
-            tmpd = x[1]
-            country = tmpd.get('country')
-            if country is None and 'country' in tmpd:
-                del tmpd['country']
-            returnl.append((x[0], tmpd))
-        return returnl
-
-    def get_company_main(self, companyID, results=0):
-        # All company information is retrieved from here.
-        infosets = self.get_company_infoset()
-        try:
-            c = CompanyName.get(companyID)
-        except NotFoundError, e:
-            raise IMDbDataAccessError( \
-                    'unable to get companyID "%s": "%s"' % (companyID, e))
-        res = {'name': c.name, 'country': c.countryCode}
-        if res['country'] is None: del res['country']
-        if not res:
-            raise IMDbDataAccessError('unable to get companyID "%s"' % \
-                                        companyID)
-        # Collect filmography information.
-        items = MovieCompanies.select(MovieCompanies.q.companyID == companyID)
-        if results > 0:
-            items = items[:results]
-        filmodata = [(cd.movieID, cd.companyID,
-                    self._compType[cd.companyTypeID], cd.note,
-                    get_movie_data(cd.movieID, self._kind)) for cd in items]
-        filmodata = _groupListBy(filmodata, 2)
-        for group in filmodata:
-            ctype = group[0][2]
-            for movieID, companyID, ctype, note, movieData in group:
-                movie = Movie(data=movieData, movieID=movieID,
-                            notes=note or u'', accessSystem=self.accessSystem)
-                res.setdefault(ctype, []).append(movie)
-            res.get(ctype, []).sort()
-        return {'data': res, 'info sets': infosets}
-
-    def _search_keyword(self, keyword, results):
-        constr = OR(Keyword.q.phoneticCode ==
-                    soundex(keyword.encode('ascii', 'ignore')),
-                    CONTAINSSTRING(Keyword.q.keyword, self.toUTF8(keyword)))
-        return filterSimilarKeywords(keyword,
-                        _iterKeywords(Keyword.select(constr)))[:results]
-
-    def _get_keyword(self, keyword, results):
-        keyID = Keyword.select(Keyword.q.keyword == keyword)
-        if keyID.count() == 0:
-            return []
-        keyID = keyID[0].id
-        movies = MovieKeyword.select(MovieKeyword.q.keywordID ==
-                                    keyID)[:results]
-        return [(m.movieID, get_movie_data(m.movieID, self._kind))
-                for m in movies]
-
-    def _get_top_bottom_movies(self, kind):
-        if kind == 'top':
-            kind = 'top 250 rank'
-        elif kind == 'bottom':
-            # Not a mistake: the plain text data files contain only
-            # the bottom 10 movies.
-            kind = 'bottom 10 rank'
-        else:
-            return []
-        infoID = InfoType.select(InfoType.q.info == kind)
-        if infoID.count() == 0:
-            return []
-        infoID = infoID[0].id
-        movies = MovieInfoIdx.select(MovieInfoIdx.q.infoTypeID == infoID)
-        ml = []
-        for m in movies:
-            minfo = get_movie_data(m.movieID, self._kind)
-            for k in kind, 'votes', 'rating', 'votes distribution':
-                valueDict = getSingleInfo(MovieInfoIdx, m.movieID,
-                                            k, notAList=True)
-                if k in (kind, 'votes') and k in valueDict:
-                    valueDict[k] = int(valueDict[k])
-                elif k == 'rating' and k in valueDict:
-                    valueDict[k] = float(valueDict[k])
-                minfo.update(valueDict)
-            ml.append((m.movieID, minfo))
-        sorter = (_cmpBottom, _cmpTop)[kind == 'top 250 rank']
-        ml.sort(sorter)
-        return ml
-
-    def __del__(self):
-        """Ensure that the connection is closed."""
-        if not hasattr(self, '_connection'): return
-        self._sql_logger.debug('closing connection to the database')
-        self._connection.close()
-
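
A standalone sketch (an illustration, not code from the removed module) of the phonetic pre-filter used by _search_character() above: it builds soundex codes for the name as typed, for the bare surname, and for a "surname firstname" rearrangement, then ORs them against the namePcodeNf and surnamePcode columns before the more expensive scan_names() ranking. The soundex() below is a generic stand-in for the package's own helper.

```
def soundex(s, max_len=5):
    """Generic soundex stand-in (not the package's own helper)."""
    s = ''.join(ch for ch in s.upper() if 'A' <= ch <= 'Z')
    if not s:
        return None
    table = {}
    for letters, digit in (('BFPV', '1'), ('CGJKQSXZ', '2'), ('DT', '3'),
                           ('L', '4'), ('MN', '5'), ('R', '6')):
        for ch in letters:
            table[ch] = digit
    code = s[0]
    for ch in s[1:]:
        digit = table.get(ch)
        # Drop vowels/H/W/Y and collapse repeated digits.
        if digit and digit != code[-1]:
            code += digit
            if len(code) >= max_len:
                break
    return code

def phonetic_candidates(name):
    """Codes to compare against the namePcodeNf / surnamePcode columns."""
    parts = name.split()
    if not parts:
        return set()
    codes = {soundex(name), soundex(parts[-1])}    # full name + bare surname
    if len(parts) > 1:
        swapped = '%s %s' % (parts[-1], ' '.join(parts[:-1]))
        if swapped != name:
            codes.add(soundex(swapped))            # "Surname Firstname" variant
    return codes - {None}

print(phonetic_candidates('Sherlock Holmes'))
```
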
diff --git a/lib/imdb/parser/sql/alchemyadapter.py b/lib/imdb/parser/sql/alchemyadapter.py
deleted file mode 100644
index 3872dcb77da72d9d27583127e2cc09a2c677e9d4..0000000000000000000000000000000000000000
--- a/lib/imdb/parser/sql/alchemyadapter.py
+++ /dev/null
@@ -1,513 +0,0 @@
-"""
-parser.sql.alchemyadapter module (imdb.parser.sql package).
-
-This module adapts the SQLAlchemy ORM to the internal mechanism.
-
-Copyright 2008-2010 Davide Alberani <da@erlug.linux.it>
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
-"""
-
-import re
-import sys
-import logging
-from sqlalchemy import *
-from sqlalchemy import schema
-try: from sqlalchemy import exc # 0.5
-except ImportError: from sqlalchemy import exceptions as exc # 0.4
-
-_alchemy_logger = logging.getLogger('imdbpy.parser.sql.alchemy')
-
-try:
-    import migrate.changeset
-    HAS_MC = True
-except ImportError:
-    HAS_MC = False
-    _alchemy_logger.warn('Unable to import migrate.changeset: Foreign ' \
-                         'Keys will not be created.')
-
-from imdb._exceptions import IMDbDataAccessError
-from dbschema import *
-
-# Used to convert table and column names.
-re_upper = re.compile(r'([A-Z])')
-
-# XXX: I'm not sure at all that this is the best method to connect
-#      to the database and bind that connection to every table.
-metadata = MetaData()
-
-# Maps our placeholders to SQLAlchemy's column types.
-MAP_COLS = {
-    INTCOL: Integer,
-    UNICODECOL: UnicodeText,
-    STRINGCOL: String
-}
-
-
-class NotFoundError(IMDbDataAccessError):
-    """Exception raised when Table.get(id) returns no value."""
-    pass
-
-
-def _renameTable(tname):
-    """Build the name of a table, as done by SQLObject."""
-    tname = re_upper.sub(r'_\1', tname)
-    if tname.startswith('_'):
-        tname = tname[1:]
-    return tname.lower()
-
-def _renameColumn(cname):
-    """Build the name of a column, as done by SQLObject."""
-    cname = cname.replace('ID', 'Id')
-    return _renameTable(cname)
-
-
-class DNNameObj(object):
-    """Used to access table.sqlmeta.columns[column].dbName (a string)."""
-    def __init__(self, dbName):
-        self.dbName = dbName
-
-    def __repr__(self):
-        return '<DNNameObj(dbName=%s) [id=%s]>' % (self.dbName, id(self))
-
-
-class DNNameDict(object):
-    """Used to access table.sqlmeta.columns (a dictionary)."""
-    def __init__(self, colMap):
-        self.colMap = colMap
-
-    def __getitem__(self, key):
-        return DNNameObj(self.colMap[key])
-
-    def __repr__(self):
-        return '<DNNameDict(colMap=%s) [id=%s]>' % (self.colMap, id(self))
-
-
-class SQLMetaAdapter(object):
-    """Used to access table.sqlmeta (an object with .table, .columns and
-    .idName attributes)."""
-    def __init__(self, table, colMap=None):
-        self.table = table
-        if colMap is None:
-            colMap = {}
-        self.colMap = colMap
-
-    def __getattr__(self, name):
-        if name == 'table':
-            return getattr(self.table, name)
-        if name == 'columns':
-            return DNNameDict(self.colMap)
-        if name == 'idName':
-            return self.colMap.get('id', 'id')
-        return None
-
-    def __repr__(self):
-        return '<SQLMetaAdapter(table=%s, colMap=%s) [id=%s]>' % \
-                (repr(self.table), repr(self.colMap), id(self))
-
-
-class QAdapter(object):
-    """Used to access table.q attribute (remapped to SQLAlchemy table.c)."""
-    def __init__(self, table, colMap=None):
-        self.table = table
-        if colMap is None:
-            colMap = {}
-        self.colMap = colMap
-
-    def __getattr__(self, name):
-        try: return getattr(self.table.c, self.colMap[name])
-        except KeyError, e: raise AttributeError("unable to get '%s'" % name)
-
-    def __repr__(self):
-        return '<QAdapter(table=%s, colMap=%s) [id=%s]>' % \
-                (repr(self.table), repr(self.colMap), id(self))
-
-
-class RowAdapter(object):
-    """Adapter for a SQLAlchemy RowProxy object."""
-    def __init__(self, row, table, colMap=None):
-        self.row = row
-        # FIXME: it's OBSCENE that 'table' should be passed from
-        #        TableAdapter through ResultAdapter only to land here,
-        #        where it's used to directly update a row item.
-        self.table = table
-        if colMap is None:
-            colMap = {}
-        self.colMap = colMap
-        self.colMapKeys = colMap.keys()
-
-    def __getattr__(self, name):
-        try: return getattr(self.row, self.colMap[name])
-        except KeyError, e: raise AttributeError("unable to get '%s'" % name)
-
-    def __setattr__(self, name, value):
-        # FIXME: I can't even think about how much performance suffers
-        #        because of this horrible hack (and it's used so rarely...)
-        #        For sure something like a "property" to map column names
-        #        to getter/setter functions would be much better, but it's
-        #        not possible (or at least not easy) to build them for a
-        #        single instance.
-        if name in self.__dict__.get('colMapKeys', ()):
-            # Trying to update a value in the database.
-            row = self.__dict__['row']
-            table = self.__dict__['table']
-            colMap = self.__dict__['colMap']
-            params = {colMap[name]: value}
-            table.update(table.c.id==row.id).execute(**params)
-            # XXX: minor bug: after a value is assigned with the
-            #      'rowAdapterInstance.colName = value' syntax, for some
-            #      reason rowAdapterInstance.colName still returns the
-            #      previous value (even if the database is updated).
-            #      Fix it?  I'm not even sure it's ever used.
-            return
-        # For every other attribute.
-        object.__setattr__(self, name, value)
-
-    def __repr__(self):
-        return '<RowAdapter(row=%s, table=%s, colMap=%s) [id=%s]>' % \
-                (repr(self.row), repr(self.table), repr(self.colMap), id(self))
-
-
-class ResultAdapter(object):
-    """Adapter for a SQLAlchemy ResultProxy object."""
-    def __init__(self, result, table, colMap=None):
-        self.result = result
-        self.table = table
-        if colMap is None:
-            colMap = {}
-        self.colMap = colMap
-
-    def count(self):
-        return len(self)
-
-    def __len__(self):
-        # FIXME: why does sqlite return -1? (that's wrooong!)
-        if self.result.rowcount == -1:
-            return 0
-        return self.result.rowcount
-
-    def __getitem__(self, key):
-        res = list(self.result)[key]
-        if not isinstance(key, slice):
-            # A single item.
-            return RowAdapter(res, self.table, colMap=self.colMap)
-        else:
-            # A (possibly empty) list of items.
-            return [RowAdapter(x, self.table, colMap=self.colMap)
-                    for x in res]
-
-    def __iter__(self):
-        for item in self.result:
-            yield RowAdapter(item, self.table, colMap=self.colMap)
-
-    def __repr__(self):
-        return '<ResultAdapter(result=%s, table=%s, colMap=%s) [id=%s]>' % \
-                (repr(self.result), repr(self.table),
-                    repr(self.colMap), id(self))
-
-
-class TableAdapter(object):
-    """Adapter for a SQLAlchemy Table object, to mimic a SQLObject class."""
-    def __init__(self, table, uri=None):
-        """Initialize a TableAdapter object."""
-        self._imdbpySchema = table
-        self._imdbpyName = table.name
-        self.connectionURI = uri
-        self.colMap = {}
-        columns = []
-        for col in table.cols:
-            # Column's parameters.
-            params = {'nullable': True}
-            params.update(col.params)
-            if col.name == 'id':
-                params['primary_key'] = True
-            if 'notNone' in params:
-                params['nullable'] = not params['notNone']
-                del params['notNone']
-            cname = _renameColumn(col.name)
-            self.colMap[col.name] = cname
-            colClass = MAP_COLS[col.kind]
-            colKindParams = {}
-            if 'length' in params:
-                colKindParams['length'] = params['length']
-                del params['length']
-            elif colClass is UnicodeText and col.index:
-                # XXX: limit length for UNICODECOLs that will have an index.
-                #      this can result in name.name and title.title truncations!
-                colClass = Unicode
-                # Should work for most of the database servers.
-                length = 511
-                if self.connectionURI:
-                    if self.connectionURI.startswith('mysql'):
-                        # To stay compatible with MySQL 4.x.
-                        length = 255
-                colKindParams['length'] = length
-            elif self._imdbpyName == 'PersonInfo' and col.name == 'info':
-                if self.connectionURI:
-                    if self.connectionURI.startswith('ibm'):
-                        # There are some entries longer than 32KB.
-                        colClass = CLOB
-                        # I really do hope that this space isn't wasted
-                        # on every other, shorter entry... <g>
-                        colKindParams['length'] = 68*1024
-            colKind = colClass(**colKindParams)
-            if 'alternateID' in params:
-                # There's no need to handle them here.
-                del params['alternateID']
-            # Create a column.
-            colObj = Column(cname, colKind, **params)
-            columns.append(colObj)
-        self.tableName = _renameTable(table.name)
-        # Create the table.
-        self.table = Table(self.tableName, metadata, *columns)
-        self._ta_insert = self.table.insert()
-        self._ta_select = self.table.select
-        # Adapters for special attributes.
-        self.q = QAdapter(self.table, colMap=self.colMap)
-        self.sqlmeta = SQLMetaAdapter(self.table, colMap=self.colMap)
-
-    def select(self, conditions=None):
-        """Return a list of results."""
-        result = self._ta_select(conditions).execute()
-        return ResultAdapter(result, self.table, colMap=self.colMap)
-
-    def get(self, theID):
-        """Get an object given its ID."""
-        result = self.select(self.table.c.id == theID)
-        #if not result:
-        #    raise NotFoundError, 'no data for ID %s' % theID
-        # FIXME: isn't this a bit risky?  We can't check len(result),
-        #        because sqlite returns -1...
-        #        What about converting it to a list and getting the first item?
-        try:
-            return result[0]
-        except KeyError:
-            raise NotFoundError('no data for ID %s' % theID)
-
-    def dropTable(self, checkfirst=True):
-        """Drop the table."""
-        dropParams = {'checkfirst': checkfirst}
-        # Guess what?  Another work-around for an ibm_db bug.
-        if self.table.bind.engine.url.drivername.startswith('ibm_db'):
-            del dropParams['checkfirst']
-        try:
-            self.table.drop(**dropParams)
-        except exc.ProgrammingError:
-            # As above: re-raise the exception, but only if it's not ibm_db.
-            if not self.table.bind.engine.url.drivername.startswith('ibm_db'):
-                raise
-
-    def createTable(self, checkfirst=True):
-        """Create the table."""
-        self.table.create(checkfirst=checkfirst)
-        # Create indexes for alternateID columns (other indexes will be
-        # created later, at explicit request, for performance reasons).
-        for col in self._imdbpySchema.cols:
-            if col.name == 'id':
-                continue
-            if col.params.get('alternateID', False):
-                self._createIndex(col, checkfirst=checkfirst)
-
-    def _createIndex(self, col, checkfirst=True):
-        """Create an index for a given (schema) column."""
-        # XXX: indexLen is ignored in SQLAlchemy, and that means that
-        #      indexes will cover the full 255-character strings...
-        # NOTE: don't use a dot as a separator, or DB2 will do
-        #       nasty things.
-        idx_name = '%s_%s' % (self.table.name, col.index or col.name)
-        if checkfirst:
-            for index in self.table.indexes:
-                if index.name == idx_name:
-                    return
-        idx = Index(idx_name, getattr(self.table.c, self.colMap[col.name]))
-        # XXX: beware that exc.OperationalError can be raised in some
-        #      strange circumstances; that's why the index name doesn't
-        #      follow the SQLObject convention, but includes the table name:
-        #      sqlite, for example, expects index names to be unique at
-        #      db-level.
-        try:
-            idx.create()
-        except exc.OperationalError, e:
-            _alchemy_logger.warn('Skipping creation of the %s.%s index: %s' %
-                                (self.sqlmeta.table, col.name, e))
-
-    def addIndexes(self, ifNotExists=True):
-        """Create all required indexes."""
-        for col in self._imdbpySchema.cols:
-            if col.index:
-                self._createIndex(col, checkfirst=ifNotExists)
-
-    def addForeignKeys(self, mapTables, ifNotExists=True):
-        """Create all required foreign keys."""
-        if not HAS_MC:
-            return
-        # It seems that there's no reason to prevent the creation of
-        # indexes for columns with FK constraints: if there's already
-        # an index, the FK index is not created.
-        countCols = 0
-        for col in self._imdbpySchema.cols:
-            countCols += 1
-            if not col.foreignKey:
-                continue
-            fks = col.foreignKey.split('.', 1)
-            foreignTableName = fks[0]
-            if len(fks) == 2:
-                foreignColName = fks[1]
-            else:
-                foreignColName = 'id'
-            foreignColName = mapTables[foreignTableName].colMap.get(
-                                                foreignColName, foreignColName)
-            thisColName = self.colMap.get(col.name, col.name)
-            thisCol = self.table.columns[thisColName]
-            foreignTable = mapTables[foreignTableName].table
-            foreignCol = getattr(foreignTable.c, foreignColName)
-            # Need to explicitly set a unique name, otherwise it will
-            # explode if two cols point to the same table.
-            fkName = 'fk_%s_%s_%d' % (foreignTable.name, foreignColName,
-                                        countCols)
-            constrain = migrate.changeset.ForeignKeyConstraint([thisCol],
-                                                        [foreignCol],
-                                                        name=fkName)
-            try:
-                constrain.create()
-            except exc.OperationalError:
-                continue
-
-    def __call__(self, *args, **kwds):
-        """To insert a new row with the syntax: TableClass(key=value, ...)"""
-        taArgs = {}
-        for key, value in kwds.items():
-            taArgs[self.colMap.get(key, key)] = value
-        self._ta_insert.execute(*args, **taArgs)
-
-    def __repr__(self):
-        return '<TableAdapter(table=%s) [id=%s]>' % (repr(self.table), id(self))
-
-
-# Module-level "cache" for SQLObject classes, to prevent
-# "Table 'tableName' is already defined for this MetaData instance" errors,
-# when two or more connections to the database are made.
-# XXX: is this the best way to act?
-TABLES_REPOSITORY = {}
-
-def getDBTables(uri=None):
-    """Return a list of TableAdapter objects to be used to access the
-    database through the SQLAlchemy ORM.  The connection uri is optional, and
-    can be used to tailor the db schema to specific needs."""
-    DB_TABLES = []
-    for table in DB_SCHEMA:
-        if table.name in TABLES_REPOSITORY:
-            DB_TABLES.append(TABLES_REPOSITORY[table.name])
-            continue
-        tableAdapter = TableAdapter(table, uri)
-        DB_TABLES.append(tableAdapter)
-        TABLES_REPOSITORY[table.name] = tableAdapter
-    return DB_TABLES
-
-
-# Functions used to emulate SQLObject's logical operators.
-def AND(*params):
-    """Emulate SQLObject's AND."""
-    return and_(*params)
-
-def OR(*params):
-    """Emulate SQLObject's OR."""
-    return or_(*params)
-
-def IN(item, inList):
-    """Emulate SQLObject's IN."""
-    if not isinstance(item, schema.Column):
-        return OR(*[x == item for x in inList])
-    else:
-        return item.in_(inList)
-
-def ISNULL(x):
-    """Emulate SQLObject's ISNULL."""
-    # XXX: Should we use null()?  Can null() be a global instance?
-    # XXX: Is it safe to test None with the == operator, in this case?
-    return x == None
-
-def ISNOTNULL(x):
-    """Emulate SQLObject's ISNOTNULL."""
-    return x != None
-
-def CONTAINSSTRING(expr, pattern):
-    """Emulate SQLObject's CONTAINSSTRING."""
-    return expr.like('%%%s%%' % pattern)
-
-
-def toUTF8(s):
-    """For some strange reason, sometimes SQLObject wants utf8 strings
-    instead of unicode; with SQLAlchemy we just return the unicode text."""
-    return s
-
-
-class _AlchemyConnection(object):
-    """A proxy for the connection object, required since _ConnectionFairy
-    uses __slots__."""
-    def __init__(self, conn):
-        self.conn = conn
-
-    def __getattr__(self, name):
-        return getattr(self.conn, name)
-
-
-def setConnection(uri, tables, encoding='utf8', debug=False):
-    """Set connection for every table."""
-    params = {'encoding': encoding}
-    # FIXME: why on earth MySQL requires an additional parameter
-    #        is well beyond my understanding...
-    if uri.startswith('mysql'):
-        if '?' in uri:
-            uri += '&'
-        else:
-            uri += '?'
-        uri += 'charset=%s' % encoding
-        
-        # On some server configurations, we will need to explicitly enable
-        # loading data from local files
-        params['local_infile'] = 1
-   
-    if debug:
-        params['echo'] = True
-    if uri.startswith('ibm_db'):
-        # Try to work around a possible bug in the ibm_db DB2 driver.
-        params['convert_unicode'] = True
-    # XXX: is this the best way to connect?
-    engine = create_engine(uri, **params)
-    metadata.bind = engine
-    eng_conn = engine.connect()
-    if uri.startswith('sqlite'):
-        major = sys.version_info[0]
-        minor = sys.version_info[1]
-        if major > 2 or (major == 2 and minor > 5):
-            eng_conn.connection.connection.text_factory = str
-    # XXX: OH MY, THAT'S A MESS!
-    #      We need to return a "connection" object, with the .dbName
-    #      attribute set to the db engine name (e.g. "mysql"), .paramstyle
-    #      set to the style of the parameters for query() calls, and the
-    #      .module attribute set to a module (?) with .OperationalError and
-    #      .IntegrityError attributes.
-    #      Another attribute of "connection" is the getConnection() function,
-    #      used to return an object with a .cursor() method.
-    connection = _AlchemyConnection(eng_conn.connection)
-    paramstyle = eng_conn.dialect.paramstyle
-    connection.module = eng_conn.dialect.dbapi
-    connection.paramstyle = paramstyle
-    connection.getConnection = lambda: connection.connection
-    connection.dbName = engine.url.drivername
-    return connection
-
-
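
A small standalone check (illustrative only, not part of the removed file) of the SQLObject-style name mangling that _renameTable() and _renameColumn() implement above, turning camelCase schema names into the snake_case names used in the database:

```
import re

re_upper = re.compile(r'([A-Z])')

def rename_table(tname):
    # 'MovieInfoIdx' -> 'movie_info_idx', as SQLObject would name the table.
    tname = re_upper.sub(r'_\1', tname)
    if tname.startswith('_'):
        tname = tname[1:]
    return tname.lower()

def rename_column(cname):
    # 'personRoleID' -> 'person_role_id' ('ID' is first normalized to 'Id').
    return rename_table(cname.replace('ID', 'Id'))

assert rename_table('MovieInfoIdx') == 'movie_info_idx'
assert rename_column('personRoleID') == 'person_role_id'
```
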
diff --git a/lib/imdb/parser/sql/cutils.c b/lib/imdb/parser/sql/cutils.c
deleted file mode 100644
index 677c1b1e0a039bfa38c989b612236abd880b99a1..0000000000000000000000000000000000000000
--- a/lib/imdb/parser/sql/cutils.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * cutils.c module.
- *
- * Miscellaneous functions to speed up the IMDbPY package.
- *
- * Contents:
- * - pyratcliff():
- *   Function that implements the Ratcliff-Obershelp comparison
- *   amongst Python strings.
- *
- * - pysoundex():
- *   Return a soundex code string, for the given string.
- *
- * Copyright 2004-2009 Davide Alberani <da@erlug.linux.it>
- * Released under the GPL license.
- *
- * NOTE: The Ratcliff-Obershelp part was heavily based on code from the
- * "simil" Python module.
- * The "simil" module is copyright of Luca Montecchiani <cbm64 _at_ inwind.it>
- * and can be found here: http://spazioinwind.libero.it/montecchiani/
- * It was released under the GPL license; original comments are left
- * below.
- *
- */
-
-
-/*========== Ratcliff-Obershelp ==========*/
-/*****************************************************************************
- *
- * Stolen code from :
- *
- * [Python-Dev] Why is soundex marked obsolete?
- * by Eric S. Raymond [4]esr@thyrsus.com
- * on Sun, 14 Jan 2001 14:09:01 -0500
- *
- *****************************************************************************/
-
-/*****************************************************************************
- *
- * Ratcliff-Obershelp common-subpattern similarity.
- *
- * This code first appeared in a letter to the editor in Doctor
- * Dobbs's Journal, 11/1988.  The original article on the algorithm,
- * "Pattern Matching by Gestalt" by John Ratcliff, had appeared in the
- * July 1988 issue (#181) but the algorithm was presented in assembly.
- * The main drawback of the Ratcliff-Obershelp algorithm is the cost
- * of the pairwise comparisons.  It is significantly more expensive
- * than stemming, Hamming distance, soundex, and the like.
- *
- * Running time quadratic in the data size, memory usage constant.
- *
- *****************************************************************************/
-
-#include <Python.h>
-
-#define DONTCOMPARE_NULL    0.0
-#define DONTCOMPARE_SAME    1.0
-#define COMPARE             2.0
-#define STRING_MAXLENDIFFER 0.7
-
-/* As of 05 Mar 2008, the longest title is ~600 chars. */
-#define MXLINELEN   1023
-
-#define MAX(a,b) ((a) > (b) ? (a) : (b))
-
-
-//*****************************************
-// preliminary check....
-//*****************************************
-static float
-strings_check(char const *s, char const *t)
-{
-    float threshold;    // length difference
-    int s_len = strlen(s);    // length of s
-    int t_len = strlen(t);    // length of t
-
-    // NULL strings ?
-    if ((t_len * s_len) == 0)
-        return (DONTCOMPARE_NULL);
-
-    // the same ?
-    if (strcmp(s, t) == 0)
-        return (DONTCOMPARE_SAME);
-
-    // string length difference threshold
-    // we don't want to compare strings of too different lengths ;)
-    if (s_len < t_len)
-        threshold = (float) s_len / (float) t_len;
-    else
-        threshold = (float) t_len / (float) s_len;
-    if (threshold < STRING_MAXLENDIFFER)
-        return (DONTCOMPARE_NULL);
-
-    // proceed
-    return (COMPARE);
-}
-
-
-static int
-RatcliffObershelp(char *st1, char *end1, char *st2, char *end2)
-{
-    register char *a1, *a2;
-    char *b1, *b2;
-    char *s1 = st1, *s2 = st2;    /* initializations are just to pacify GCC */
-    short max, i;
-
-    if (end1 <= st1 || end2 <= st2)
-        return (0);
-    if (end1 == st1 + 1 && end2 == st2 + 1)
-        return (0);
-
-    max = 0;
-    b1 = end1;
-    b2 = end2;
-
-    for (a1 = st1; a1 < b1; a1++) {
-        for (a2 = st2; a2 < b2; a2++) {
-            if (*a1 == *a2) {
-                /* determine length of common substring */
-                for (i = 1; a1[i] && (a1[i] == a2[i]); i++)
-                    continue;
-                if (i > max) {
-                    max = i;
-                    s1 = a1;
-                    s2 = a2;
-                    b1 = end1 - max;
-                    b2 = end2 - max;
-                }
-            }
-        }
-    }
-    if (!max)
-        return (0);
-    max += RatcliffObershelp(s1 + max, end1, s2 + max, end2);    /* rhs */
-    max += RatcliffObershelp(st1, s1, st2, s2);    /* lhs */
-    return max;
-}
-
-
-static float
-ratcliff(char *s1, char *s2)
-/* compute Ratcliff-Obershelp similarity of two strings */
-{
-    int l1, l2;
-    float res;
-
-    // preliminary tests
-    res = strings_check(s1, s2);
-    if (res != COMPARE)
-        return(res);
-
-    l1 = strlen(s1);
-    l2 = strlen(s2);
-
-    return 2.0 * RatcliffObershelp(s1, s1 + l1, s2, s2 + l2) / (l1 + l2);
-}
-
-
-/* Change a string to lowercase. */
-static void
-strtolower(char *s1)
-{
-    int i;
-    for (i=0; i < strlen(s1); i++) s1[i] = tolower(s1[i]);
-}
-
-
-/* Ratcliff-Obershelp for two python strings; returns a python float. */
-static PyObject*
-pyratcliff(PyObject *self, PyObject *pArgs)
-{
-    char *s1 = NULL;
-    char *s2 = NULL;
-    PyObject *discard = NULL;
-    char s1copy[MXLINELEN+1];
-    char s2copy[MXLINELEN+1];
-
-    /* The optional PyObject parameter is here to be compatible
-     * with the pure python implementation, which uses a
-     * difflib.SequenceMatcher object. */
-    if (!PyArg_ParseTuple(pArgs, "ss|O", &s1, &s2, &discard))
-        return NULL;
-
-    strncpy(s1copy, s1, MXLINELEN);
-    strncpy(s2copy, s2, MXLINELEN);
-    /* Work on copies. */
-    strtolower(s1copy);
-    strtolower(s2copy);
-
-    return Py_BuildValue("f", ratcliff(s1copy, s2copy));
-}
-
-
-/*========== soundex ==========*/
-/* Max length of the soundex code to output (an uppercase char and
- * _at most_ 4 digits). */
-#define SOUNDEX_LEN 5
-
-/* Group Number Lookup Table  */
-static char soundTable[26] =
-{ 0 /* A */, '1' /* B */, '2' /* C */, '3' /* D */, 0 /* E */, '1' /* F */,
- '2' /* G */, 0 /* H */, 0 /* I */, '2' /* J */, '2' /* K */, '4' /* L */,
- '5' /* M */, '5' /* N */, 0 /* O */, '1' /* P */, '2' /* Q */, '6' /* R */,
- '2' /* S */, '3' /* T */, 0 /* U */, '1' /* V */, 0 /* W */, '2' /* X */,
-  0 /* Y */, '2' /* Z */};
-
-static PyObject*
-pysoundex(PyObject *self, PyObject *pArgs)
-{
-    int i, j, n;
-    char *s = NULL;
-    char word[MXLINELEN+1];
-    char soundCode[SOUNDEX_LEN+1];
-    char c;
-
-    if (!PyArg_ParseTuple(pArgs, "s", &s))
-        return NULL;
-
-    j = 0;
-    n = strlen(s);
-
-    /* Convert to uppercase and exclude non-ascii chars. */
-    for (i = 0; i < n; i++) {
-        c = toupper(s[i]);
-        if (c < 91 && c > 64) {
-            word[j] = c;
-            j++;
-        }
-    }
-    word[j] = '\0';
-
-    n = strlen(word);
-    if (n == 0) {
-        /* If the string is empty, return None. */
-        return Py_BuildValue("");
-    }
-    soundCode[0] = word[0];
-
-    /* Build the soundCode string. */
-    j = 1;
-    for (i = 1; j < SOUNDEX_LEN && i < n; i++) {
-        c = soundTable[(word[i]-65)];
-        /* Compact zeroes and equal consecutive digits ("12234112"->"123412") */
-        if (c != 0 && c != soundCode[j-1]) {
-                soundCode[j++] = c;
-        }
-    }
-    soundCode[j] = '\0';
-
-    return Py_BuildValue("s", soundCode);
-}
-
-
-static PyMethodDef cutils_methods[] = {
-    {"ratcliff", pyratcliff,
-        METH_VARARGS, "Ratcliff-Obershelp similarity."},
-    {"soundex", pysoundex,
-        METH_VARARGS, "Soundex code for strings."},
-    {NULL}
-};
-
-
-void
-initcutils(void)
-{
-    Py_InitModule("cutils", cutils_methods);
-}
-
-
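
The C extension above is only a speed-up; as its own comments note, the pure-Python fallback relies on difflib.SequenceMatcher, whose ratio() computes the same 2*M/T Ratcliff-Obershelp measure. A rough, approximate stand-in for ratcliff(), reusing the preliminary checks and the 0.7 length threshold from the C macros, could look like this (a sketch, not the project's actual fallback code):

```
from difflib import SequenceMatcher

STRING_MAXLENDIFFER = 0.7   # same cut-off as the C macro above

def ratcliff(s1, s2):
    """Approximate pure-Python counterpart of the deleted ratcliff()."""
    s1, s2 = s1.lower(), s2.lower()
    if not s1 or not s2:
        return 0.0                       # DONTCOMPARE_NULL
    if s1 == s2:
        return 1.0                       # DONTCOMPARE_SAME
    shorter, longer = sorted((len(s1), len(s2)))
    if float(shorter) / longer < STRING_MAXLENDIFFER:
        return 0.0                       # lengths differ too much: skip
    return SequenceMatcher(None, s1, s2).ratio()

print(ratcliff('the matrix', 'matrix, the'))
```
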
diff --git a/lib/imdb/parser/sql/dbschema.py b/lib/imdb/parser/sql/dbschema.py
deleted file mode 100644
index 9bb855d33b43ec959adb438bc82d259cd767186d..0000000000000000000000000000000000000000
--- a/lib/imdb/parser/sql/dbschema.py
+++ /dev/null
@@ -1,476 +0,0 @@
-#-*- encoding: utf-8 -*-
-"""
-parser.sql.dbschema module (imdb.parser.sql package).
-
-This module provides the schema used to describe the layout of the
-database used by the imdb.parser.sql package; functions to create/drop
-tables and indexes are also provided.
-
-Copyright 2005-2012 Davide Alberani <da@erlug.linux.it>
-               2006 Giuseppe "Cowo" Corbelli <cowo --> lugbs.linux.it>
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
-"""
-
-import logging
-
-_dbschema_logger = logging.getLogger('imdbpy.parser.sql.dbschema')
-
-
-# Placeholders for column types.
-INTCOL = 1
-UNICODECOL = 2
-STRINGCOL = 3
-_strMap = {1: 'INTCOL', 2: 'UNICODECOL', 3: 'STRINGCOL'}
-
-class DBCol(object):
-    """Define column objects."""
-    def __init__(self, name, kind, **params):
-        self.name = name
-        self.kind = kind
-        self.index = None
-        self.indexLen = None
-        # If not None, two notations are accepted: 'TableName'
-        # and 'TableName.ColName'; in the first case, 'id' is assumed
-        # as the name of the pointed column.
-        self.foreignKey = None
-        if 'index' in params:
-            self.index = params['index']
-            del params['index']
-        if 'indexLen' in params:
-            self.indexLen = params['indexLen']
-            del params['indexLen']
-        if 'foreignKey' in params:
-            self.foreignKey = params['foreignKey']
-            del params['foreignKey']
-        self.params = params
-
-    def __str__(self):
-        """Class representation."""
-        s = '<DBCol %s %s' % (self.name, _strMap[self.kind])
-        if self.index:
-            s += ' INDEX'
-            if self.indexLen:
-                s += '[:%d]' % self.indexLen
-        if self.foreignKey:
-            s += ' FOREIGN'
-        if 'default' in self.params:
-            val = self.params['default']
-            if val is not None:
-                val = '"%s"' % val
-            s += ' DEFAULT=%s' % val
-        for param in self.params:
-            if param == 'default': continue
-            s += ' %s' % param.upper()
-        s += '>'
-        return s
-
-    def __repr__(self):
-        """Class representation."""
-        s = '<DBCol(name="%s", %s' % (self.name, _strMap[self.kind])
-        if self.index:
-            s += ', index="%s"' % self.index
-        if self.indexLen:
-             s += ', indexLen=%d' % self.indexLen
-        if self.foreignKey:
-            s += ', foreignKey="%s"' % self.foreignKey
-        for param in self.params:
-            val = self.params[param]
-            if isinstance(val, (unicode, str)):
-                val = u'"%s"' % val
-            s += ', %s=%s' % (param, val)
-        s += ')>'
-        return s
-
-
-class DBTable(object):
-    """Define table objects."""
-    def __init__(self, name, *cols, **kwds):
-        self.name = name
-        self.cols = cols
-        # Default values.
-        self.values = kwds.get('values', {})
-
-    def __str__(self):
-        """Class representation."""
-        return '<DBTable %s (%d cols, %d values)>' % (self.name,
-                len(self.cols), sum([len(v) for v in self.values.values()]))
-
-    def __repr__(self):
-        """Class representation."""
-        s = '<DBTable(name="%s"' % self.name
-        col_s = ', '.join([repr(col).rstrip('>').lstrip('<')
-                            for col in self.cols])
-        if col_s:
-            s += ', %s' % col_s
-        if self.values:
-            s += ', values=%s' % self.values
-        s += ')>'
-        return s
-
-
-# Default values to insert in some tables: {'column': (list, of, values, ...)}
-kindTypeDefs = {'kind': ('movie', 'tv series', 'tv movie', 'video movie',
-                        'tv mini series', 'video game', 'episode')}
-companyTypeDefs = {'kind': ('distributors', 'production companies',
-                        'special effects companies', 'miscellaneous companies')}
-infoTypeDefs = {'info': ('runtimes', 'color info', 'genres', 'languages',
-    'certificates', 'sound mix', 'tech info', 'countries', 'taglines',
-    'keywords', 'alternate versions', 'crazy credits', 'goofs',
-    'soundtrack', 'quotes', 'release dates', 'trivia', 'locations',
-    'mini biography', 'birth notes', 'birth date', 'height',
-    'death date', 'spouse', 'other works', 'birth name',
-    'salary history', 'nick names', 'books', 'agent address',
-    'biographical movies', 'portrayed in', 'where now', 'trade mark',
-    'interviews', 'article', 'magazine cover photo', 'pictorial',
-    'death notes', 'LD disc format', 'LD year', 'LD digital sound',
-    'LD official retail price', 'LD frequency response', 'LD pressing plant',
-    'LD length', 'LD language', 'LD review', 'LD spaciality', 'LD release date',
-    'LD production country', 'LD contrast', 'LD color rendition',
-    'LD picture format', 'LD video noise', 'LD video artifacts',
-    'LD release country', 'LD sharpness', 'LD dynamic range',
-    'LD audio noise', 'LD color information', 'LD group genre',
-    'LD quality program', 'LD close captions-teletext-ld-g',
-    'LD category', 'LD analog left', 'LD certification',
-    'LD audio quality', 'LD video quality', 'LD aspect ratio',
-    'LD analog right', 'LD additional information',
-    'LD number of chapter stops', 'LD dialogue intellegibility',
-    'LD disc size', 'LD master format', 'LD subtitles',
-    'LD status of availablility', 'LD quality of source',
-    'LD number of sides', 'LD video standard', 'LD supplement',
-    'LD original title', 'LD sound encoding', 'LD number', 'LD label',
-    'LD catalog number', 'LD laserdisc title', 'screenplay-teleplay',
-    'novel', 'adaption', 'book', 'production process protocol',
-    'printed media reviews', 'essays', 'other literature', 'mpaa',
-    'plot', 'votes distribution', 'votes', 'rating',
-    'production dates', 'copyright holder', 'filming dates', 'budget',
-    'weekend gross', 'gross', 'opening weekend', 'rentals',
-    'admissions', 'studios', 'top 250 rank', 'bottom 10 rank')}
-compCastTypeDefs = {'kind': ('cast', 'crew', 'complete', 'complete+verified')}
-linkTypeDefs = {'link': ('follows', 'followed by', 'remake of', 'remade as',
-                        'references', 'referenced in', 'spoofs', 'spoofed in',
-                        'features', 'featured in', 'spin off from', 'spin off',
-                        'version of', 'similar to', 'edited into',
-                        'edited from', 'alternate language version of',
-                        'unknown link')}
-roleTypeDefs = {'role': ('actor', 'actress', 'producer', 'writer',
-                        'cinematographer', 'composer', 'costume designer',
-                        'director', 'editor', 'miscellaneous crew',
-                        'production designer', 'guest')}
-
-# Schema of tables in our database.
-# XXX: Foreign keys can be used to create constraints between tables,
-#      but they create indexes in the database, and this
-#      means poor performance at insert time.
-DB_SCHEMA = [
-    DBTable('Name',
-        # namePcodeCf is the soundex of the name in the canonical format.
-        # namePcodeNf is the soundex of the name in the normal format, if
-        # different from namePcodeCf.
-        # surnamePcode is the soundex of the surname, if different from the
-        # other two values.
-
-        # The 'id' column is simply skipped by SQLObject (it's a default);
-        # the alternateID attribute here will be ignored by SQLAlchemy.
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
-        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
-        DBCol('imdbID', INTCOL, default=None, index='idx_imdb_id'),
-        DBCol('gender', STRINGCOL, length=1, default=None),
-        DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
-                index='idx_pcodecf'),
-        DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
-                index='idx_pcodenf'),
-        DBCol('surnamePcode', STRINGCOL, length=5, default=None,
-                index='idx_pcode'),
-        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
-    ),
-
-    DBTable('CharName',
-        # namePcodeNf is the soundex of the name in the normal format.
-        # surnamePcode is the soundex of the surname, if different
-        # from namePcodeNf.
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
-        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
-        DBCol('imdbID', INTCOL, default=None),
-        DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
-                index='idx_pcodenf'),
-        DBCol('surnamePcode', STRINGCOL, length=5, default=None,
-                index='idx_pcode'),
-        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
-    ),
-
-    DBTable('CompanyName',
-        # namePcodeNf is the soundex of the name in the normal format.
-        # namePcodeSf is the soundex of the name plus the country code.
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
-        DBCol('countryCode', UNICODECOL, length=255, default=None),
-        DBCol('imdbID', INTCOL, default=None),
-        DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
-                index='idx_pcodenf'),
-        DBCol('namePcodeSf', STRINGCOL, length=5, default=None,
-                index='idx_pcodesf'),
-        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
-    ),
-
-    DBTable('KindType',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('kind', STRINGCOL, length=15, default=None, alternateID=True),
-        values=kindTypeDefs
-    ),
-
-    DBTable('Title',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('title', UNICODECOL, notNone=True,
-                index='idx_title', indexLen=10),
-        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
-        DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
-        DBCol('productionYear', INTCOL, default=None),
-        DBCol('imdbID', INTCOL, default=None, index="idx_imdb_id"),
-        DBCol('phoneticCode', STRINGCOL, length=5, default=None,
-                index='idx_pcode'),
-        DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
-                foreignKey='Title'),
-        DBCol('seasonNr', INTCOL, default=None, index="idx_season_nr"),
-        DBCol('episodeNr', INTCOL, default=None, index="idx_episode_nr"),
-        # Maximum observed length is 44; 49 can store 5 comma-separated
-        # year-year pairs.
-        DBCol('seriesYears', STRINGCOL, length=49, default=None),
-        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
-    ),
-
-    DBTable('CompanyType',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('kind', STRINGCOL, length=32, default=None, alternateID=True),
-        values=companyTypeDefs
-    ),
-
-    DBTable('AkaName',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('personID', INTCOL, notNone=True, index='idx_person',
-                foreignKey='Name'),
-        DBCol('name', UNICODECOL, notNone=True),
-        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
-        DBCol('namePcodeCf',  STRINGCOL, length=5, default=None,
-                index='idx_pcodecf'),
-        DBCol('namePcodeNf',  STRINGCOL, length=5, default=None,
-                index='idx_pcodenf'),
-        DBCol('surnamePcode',  STRINGCOL, length=5, default=None,
-                index='idx_pcode'),
-        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
-    ),
-
-    DBTable('AkaTitle',
-        # XXX: It's safer to set notNone to False here.
-        #      Aliases (akas) are stored completely in the AkaTitle table;
-        #      this means that episodes will also set a "tv series" alias name.
-        #      Reading the aka-title.list file it looks like there are
-        #      episode titles with aliases to different titles for both
-        #      the episode and the series title, while for just the series
-        #      there are no aliases.
-        #      E.g.:
-        #      aka title                                 original title
-        # "Series, The" (2005) {The Episode}  "Other Title" (2005) {Other Title}
-        # But there is no:
-        # "Series, The" (2005)                "Other Title" (2005)
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('movieID', INTCOL, notNone=True, index='idx_movieid',
-                foreignKey='Title'),
-        DBCol('title', UNICODECOL, notNone=True),
-        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
-        DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
-        DBCol('productionYear', INTCOL, default=None),
-        DBCol('phoneticCode',  STRINGCOL, length=5, default=None,
-                index='idx_pcode'),
-        DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
-                foreignKey='AkaTitle'),
-        DBCol('seasonNr', INTCOL, default=None),
-        DBCol('episodeNr', INTCOL, default=None),
-        DBCol('note', UNICODECOL, default=None),
-        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
-    ),
-
-    DBTable('RoleType',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('role', STRINGCOL, length=32, notNone=True, alternateID=True),
-        values=roleTypeDefs
-    ),
-
-    DBTable('CastInfo',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('personID', INTCOL, notNone=True, index='idx_pid',
-                foreignKey='Name'),
-        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
-                foreignKey='Title'),
-        DBCol('personRoleID', INTCOL, default=None, index='idx_cid',
-                foreignKey='CharName'),
-        DBCol('note', UNICODECOL, default=None),
-        DBCol('nrOrder', INTCOL, default=None),
-        DBCol('roleID', INTCOL, notNone=True, foreignKey='RoleType')
-    ),
-
-    DBTable('CompCastType',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('kind', STRINGCOL, length=32, notNone=True, alternateID=True),
-        values=compCastTypeDefs
-    ),
-
-    DBTable('CompleteCast',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('movieID', INTCOL, index='idx_mid', foreignKey='Title'),
-        DBCol('subjectID', INTCOL, notNone=True, foreignKey='CompCastType'),
-        DBCol('statusID', INTCOL, notNone=True, foreignKey='CompCastType')
-    ),
-
-    DBTable('InfoType',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('info', STRINGCOL, length=32, notNone=True, alternateID=True),
-        values=infoTypeDefs
-    ),
-
-    DBTable('LinkType',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('link', STRINGCOL, length=32, notNone=True, alternateID=True),
-        values=linkTypeDefs
-    ),
-
-    DBTable('Keyword',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        # XXX: can't use alternateID=True, because it would create
-        #      a UNIQUE index; unfortunately (at least with a common
-        #      collation like utf8_unicode_ci) MySQL will consider
-        #      some different keywords identical - like
-        #      "fiancée" and "fiancee".
-        DBCol('keyword', UNICODECOL, notNone=True,
-                index='idx_keyword', indexLen=5),
-        DBCol('phoneticCode', STRINGCOL, length=5, default=None,
-                index='idx_pcode')
-    ),
-
-    DBTable('MovieKeyword',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
-                foreignKey='Title'),
-        DBCol('keywordID', INTCOL, notNone=True, index='idx_keywordid',
-                foreignKey='Keyword')
-    ),
-
-    DBTable('MovieLink',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
-                foreignKey='Title'),
-        DBCol('linkedMovieID', INTCOL, notNone=True, foreignKey='Title'),
-        DBCol('linkTypeID', INTCOL, notNone=True, foreignKey='LinkType')
-    ),
-
-    DBTable('MovieInfo',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
-                foreignKey='Title'),
-        DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
-        DBCol('info', UNICODECOL, notNone=True),
-        DBCol('note', UNICODECOL, default=None)
-    ),
-
-    # This table is identical to MovieInfo, except that both 'infoTypeID'
-    # and 'info' are indexed.
-    DBTable('MovieInfoIdx',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
-                foreignKey='Title'),
-        DBCol('infoTypeID', INTCOL, notNone=True, index='idx_infotypeid',
-                foreignKey='InfoType'),
-        DBCol('info', UNICODECOL, notNone=True, index='idx_info', indexLen=10),
-        DBCol('note', UNICODECOL, default=None)
-    ),
-
-    DBTable('MovieCompanies',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
-                foreignKey='Title'),
-        DBCol('companyID', INTCOL, notNone=True, index='idx_cid',
-                foreignKey='CompanyName'),
-        DBCol('companyTypeID', INTCOL, notNone=True, foreignKey='CompanyType'),
-        DBCol('note', UNICODECOL, default=None)
-    ),
-
-    DBTable('PersonInfo',
-        DBCol('id', INTCOL, notNone=True, alternateID=True),
-        DBCol('personID', INTCOL, notNone=True, index='idx_pid',
-                foreignKey='Name'),
-        DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
-        DBCol('info', UNICODECOL, notNone=True),
-        DBCol('note', UNICODECOL, default=None)
-    )
-]
-
-
-# Functions to manage tables.
-def dropTables(tables, ifExists=True):
-    """Drop the tables."""
-    # In reverse order (useful to avoid errors about foreign keys).
-    DB_TABLES_DROP = list(tables)
-    DB_TABLES_DROP.reverse()
-    for table in DB_TABLES_DROP:
-        _dbschema_logger.info('dropping table %s', table._imdbpyName)
-        table.dropTable(ifExists)
-
-def createTables(tables, ifNotExists=True):
-    """Create the tables and insert default values."""
-    for table in tables:
-        # Create the table.
-        _dbschema_logger.info('creating table %s', table._imdbpyName)
-        table.createTable(ifNotExists)
-        # Insert default values, if any.
-        if table._imdbpySchema.values:
-            _dbschema_logger.info('inserting values into table %s',
-                                    table._imdbpyName)
-            for key in table._imdbpySchema.values:
-                for value in table._imdbpySchema.values[key]:
-                    table(**{key: unicode(value)})
-
-def createIndexes(tables, ifNotExists=True):
-    """Create the indexes in the database.
-    Return a list of errors, if any."""
-    errors = []
-    for table in tables:
-        _dbschema_logger.info('creating indexes for table %s',
-                                table._imdbpyName)
-        try:
-            table.addIndexes(ifNotExists)
-        except Exception, e:
-            errors.append(e)
-            continue
-    return errors
-
-def createForeignKeys(tables, ifNotExists=True):
-    """Create Foreign Keys.
-    Return a list of errors, if any."""
-    errors = []
-    mapTables = {}
-    for table in tables:
-        mapTables[table._imdbpyName] = table
-    for table in tables:
-        _dbschema_logger.info('creating foreign keys for table %s',
-                                table._imdbpyName)
-        try:
-            table.addForeignKeys(mapTables, ifNotExists)
-        except Exception, e:
-            errors.append(e)
-            continue
-    return errors
-
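Editor's note: the dropped schema module also carried the helpers that create, seed, and tear down these tables. One detail worth remembering if this logic is ever reintroduced is that `dropTables()` walks the list in reverse creation order, so tables holding foreign keys are dropped before the tables they reference. A minimal, library-free sketch of that ordering rule (the table names are just an illustrative subset):

```python
# Illustrative only: FK-holding tables must be dropped before their parents.
creation_order = ['Title', 'Keyword', 'MovieKeyword']  # hypothetical subset

def drop_all(names):
    for name in reversed(names):
        print('DROP TABLE IF EXISTS %s;' % name)

drop_all(creation_order)
# DROP TABLE IF EXISTS MovieKeyword;
# DROP TABLE IF EXISTS Keyword;
# DROP TABLE IF EXISTS Title;
```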
diff --git a/lib/imdb/parser/sql/objectadapter.py b/lib/imdb/parser/sql/objectadapter.py
deleted file mode 100644
index 170e5164cdeccd99bf4c0fda0f732eba1ac2c06d..0000000000000000000000000000000000000000
--- a/lib/imdb/parser/sql/objectadapter.py
+++ /dev/null
@@ -1,211 +0,0 @@
-"""
-parser.sql.objectadapter module (imdb.parser.sql package).
-
-This module adapts the SQLObject ORM to the internal mechanism.
-
-Copyright 2008-2010 Davide Alberani <da@erlug.linux.it>
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
-"""
-
-import sys
-import logging
-
-from sqlobject import *
-from sqlobject.sqlbuilder import ISNULL, ISNOTNULL, AND, OR, IN, CONTAINSSTRING
-
-from dbschema import *
-
-_object_logger = logging.getLogger('imdbpy.parser.sql.object')
-
-
-# Maps our placeholders to SQLAlchemy's column types.
-MAP_COLS = {
-        INTCOL: IntCol,
-        UNICODECOL: UnicodeCol,
-        STRINGCOL: StringCol
-}
-
-
-# Exception raised when Table.get(id) returns no value.
-NotFoundError = SQLObjectNotFound
-
-
-# class method to be added to the SQLObject class.
-def addIndexes(cls, ifNotExists=True):
-    """Create all required indexes."""
-    for col in cls._imdbpySchema.cols:
-        if col.index:
-            idxName = col.index
-            colToIdx = col.name
-            if col.indexLen:
-                colToIdx = {'column': col.name, 'length': col.indexLen}
-            if idxName in [i.name for i in cls.sqlmeta.indexes]:
-                # Check if the index is already present.
-                continue
-            idx = DatabaseIndex(colToIdx, name=idxName)
-            cls.sqlmeta.addIndex(idx)
-    try:
-        cls.createIndexes(ifNotExists)
-    except dberrors.OperationalError, e:
-        _object_logger.warn('Skipping creation of the %s.%s index: %s' %
-                            (cls.sqlmeta.table, col.name, e))
-addIndexes = classmethod(addIndexes)
-
-
-# Global repository for "fake" tables with Foreign Keys - need to
-# prevent troubles if addForeignKeys is called more than one time.
-FAKE_TABLES_REPOSITORY = {}
-
-def _buildFakeFKTable(cls, fakeTableName):
-    """Return a "fake" table, with foreign keys where needed."""
-    countCols = 0
-    attrs = {}
-    for col in cls._imdbpySchema.cols:
-        countCols += 1
-        if col.name == 'id':
-            continue
-        if not col.foreignKey:
-            # A non-foreign key column - add it as usual.
-            attrs[col.name] = MAP_COLS[col.kind](**col.params)
-            continue
-        # XXX: Foreign Keys pointing to TableName.ColName not yet supported.
-        thisColName = col.name
-        if thisColName.endswith('ID'):
-            thisColName = thisColName[:-2]
-
-        fks = col.foreignKey.split('.', 1)
-        foreignTableName = fks[0]
-        if len(fks) == 2:
-            foreignColName = fks[1]
-        else:
-            foreignColName = 'id'
-        # Unused...
-        #fkName = 'fk_%s_%s_%d' % (foreignTableName, foreignColName,
-        #                        countCols)
-        # Create a Foreign Key column, with the correct references.
-        fk = ForeignKey(foreignTableName, name=thisColName, default=None)
-        attrs[thisColName] = fk
-    # Build a _NEW_ SQLObject subclass, with foreign keys, if needed.
-    newcls = type(fakeTableName, (SQLObject,), attrs)
-    return newcls
-
-def addForeignKeys(cls, mapTables, ifNotExists=True):
-    """Create all required foreign keys."""
-    # Do not even try, if there are no FK, in this table.
-    if not filter(None, [col.foreignKey for col in cls._imdbpySchema.cols]):
-        return
-    fakeTableName = 'myfaketable%s' % cls.sqlmeta.table
-    if fakeTableName in FAKE_TABLES_REPOSITORY:
-        newcls = FAKE_TABLES_REPOSITORY[fakeTableName]
-    else:
-        newcls = _buildFakeFKTable(cls, fakeTableName)
-        FAKE_TABLES_REPOSITORY[fakeTableName] = newcls
-    # Connect the class with foreign keys.
-    newcls.setConnection(cls._connection)
-    for col in cls._imdbpySchema.cols:
-        if col.name == 'id':
-            continue
-        if not col.foreignKey:
-            continue
-        # Get the SQL that _WOULD BE_ run, if we had to create
-        # this "fake" table.
-        fkQuery = newcls._connection.createReferenceConstraint(newcls,
-                                newcls.sqlmeta.columns[col.name])
-        if not fkQuery:
-            # Probably the db doesn't support foreign keys (SQLite).
-            continue
-        # Remove "myfaketable" to get references to _real_ tables.
-        fkQuery = fkQuery.replace('myfaketable', '')
-        # Execute the query.
-        newcls._connection.query(fkQuery)
-    # Disconnect it.
-    newcls._connection.close()
-addForeignKeys = classmethod(addForeignKeys)
-
-
-# Module-level "cache" for SQLObject classes, to prevent
-# "class TheClass is already in the registry" errors, when
-# two or more connections to the database are made.
-# XXX: is this the best way to act?
-TABLES_REPOSITORY = {}
-
-def getDBTables(uri=None):
-    """Return a list of classes to be used to access the database
-    through the SQLObject ORM.  The connection uri is optional, and
-    can be used to tailor the db schema to specific needs."""
-    DB_TABLES = []
-    for table in DB_SCHEMA:
-        if table.name in TABLES_REPOSITORY:
-            DB_TABLES.append(TABLES_REPOSITORY[table.name])
-            continue
-        attrs = {'_imdbpyName': table.name, '_imdbpySchema': table,
-                'addIndexes': addIndexes, 'addForeignKeys': addForeignKeys}
-        for col in table.cols:
-            if col.name == 'id':
-                continue
-            attrs[col.name] = MAP_COLS[col.kind](**col.params)
-        # Create a subclass of SQLObject.
-        # XXX: use a metaclass?  I can't see any advantage.
-        cls = type(table.name, (SQLObject,), attrs)
-        DB_TABLES.append(cls)
-        TABLES_REPOSITORY[table.name] = cls
-    return DB_TABLES
-
-
-def toUTF8(s):
-    """For some strange reason, sometimes SQLObject wants utf8 strings
-    instead of unicode."""
-    return s.encode('utf_8')
-
-
-def setConnection(uri, tables, encoding='utf8', debug=False):
-    """Set connection for every table."""
-    kw = {}
-    # FIXME: it's absolutely unclear what we should do to correctly
-    #        support unicode in MySQL; with some versions of SQLObject,
-    #        it seems that setting use_unicode=1 is the _wrong_ thing to do.
-    _uriLower = uri.lower()
-    if _uriLower.startswith('mysql'):
-        kw['use_unicode'] = 1
-        #kw['sqlobject_encoding'] = encoding
-        kw['charset'] = encoding
-
-        # On some server configurations, we will need to explictly enable
-        # loading data from local files
-        kw['local_infile'] = 1
-    conn = connectionForURI(uri, **kw)
-    conn.debug = debug
-    # XXX: doesn't work and a work-around was put in imdbpy2sql.py;
-    #      is there any way to modify the text_factory parameter of
-    #      a SQLite connection?
-    #if uri.startswith('sqlite'):
-    #    major = sys.version_info[0]
-    #    minor = sys.version_info[1]
-    #    if major > 2 or (major == 2 and minor > 5):
-    #        sqliteConn = conn.getConnection()
-    #        sqliteConn.text_factory = str
-    for table in tables:
-        table.setConnection(conn)
-        #table.sqlmeta.cacheValues = False
-        # FIXME: is it safe to set table._cacheValue to False?  Looks like
-        #        we can't retrieve correct values after an update (I think
-        #        it's never needed, but...)  Anyway, these are set to False
-        #        for performance reason at insert time (see imdbpy2sql.py).
-        table._cacheValue = False
-    # Required by imdbpy2sql.py.
-    conn.paramstyle = conn.module.paramstyle
-    return conn
-
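Editor's note: the removed adapter built its SQLObject table classes on the fly with `type()` and cached them in `TABLES_REPOSITORY`, so opening a second connection reuses the same class objects instead of tripping SQLObject's "class is already in the registry" error. A stripped-down sketch of that caching pattern, with a plain base class standing in for `SQLObject`:

```python
# Minimal sketch of the getDBTables() pattern; Base is a stand-in for SQLObject.
class Base(object):
    pass

_REPOSITORY = {}

def build_table_class(name, attrs):
    if name in _REPOSITORY:                     # reuse across connections
        return _REPOSITORY[name]
    cls = type(name, (Base,), dict(attrs))      # dynamically created subclass
    _REPOSITORY[name] = cls
    return cls

Keyword = build_table_class('Keyword', {'keyword': None, 'phoneticCode': None})
assert build_table_class('Keyword', {}) is Keyword   # cached, not rebuilt
```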
diff --git a/lib/imdb/utils.py b/lib/imdb/utils.py
index 0ffb58a84465c2734b6e075687d01342053367d9..284edae0d8a96b7ddb80eefe7a85306009c70965 100644
--- a/lib/imdb/utils.py
+++ b/lib/imdb/utils.py
@@ -18,7 +18,7 @@ GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 """
 
 from __future__ import generators
@@ -431,13 +431,13 @@ def analyze_title(title, canonical=None, canonicalSeries=None,
             yi = [(yiy, yii)]
             if yk == 'TV episode':
                 kind = u'episode'
-            elif yk == 'TV':
+            elif yk in ('TV', 'TV Movie'):
                 kind = u'tv movie'
             elif yk == 'TV Series':
                 kind = u'tv series'
             elif yk == 'Video':
                 kind = u'video movie'
-            elif yk == 'TV mini-series':
+            elif yk in ('TV mini-series', 'TV Mini-Series'):
                 kind = u'tv mini series'
             elif yk == 'Video Game':
                 kind = u'video game'
@@ -960,7 +960,7 @@ def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
             crl = [crl]
         for cr in crl:
             crTag = cr.__class__.__name__.lower()
-            crValue = cr['long imdb name']
+            crValue = cr.get('long imdb name') or u''
             crValue = _normalizeValue(crValue)
             crID = cr.getID()
             if crID is not None:
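Editor's note: the last hunk replaces a direct `cr['long imdb name']` lookup with `cr.get('long imdb name') or u''`, so records missing that key serialize as an empty value instead of raising `KeyError`. A toy illustration (the record is made up):

```python
cr = {}  # hypothetical character/person record with no 'long imdb name' entry

# cr['long imdb name'] would raise KeyError here; the guarded lookup degrades
# to an empty unicode string, which the XML serializer can normalize safely.
value = cr.get('long imdb name') or u''
assert value == u''
```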
diff --git a/lib/jwt/__init__.py b/lib/jwt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..398d8540daa03b2fcaac720d6707a90d396e439a
--- /dev/null
+++ b/lib/jwt/__init__.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+# flake8: noqa
+
+"""
+JSON Web Token implementation
+
+Minimum implementation based on this spec:
+http://self-issued.info/docs/draft-jones-json-web-token-01.html
+"""
+
+
+__title__ = 'pyjwt'
+__version__ = '1.5.0'
+__author__ = 'José Padilla'
+__license__ = 'MIT'
+__copyright__ = 'Copyright 2015 José Padilla'
+
+
+from .api_jwt import (
+    encode, decode, register_algorithm, unregister_algorithm,
+    get_unverified_header, PyJWT
+)
+from .api_jws import PyJWS
+from .exceptions import (
+    InvalidTokenError, DecodeError, InvalidAudienceError,
+    ExpiredSignatureError, ImmatureSignatureError, InvalidIssuedAtError,
+    InvalidIssuerError, ExpiredSignature, InvalidAudience, InvalidIssuer,
+    MissingRequiredClaimError
+)
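Editor's note: for reference, the top-level API re-exported by this `__init__` is typically used as below. The secret and claims are placeholders for illustration; the call signatures match the `PyJWS.encode()`/`PyJWT.decode()` methods added later in this diff.

```python
import datetime
import jwt  # the vendored lib/jwt package

claims = {'user': 'alice',
          'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=5)}
token = jwt.encode(claims, 'not-a-real-secret', algorithm='HS256')

# decode() verifies the signature and the registered claims (here just exp).
decoded = jwt.decode(token, 'not-a-real-secret', algorithms=['HS256'])
assert decoded['user'] == 'alice'
```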
diff --git a/lib/jwt/__main__.py b/lib/jwt/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ccc38c5f733fc54c7247cd79672dd6056f9fb21
--- /dev/null
+++ b/lib/jwt/__main__.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import json
+import optparse
+import sys
+import time
+
+from . import DecodeError, __package__, __version__, decode, encode
+
+
+def main():
+
+    usage = '''Encodes or decodes JSON Web Tokens based on input.
+
+  %prog [options] input
+
+Decoding examples:
+
+  %prog --key=secret json.web.token
+  %prog --no-verify json.web.token
+
+Encoding requires the key option and takes space separated key/value pairs
+separated by equals (=) as input. Examples:
+
+  %prog --key=secret iss=me exp=1302049071
+  %prog --key=secret foo=bar exp=+10
+
+The exp key is special and can take an offset to current Unix time.\
+'''
+    p = optparse.OptionParser(
+        usage=usage,
+        prog='pyjwt',
+        version='%s %s' % (__package__, __version__),
+    )
+
+    p.add_option(
+        '-n', '--no-verify',
+        action='store_false',
+        dest='verify',
+        default=True,
+        help='ignore signature and claims verification on decode'
+    )
+
+    p.add_option(
+        '--key',
+        dest='key',
+        metavar='KEY',
+        default=None,
+        help='set the secret key to sign with'
+    )
+
+    p.add_option(
+        '--alg',
+        dest='algorithm',
+        metavar='ALG',
+        default='HS256',
+        help='set crypto algorithm to sign with. default=HS256'
+    )
+
+    options, arguments = p.parse_args()
+
+    if len(arguments) > 0 or not sys.stdin.isatty():
+        if len(arguments) == 1 and (not options.verify or options.key):
+            # Try to decode
+            try:
+                if not sys.stdin.isatty():
+                    token = sys.stdin.read()
+                else:
+                    token = arguments[0]
+
+                token = token.encode('utf-8')
+                data = decode(token, key=options.key, verify=options.verify)
+
+                print(json.dumps(data))
+                sys.exit(0)
+            except DecodeError as e:
+                print(e)
+                sys.exit(1)
+
+        # Try to encode
+        if options.key is None:
+            print('Key is required when encoding. See --help for usage.')
+            sys.exit(1)
+
+        # Build payload object to encode
+        payload = {}
+
+        for arg in arguments:
+            try:
+                k, v = arg.split('=', 1)
+
+                # exp +offset special case?
+                if k == 'exp' and v[0] == '+' and len(v) > 1:
+                    v = str(int(time.time()+int(v[1:])))
+
+                # Cast to integer?
+                if v.isdigit():
+                    v = int(v)
+                else:
+                    # Cast to float?
+                    try:
+                        v = float(v)
+                    except ValueError:
+                        pass
+
+                # Cast to true, false, or null?
+                constants = {'true': True, 'false': False, 'null': None}
+
+                if v in constants:
+                    v = constants[v]
+
+                payload[k] = v
+            except ValueError:
+                print('Invalid encoding input at {}'.format(arg))
+                sys.exit(1)
+
+        try:
+            token = encode(
+                payload,
+                key=options.key,
+                algorithm=options.algorithm
+            )
+
+            print(token)
+            sys.exit(0)
+        except Exception as e:
+            print(e)
+            sys.exit(1)
+    else:
+        p.print_help()
+
+
+if __name__ == '__main__':
+    main()
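Editor's note: the CLI above is a thin wrapper over `encode()`/`decode()`. Roughly, `pyjwt --key=secret iss=me exp=+10` amounts to the following; the key and claims are the example values from the usage text, not anything real:

```python
import time
import jwt

payload = {'iss': 'me', 'exp': int(time.time()) + 10}   # exp=+10 -> now + 10s
print(jwt.encode(payload, key='secret', algorithm='HS256'))
```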
diff --git a/lib/jwt/algorithms.py b/lib/jwt/algorithms.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6d990adc9ecb563003a6aa4eb1236349af131a3
--- /dev/null
+++ b/lib/jwt/algorithms.py
@@ -0,0 +1,428 @@
+import hashlib
+import hmac
+import json
+
+
+from .compat import constant_time_compare, string_types
+from .exceptions import InvalidKeyError
+from .utils import (
+    base64url_decode, base64url_encode, der_to_raw_signature,
+    force_bytes, force_unicode, from_base64url_uint, raw_to_der_signature,
+    to_base64url_uint
+)
+
+try:
+    from cryptography.hazmat.primitives import hashes
+    from cryptography.hazmat.primitives.serialization import (
+        load_pem_private_key, load_pem_public_key, load_ssh_public_key
+    )
+    from cryptography.hazmat.primitives.asymmetric.rsa import (
+        RSAPrivateKey, RSAPublicKey, RSAPrivateNumbers, RSAPublicNumbers,
+        rsa_recover_prime_factors, rsa_crt_dmp1, rsa_crt_dmq1, rsa_crt_iqmp
+    )
+    from cryptography.hazmat.primitives.asymmetric.ec import (
+        EllipticCurvePrivateKey, EllipticCurvePublicKey
+    )
+    from cryptography.hazmat.primitives.asymmetric import ec, padding
+    from cryptography.hazmat.backends import default_backend
+    from cryptography.exceptions import InvalidSignature
+
+    has_crypto = True
+except ImportError:
+    has_crypto = False
+
+requires_cryptography = set(['RS256', 'RS384', 'RS512', 'ES256', 'ES384',
+                             'ES521', 'ES512', 'PS256', 'PS384', 'PS512'])
+
+
+def get_default_algorithms():
+    """
+    Returns the algorithms that are implemented by the library.
+    """
+    default_algorithms = {
+        'none': NoneAlgorithm(),
+        'HS256': HMACAlgorithm(HMACAlgorithm.SHA256),
+        'HS384': HMACAlgorithm(HMACAlgorithm.SHA384),
+        'HS512': HMACAlgorithm(HMACAlgorithm.SHA512)
+    }
+
+    if has_crypto:
+        default_algorithms.update({
+            'RS256': RSAAlgorithm(RSAAlgorithm.SHA256),
+            'RS384': RSAAlgorithm(RSAAlgorithm.SHA384),
+            'RS512': RSAAlgorithm(RSAAlgorithm.SHA512),
+            'ES256': ECAlgorithm(ECAlgorithm.SHA256),
+            'ES384': ECAlgorithm(ECAlgorithm.SHA384),
+            'ES521': ECAlgorithm(ECAlgorithm.SHA512),
+            'ES512': ECAlgorithm(ECAlgorithm.SHA512),  # Backward compat for #219 fix
+            'PS256': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256),
+            'PS384': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA384),
+            'PS512': RSAPSSAlgorithm(RSAPSSAlgorithm.SHA512)
+        })
+
+    return default_algorithms
+
+
+class Algorithm(object):
+    """
+    The interface for an algorithm used to sign and verify tokens.
+    """
+    def prepare_key(self, key):
+        """
+        Performs necessary validation and conversions on the key and returns
+        the key value in the proper format for sign() and verify().
+        """
+        raise NotImplementedError
+
+    def sign(self, msg, key):
+        """
+        Returns a digital signature for the specified message
+        using the specified key value.
+        """
+        raise NotImplementedError
+
+    def verify(self, msg, key, sig):
+        """
+        Verifies that the specified digital signature is valid
+        for the specified message and key values.
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def to_jwk(key_obj):
+        """
+        Serializes a given RSA key into a JWK
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def from_jwk(jwk):
+        """
+        Deserializes a given RSA key from JWK back into a PublicKey or PrivateKey object
+        """
+        raise NotImplementedError
+
+
+class NoneAlgorithm(Algorithm):
+    """
+    Placeholder for use when no signing or verification
+    operations are required.
+    """
+    def prepare_key(self, key):
+        if key == '':
+            key = None
+
+        if key is not None:
+            raise InvalidKeyError('When alg = "none", key value must be None.')
+
+        return key
+
+    def sign(self, msg, key):
+        return b''
+
+    def verify(self, msg, key, sig):
+        return False
+
+
+class HMACAlgorithm(Algorithm):
+    """
+    Performs signing and verification operations using HMAC
+    and the specified hash function.
+    """
+    SHA256 = hashlib.sha256
+    SHA384 = hashlib.sha384
+    SHA512 = hashlib.sha512
+
+    def __init__(self, hash_alg):
+        self.hash_alg = hash_alg
+
+    def prepare_key(self, key):
+        key = force_bytes(key)
+
+        invalid_strings = [
+            b'-----BEGIN PUBLIC KEY-----',
+            b'-----BEGIN CERTIFICATE-----',
+            b'ssh-rsa'
+        ]
+
+        if any([string_value in key for string_value in invalid_strings]):
+            raise InvalidKeyError(
+                'The specified key is an asymmetric key or x509 certificate and'
+                ' should not be used as an HMAC secret.')
+
+        return key
+
+    @staticmethod
+    def to_jwk(key_obj):
+        return json.dumps({
+            'k': force_unicode(base64url_encode(force_bytes(key_obj))),
+            'kty': 'oct'
+        })
+
+    @staticmethod
+    def from_jwk(jwk):
+        obj = json.loads(jwk)
+
+        if obj.get('kty') != 'oct':
+            raise InvalidKeyError('Not an HMAC key')
+
+        return base64url_decode(obj['k'])
+
+    def sign(self, msg, key):
+        return hmac.new(key, msg, self.hash_alg).digest()
+
+    def verify(self, msg, key, sig):
+        return constant_time_compare(sig, self.sign(msg, key))
+
+
+if has_crypto:
+
+    class RSAAlgorithm(Algorithm):
+        """
+        Performs signing and verification operations using
+        RSASSA-PKCS-v1_5 and the specified hash function.
+        """
+        SHA256 = hashes.SHA256
+        SHA384 = hashes.SHA384
+        SHA512 = hashes.SHA512
+
+        def __init__(self, hash_alg):
+            self.hash_alg = hash_alg
+
+        def prepare_key(self, key):
+            if isinstance(key, RSAPrivateKey) or \
+               isinstance(key, RSAPublicKey):
+                return key
+
+            if isinstance(key, string_types):
+                key = force_bytes(key)
+
+                try:
+                    if key.startswith(b'ssh-rsa'):
+                        key = load_ssh_public_key(key, backend=default_backend())
+                    else:
+                        key = load_pem_private_key(key, password=None, backend=default_backend())
+                except ValueError:
+                    key = load_pem_public_key(key, backend=default_backend())
+            else:
+                raise TypeError('Expecting a PEM-formatted key.')
+
+            return key
+
+        @staticmethod
+        def to_jwk(key_obj):
+            obj = None
+
+            if getattr(key_obj, 'private_numbers', None):
+                # Private key
+                numbers = key_obj.private_numbers()
+
+                obj = {
+                    'kty': 'RSA',
+                    'key_ops': ['sign'],
+                    'n': force_unicode(to_base64url_uint(numbers.public_numbers.n)),
+                    'e': force_unicode(to_base64url_uint(numbers.public_numbers.e)),
+                    'd': force_unicode(to_base64url_uint(numbers.d)),
+                    'p': force_unicode(to_base64url_uint(numbers.p)),
+                    'q': force_unicode(to_base64url_uint(numbers.q)),
+                    'dp': force_unicode(to_base64url_uint(numbers.dmp1)),
+                    'dq': force_unicode(to_base64url_uint(numbers.dmq1)),
+                    'qi': force_unicode(to_base64url_uint(numbers.iqmp))
+                }
+
+            elif getattr(key_obj, 'verifier', None):
+                # Public key
+                numbers = key_obj.public_numbers()
+
+                obj = {
+                    'kty': 'RSA',
+                    'key_ops': ['verify'],
+                    'n': force_unicode(to_base64url_uint(numbers.n)),
+                    'e': force_unicode(to_base64url_uint(numbers.e))
+                }
+            else:
+                raise InvalidKeyError('Not a public or private key')
+
+            return json.dumps(obj)
+
+        @staticmethod
+        def from_jwk(jwk):
+            try:
+                obj = json.loads(jwk)
+            except ValueError:
+                raise InvalidKeyError('Key is not valid JSON')
+
+            if obj.get('kty') != 'RSA':
+                raise InvalidKeyError('Not an RSA key')
+
+            if 'd' in obj and 'e' in obj and 'n' in obj:
+                # Private key
+                if 'oth' in obj:
+                    raise InvalidKeyError('Unsupported RSA private key: > 2 primes not supported')
+
+                other_props = ['p', 'q', 'dp', 'dq', 'qi']
+                props_found = [prop in obj for prop in other_props]
+                any_props_found = any(props_found)
+
+                if any_props_found and not all(props_found):
+                    raise InvalidKeyError('RSA key must include all parameters if any are present besides d')
+
+                public_numbers = RSAPublicNumbers(
+                    from_base64url_uint(obj['e']), from_base64url_uint(obj['n'])
+                )
+
+                if any_props_found:
+                    numbers = RSAPrivateNumbers(
+                        d=from_base64url_uint(obj['d']),
+                        p=from_base64url_uint(obj['p']),
+                        q=from_base64url_uint(obj['q']),
+                        dmp1=from_base64url_uint(obj['dp']),
+                        dmq1=from_base64url_uint(obj['dq']),
+                        iqmp=from_base64url_uint(obj['qi']),
+                        public_numbers=public_numbers
+                    )
+                else:
+                    d = from_base64url_uint(obj['d'])
+                    p, q = rsa_recover_prime_factors(
+                        public_numbers.n, d, public_numbers.e
+                    )
+
+                    numbers = RSAPrivateNumbers(
+                        d=d,
+                        p=p,
+                        q=q,
+                        dmp1=rsa_crt_dmp1(d, p),
+                        dmq1=rsa_crt_dmq1(d, q),
+                        iqmp=rsa_crt_iqmp(p, q),
+                        public_numbers=public_numbers
+                    )
+
+                return numbers.private_key(default_backend())
+            elif 'n' in obj and 'e' in obj:
+                # Public key
+                numbers = RSAPublicNumbers(
+                    from_base64url_uint(obj['e']), from_base64url_uint(obj['n'])
+                )
+
+                return numbers.public_key(default_backend())
+            else:
+                raise InvalidKeyError('Not a public or private key')
+
+        def sign(self, msg, key):
+            signer = key.signer(
+                padding.PKCS1v15(),
+                self.hash_alg()
+            )
+
+            signer.update(msg)
+            return signer.finalize()
+
+        def verify(self, msg, key, sig):
+            verifier = key.verifier(
+                sig,
+                padding.PKCS1v15(),
+                self.hash_alg()
+            )
+
+            verifier.update(msg)
+
+            try:
+                verifier.verify()
+                return True
+            except InvalidSignature:
+                return False
+
+    class ECAlgorithm(Algorithm):
+        """
+        Performs signing and verification operations using
+        ECDSA and the specified hash function
+        """
+        SHA256 = hashes.SHA256
+        SHA384 = hashes.SHA384
+        SHA512 = hashes.SHA512
+
+        def __init__(self, hash_alg):
+            self.hash_alg = hash_alg
+
+        def prepare_key(self, key):
+            if isinstance(key, EllipticCurvePrivateKey) or \
+               isinstance(key, EllipticCurvePublicKey):
+                return key
+
+            if isinstance(key, string_types):
+                key = force_bytes(key)
+
+                # Attempt to load key. We don't know if it's
+                # a Signing Key or a Verifying Key, so we try
+                # the Verifying Key first.
+                try:
+                    if key.startswith(b'ecdsa-sha2-'):
+                        key = load_ssh_public_key(key, backend=default_backend())
+                    else:
+                        key = load_pem_public_key(key, backend=default_backend())
+                except ValueError:
+                    key = load_pem_private_key(key, password=None, backend=default_backend())
+
+            else:
+                raise TypeError('Expecting a PEM-formatted key.')
+
+            return key
+
+        def sign(self, msg, key):
+            signer = key.signer(ec.ECDSA(self.hash_alg()))
+
+            signer.update(msg)
+            der_sig = signer.finalize()
+
+            return der_to_raw_signature(der_sig, key.curve)
+
+        def verify(self, msg, key, sig):
+            try:
+                der_sig = raw_to_der_signature(sig, key.curve)
+            except ValueError:
+                return False
+
+            verifier = key.verifier(der_sig, ec.ECDSA(self.hash_alg()))
+
+            verifier.update(msg)
+
+            try:
+                verifier.verify()
+                return True
+            except InvalidSignature:
+                return False
+
+    class RSAPSSAlgorithm(RSAAlgorithm):
+        """
+        Performs a signature using RSASSA-PSS with MGF1
+        """
+
+        def sign(self, msg, key):
+            signer = key.signer(
+                padding.PSS(
+                    mgf=padding.MGF1(self.hash_alg()),
+                    salt_length=self.hash_alg.digest_size
+                ),
+                self.hash_alg()
+            )
+
+            signer.update(msg)
+            return signer.finalize()
+
+        def verify(self, msg, key, sig):
+            verifier = key.verifier(
+                sig,
+                padding.PSS(
+                    mgf=padding.MGF1(self.hash_alg()),
+                    salt_length=self.hash_alg.digest_size
+                ),
+                self.hash_alg()
+            )
+
+            verifier.update(msg)
+
+            try:
+                verifier.verify()
+                return True
+            except InvalidSignature:
+                return False
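Editor's note: the `Algorithm` objects can also be exercised directly, which is occasionally handy for testing. A small sketch using the HMAC implementation defined above (the secret and message are placeholders):

```python
from jwt.algorithms import HMACAlgorithm

alg = HMACAlgorithm(HMACAlgorithm.SHA256)
key = alg.prepare_key('placeholder-secret')      # rejects PEM/ssh-rsa material
sig = alg.sign(b'signing input', key)
assert alg.verify(b'signing input', key, sig)    # constant-time comparison
assert not alg.verify(b'tampered input', key, sig)
```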
diff --git a/lib/jwt/api_jws.py b/lib/jwt/api_jws.py
new file mode 100644
index 0000000000000000000000000000000000000000..66b6a675e0e55155797c278bf0463de37fbe0e30
--- /dev/null
+++ b/lib/jwt/api_jws.py
@@ -0,0 +1,215 @@
+import binascii
+import json
+import warnings
+
+from collections import Mapping
+
+from .algorithms import (
+    Algorithm, get_default_algorithms, has_crypto, requires_cryptography  # NOQA
+)
+from .compat import binary_type, string_types, text_type
+from .exceptions import DecodeError, InvalidAlgorithmError, InvalidTokenError
+from .utils import base64url_decode, base64url_encode, force_bytes, merge_dict
+
+
+class PyJWS(object):
+    header_typ = 'JWT'
+
+    def __init__(self, algorithms=None, options=None):
+        self._algorithms = get_default_algorithms()
+        self._valid_algs = (set(algorithms) if algorithms is not None
+                            else set(self._algorithms))
+
+        # Remove algorithms that aren't on the whitelist
+        for key in list(self._algorithms.keys()):
+            if key not in self._valid_algs:
+                del self._algorithms[key]
+
+        if not options:
+            options = {}
+
+        self.options = merge_dict(self._get_default_options(), options)
+
+    @staticmethod
+    def _get_default_options():
+        return {
+            'verify_signature': True
+        }
+
+    def register_algorithm(self, alg_id, alg_obj):
+        """
+        Registers a new Algorithm for use when creating and verifying tokens.
+        """
+        if alg_id in self._algorithms:
+            raise ValueError('Algorithm already has a handler.')
+
+        if not isinstance(alg_obj, Algorithm):
+            raise TypeError('Object is not of type `Algorithm`')
+
+        self._algorithms[alg_id] = alg_obj
+        self._valid_algs.add(alg_id)
+
+    def unregister_algorithm(self, alg_id):
+        """
+        Unregisters an Algorithm for use when creating and verifying tokens
+        Throws KeyError if algorithm is not registered.
+        """
+        if alg_id not in self._algorithms:
+            raise KeyError('The specified algorithm could not be removed'
+                           ' because it is not registered.')
+
+        del self._algorithms[alg_id]
+        self._valid_algs.remove(alg_id)
+
+    def get_algorithms(self):
+        """
+        Returns a list of supported values for the 'alg' parameter.
+        """
+        return list(self._valid_algs)
+
+    def encode(self, payload, key, algorithm='HS256', headers=None,
+               json_encoder=None):
+        segments = []
+
+        if algorithm is None:
+            algorithm = 'none'
+
+        if algorithm not in self._valid_algs:
+            pass
+
+        # Header
+        header = {'typ': self.header_typ, 'alg': algorithm}
+
+        if headers:
+            self._validate_headers(headers)
+            header.update(headers)
+
+        json_header = force_bytes(
+            json.dumps(
+                header,
+                separators=(',', ':'),
+                cls=json_encoder
+            )
+        )
+
+        segments.append(base64url_encode(json_header))
+        segments.append(base64url_encode(payload))
+
+        # Segments
+        signing_input = b'.'.join(segments)
+        try:
+            alg_obj = self._algorithms[algorithm]
+            key = alg_obj.prepare_key(key)
+            signature = alg_obj.sign(signing_input, key)
+
+        except KeyError:
+            if not has_crypto and algorithm in requires_cryptography:
+                raise NotImplementedError(
+                    "Algorithm '%s' could not be found. Do you have cryptography "
+                    "installed?" % algorithm
+                )
+            else:
+                raise NotImplementedError('Algorithm not supported')
+
+        segments.append(base64url_encode(signature))
+
+        return b'.'.join(segments)
+
+    def decode(self, jws, key='', verify=True, algorithms=None, options=None,
+               **kwargs):
+        payload, signing_input, header, signature = self._load(jws)
+
+        if verify:
+            merged_options = merge_dict(self.options, options)
+            if merged_options.get('verify_signature'):
+                self._verify_signature(payload, signing_input, header, signature,
+                                       key, algorithms)
+        else:
+            warnings.warn('The verify parameter is deprecated. '
+                          'Please use options instead.', DeprecationWarning)
+
+        return payload
+
+    def get_unverified_header(self, jwt):
+        """Returns back the JWT header parameters as a dict()
+
+        Note: The signature is not verified so the header parameters
+        should not be fully trusted until signature verification is complete
+        """
+        headers = self._load(jwt)[2]
+        self._validate_headers(headers)
+
+        return headers
+
+    def _load(self, jwt):
+        if isinstance(jwt, text_type):
+            jwt = jwt.encode('utf-8')
+
+        if not issubclass(type(jwt), binary_type):
+            raise DecodeError("Invalid token type. Token must be a {0}".format(
+                binary_type))
+
+        try:
+            signing_input, crypto_segment = jwt.rsplit(b'.', 1)
+            header_segment, payload_segment = signing_input.split(b'.', 1)
+        except ValueError:
+            raise DecodeError('Not enough segments')
+
+        try:
+            header_data = base64url_decode(header_segment)
+        except (TypeError, binascii.Error):
+            raise DecodeError('Invalid header padding')
+
+        try:
+            header = json.loads(header_data.decode('utf-8'))
+        except ValueError as e:
+            raise DecodeError('Invalid header string: %s' % e)
+
+        if not isinstance(header, Mapping):
+            raise DecodeError('Invalid header string: must be a json object')
+
+        try:
+            payload = base64url_decode(payload_segment)
+        except (TypeError, binascii.Error):
+            raise DecodeError('Invalid payload padding')
+
+        try:
+            signature = base64url_decode(crypto_segment)
+        except (TypeError, binascii.Error):
+            raise DecodeError('Invalid crypto padding')
+
+        return (payload, signing_input, header, signature)
+
+    def _verify_signature(self, payload, signing_input, header, signature,
+                          key='', algorithms=None):
+
+        alg = header.get('alg')
+
+        if algorithms is not None and alg not in algorithms:
+            raise InvalidAlgorithmError('The specified alg value is not allowed')
+
+        try:
+            alg_obj = self._algorithms[alg]
+            key = alg_obj.prepare_key(key)
+
+            if not alg_obj.verify(signing_input, key, signature):
+                raise DecodeError('Signature verification failed')
+
+        except KeyError:
+            raise InvalidAlgorithmError('Algorithm not supported')
+
+    def _validate_headers(self, headers):
+        if 'kid' in headers:
+            self._validate_kid(headers['kid'])
+
+    def _validate_kid(self, kid):
+        if not isinstance(kid, string_types):
+            raise InvalidTokenError('Key ID header parameter must be a string')
+
+
+_jws_global_obj = PyJWS()
+encode = _jws_global_obj.encode
+decode = _jws_global_obj.decode
+register_algorithm = _jws_global_obj.register_algorithm
+unregister_algorithm = _jws_global_obj.unregister_algorithm
+get_unverified_header = _jws_global_obj.get_unverified_header
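Editor's note: `PyJWS` deals in raw signed segments only; JSON payloads and claim validation live in `PyJWT` below. A minimal sketch with a placeholder key:

```python
from jwt.api_jws import PyJWS

jws = PyJWS()
token = jws.encode(b'arbitrary bytes payload', 'placeholder-key')  # HS256 by default

# decode() verifies the signature and returns the payload bytes untouched.
assert jws.decode(token, 'placeholder-key',
                  algorithms=['HS256']) == b'arbitrary bytes payload'
print(jws.get_unverified_header(token))  # {'typ': 'JWT', 'alg': 'HS256'}
```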
diff --git a/lib/jwt/api_jwt.py b/lib/jwt/api_jwt.py
new file mode 100644
index 0000000000000000000000000000000000000000..bca68231146e54135da4e36e6715275b2c2567e8
--- /dev/null
+++ b/lib/jwt/api_jwt.py
@@ -0,0 +1,183 @@
+import json
+import warnings
+
+from calendar import timegm
+from collections import Mapping
+from datetime import datetime, timedelta
+
+from .api_jws import PyJWS
+from .algorithms import Algorithm, get_default_algorithms  # NOQA
+from .compat import string_types, timedelta_total_seconds
+from .exceptions import (
+    DecodeError, ExpiredSignatureError, ImmatureSignatureError,
+    InvalidAudienceError, InvalidIssuedAtError,
+    InvalidIssuerError, MissingRequiredClaimError
+)
+from .utils import merge_dict
+
+
+class PyJWT(PyJWS):
+    header_type = 'JWT'
+
+    @staticmethod
+    def _get_default_options():
+        return {
+            'verify_signature': True,
+            'verify_exp': True,
+            'verify_nbf': True,
+            'verify_iat': True,
+            'verify_aud': True,
+            'verify_iss': True,
+            'require_exp': False,
+            'require_iat': False,
+            'require_nbf': False
+        }
+
+    def encode(self, payload, key, algorithm='HS256', headers=None,
+               json_encoder=None):
+        # Check that we get a mapping
+        if not isinstance(payload, Mapping):
+            raise TypeError('Expecting a mapping object, as JWT only supports '
+                            'JSON objects as payloads.')
+
+        # Payload
+        for time_claim in ['exp', 'iat', 'nbf']:
+            # Convert datetime to a intDate value in known time-format claims
+            if isinstance(payload.get(time_claim), datetime):
+                payload[time_claim] = timegm(payload[time_claim].utctimetuple())
+
+        json_payload = json.dumps(
+            payload,
+            separators=(',', ':'),
+            cls=json_encoder
+        ).encode('utf-8')
+
+        return super(PyJWT, self).encode(
+            json_payload, key, algorithm, headers, json_encoder
+        )
+
+    def decode(self, jwt, key='', verify=True, algorithms=None, options=None,
+               **kwargs):
+        payload, signing_input, header, signature = self._load(jwt)
+
+        decoded = super(PyJWT, self).decode(jwt, key, verify, algorithms,
+                                            options, **kwargs)
+
+        try:
+            payload = json.loads(decoded.decode('utf-8'))
+        except ValueError as e:
+            raise DecodeError('Invalid payload string: %s' % e)
+        if not isinstance(payload, Mapping):
+            raise DecodeError('Invalid payload string: must be a json object')
+
+        if verify:
+            merged_options = merge_dict(self.options, options)
+            self._validate_claims(payload, merged_options, **kwargs)
+
+        return payload
+
+    def _validate_claims(self, payload, options, audience=None, issuer=None,
+                         leeway=0, **kwargs):
+
+        if 'verify_expiration' in kwargs:
+            options['verify_exp'] = kwargs.get('verify_expiration', True)
+            warnings.warn('The verify_expiration parameter is deprecated. '
+                          'Please use options instead.', DeprecationWarning)
+
+        if isinstance(leeway, timedelta):
+            leeway = timedelta_total_seconds(leeway)
+
+        if not isinstance(audience, (string_types, type(None))):
+            raise TypeError('audience must be a string or None')
+
+        self._validate_required_claims(payload, options)
+
+        now = timegm(datetime.utcnow().utctimetuple())
+
+        if 'iat' in payload and options.get('verify_iat'):
+            self._validate_iat(payload, now, leeway)
+
+        if 'nbf' in payload and options.get('verify_nbf'):
+            self._validate_nbf(payload, now, leeway)
+
+        if 'exp' in payload and options.get('verify_exp'):
+            self._validate_exp(payload, now, leeway)
+
+        if options.get('verify_iss'):
+            self._validate_iss(payload, issuer)
+
+        if options.get('verify_aud'):
+            self._validate_aud(payload, audience)
+
+    def _validate_required_claims(self, payload, options):
+        if options.get('require_exp') and payload.get('exp') is None:
+            raise MissingRequiredClaimError('exp')
+
+        if options.get('require_iat') and payload.get('iat') is None:
+            raise MissingRequiredClaimError('iat')
+
+        if options.get('require_nbf') and payload.get('nbf') is None:
+            raise MissingRequiredClaimError('nbf')
+
+    def _validate_iat(self, payload, now, leeway):
+        try:
+            int(payload['iat'])
+        except ValueError:
+            raise InvalidIssuedAtError('Issued At claim (iat) must be an integer.')
+
+    def _validate_nbf(self, payload, now, leeway):
+        try:
+            nbf = int(payload['nbf'])
+        except ValueError:
+            raise DecodeError('Not Before claim (nbf) must be an integer.')
+
+        if nbf > (now + leeway):
+            raise ImmatureSignatureError('The token is not yet valid (nbf)')
+
+    def _validate_exp(self, payload, now, leeway):
+        try:
+            exp = int(payload['exp'])
+        except ValueError:
+            raise DecodeError('Expiration Time claim (exp) must be an'
+                              ' integer.')
+
+        if exp < (now - leeway):
+            raise ExpiredSignatureError('Signature has expired')
+
+    def _validate_aud(self, payload, audience):
+        if audience is None and 'aud' not in payload:
+            return
+
+        if audience is not None and 'aud' not in payload:
+            # Application specified an audience, but it could not be
+            # verified since the token does not contain a claim.
+            raise MissingRequiredClaimError('aud')
+
+        audience_claims = payload['aud']
+
+        if isinstance(audience_claims, string_types):
+            audience_claims = [audience_claims]
+        if not isinstance(audience_claims, list):
+            raise InvalidAudienceError('Invalid claim format in token')
+        if any(not isinstance(c, string_types) for c in audience_claims):
+            raise InvalidAudienceError('Invalid claim format in token')
+        if audience not in audience_claims:
+            raise InvalidAudienceError('Invalid audience')
+
+    def _validate_iss(self, payload, issuer):
+        if issuer is None:
+            return
+
+        if 'iss' not in payload:
+            raise MissingRequiredClaimError('iss')
+
+        if payload['iss'] != issuer:
+            raise InvalidIssuerError('Invalid issuer')
+
+
+_jwt_global_obj = PyJWT()
+encode = _jwt_global_obj.encode
+decode = _jwt_global_obj.decode
+register_algorithm = _jwt_global_obj.register_algorithm
+unregister_algorithm = _jwt_global_obj.unregister_algorithm
+get_unverified_header = _jwt_global_obj.get_unverified_header
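Editor's note: `PyJWT` layers registered-claim validation (exp, nbf, iat, aud, iss) on top of `PyJWS`. A short sketch of how an expired token surfaces; the secret and audience are placeholders:

```python
import datetime
import jwt
from jwt.exceptions import ExpiredSignatureError

token = jwt.encode(
    {'aud': 'example-audience',
     'exp': datetime.datetime.utcnow() - datetime.timedelta(seconds=5)},
    'placeholder-secret')

try:
    jwt.decode(token, 'placeholder-secret', algorithms=['HS256'],
               audience='example-audience')
except ExpiredSignatureError:
    print('expired, as expected for this example')
```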
diff --git a/lib/jwt/compat.py b/lib/jwt/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..b928c7d2ebd987351e4d7ea1e99377a97788be19
--- /dev/null
+++ b/lib/jwt/compat.py
@@ -0,0 +1,76 @@
+"""
+The `compat` module provides support for backwards compatibility with older
+versions of python, and compatibility wrappers around optional packages.
+"""
+# flake8: noqa
+import hmac
+import struct
+import sys
+
+
+PY3 = sys.version_info[0] == 3
+
+
+if PY3:
+    text_type = str
+    binary_type = bytes
+else:
+    text_type = unicode
+    binary_type = str
+
+string_types = (text_type, binary_type)
+
+
+def timedelta_total_seconds(delta):
+    try:
+        delta.total_seconds
+    except AttributeError:
+        # On Python 2.6, timedelta instances do not have
+        # a .total_seconds() method.
+        total_seconds = delta.days * 24 * 60 * 60 + delta.seconds
+    else:
+        total_seconds = delta.total_seconds()
+
+    return total_seconds
+
+
+try:
+    constant_time_compare = hmac.compare_digest
+except AttributeError:
+    # Fallback for Python < 2.7
+    def constant_time_compare(val1, val2):
+        """
+        Returns True if the two strings are equal, False otherwise.
+
+        The time taken is independent of the number of characters that match.
+        """
+        if len(val1) != len(val2):
+            return False
+
+        result = 0
+
+        for x, y in zip(val1, val2):
+            result |= ord(x) ^ ord(y)
+
+        return result == 0
+
+# Use int.to_bytes if it exists (Python 3)
+if getattr(int, 'to_bytes', None):
+    def bytes_from_int(val):
+        remaining = val
+        byte_length = 0
+
+        while remaining != 0:
+            remaining = remaining >> 8
+            byte_length += 1
+
+        return val.to_bytes(byte_length, 'big', signed=False)
+else:
+    def bytes_from_int(val):
+        buf = []
+        while val:
+            val, remainder = divmod(val, 256)
+            buf.append(remainder)
+
+        buf.reverse()
+        return struct.pack('%sB' % len(buf), *buf)
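Editor's note: two of these shims are easy to sanity-check in isolation; the values below are purely illustrative:

```python
from jwt.compat import bytes_from_int, constant_time_compare

# 65537 == 0x010001, so its big-endian encoding is three bytes.
assert bytes_from_int(65537) == b'\x01\x00\x01'

# Equal-length mismatches take roughly the same time as matches, which is
# what keeps the HMAC verification free of timing side channels.
assert constant_time_compare(b'abc', b'abc')
assert not constant_time_compare(b'abc', b'abd')
```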
diff --git a/lib/dateutil/test/__init__.py b/lib/jwt/contrib/__init__.py
similarity index 100%
rename from lib/dateutil/test/__init__.py
rename to lib/jwt/contrib/__init__.py
diff --git a/lib/jwt/contrib/algorithms/__init__.py b/lib/jwt/contrib/algorithms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/jwt/contrib/algorithms/py_ecdsa.py b/lib/jwt/contrib/algorithms/py_ecdsa.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf0dea5ae28b3be0b40f0da444072acc84590e55
--- /dev/null
+++ b/lib/jwt/contrib/algorithms/py_ecdsa.py
@@ -0,0 +1,60 @@
+# Note: This file is named py_ecdsa.py because import behavior in Python 2
+# would cause ecdsa.py to squash the ecdsa library that it depends upon.
+
+import hashlib
+
+import ecdsa
+
+from jwt.algorithms import Algorithm
+from jwt.compat import string_types, text_type
+
+
+class ECAlgorithm(Algorithm):
+    """
+    Performs signing and verification operations using
+    ECDSA and the specified hash function
+
+    This class requires the ecdsa package to be installed.
+
+    This is based off of the implementation in PyJWT 0.3.2
+    """
+    SHA256 = hashlib.sha256
+    SHA384 = hashlib.sha384
+    SHA512 = hashlib.sha512
+
+    def __init__(self, hash_alg):
+        self.hash_alg = hash_alg
+
+    def prepare_key(self, key):
+
+        if isinstance(key, ecdsa.SigningKey) or \
+           isinstance(key, ecdsa.VerifyingKey):
+            return key
+
+        if isinstance(key, string_types):
+            if isinstance(key, text_type):
+                key = key.encode('utf-8')
+
+            # Attempt to load key. We don't know if it's
+            # a Signing Key or a Verifying Key, so we try
+            # the Verifying Key first.
+            try:
+                key = ecdsa.VerifyingKey.from_pem(key)
+            except ecdsa.der.UnexpectedDER:
+                key = ecdsa.SigningKey.from_pem(key)
+
+        else:
+            raise TypeError('Expecting a PEM-formatted key.')
+
+        return key
+
+    def sign(self, msg, key):
+        return key.sign(msg, hashfunc=self.hash_alg,
+                        sigencode=ecdsa.util.sigencode_string)
+
+    def verify(self, msg, key, sig):
+        try:
+            return key.verify(sig, msg, hashfunc=self.hash_alg,
+                              sigdecode=ecdsa.util.sigdecode_string)
+        except AssertionError:
+            return False
diff --git a/lib/jwt/contrib/algorithms/pycrypto.py b/lib/jwt/contrib/algorithms/pycrypto.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6afaa59dd037cebab55170195596ce1e8973813
--- /dev/null
+++ b/lib/jwt/contrib/algorithms/pycrypto.py
@@ -0,0 +1,47 @@
+import Crypto.Hash.SHA256
+import Crypto.Hash.SHA384
+import Crypto.Hash.SHA512
+
+from Crypto.PublicKey import RSA
+from Crypto.Signature import PKCS1_v1_5
+
+from jwt.algorithms import Algorithm
+from jwt.compat import string_types, text_type
+
+
+class RSAAlgorithm(Algorithm):
+    """
+    Performs signing and verification operations using
+    RSASSA-PKCS-v1_5 and the specified hash function.
+
+    This class requires PyCrypto package to be installed.
+
+    This is based off of the implementation in PyJWT 0.3.2
+    """
+    SHA256 = Crypto.Hash.SHA256
+    SHA384 = Crypto.Hash.SHA384
+    SHA512 = Crypto.Hash.SHA512
+
+    def __init__(self, hash_alg):
+        self.hash_alg = hash_alg
+
+    def prepare_key(self, key):
+
+        if isinstance(key, RSA._RSAobj):
+            return key
+
+        if isinstance(key, string_types):
+            if isinstance(key, text_type):
+                key = key.encode('utf-8')
+
+            key = RSA.importKey(key)
+        else:
+            raise TypeError('Expecting a PEM- or RSA-formatted key.')
+
+        return key
+
+    def sign(self, msg, key):
+        return PKCS1_v1_5.new(key).sign(self.hash_alg.new(msg))
+
+    def verify(self, msg, key, sig):
+        return PKCS1_v1_5.new(key).verify(self.hash_alg.new(msg), sig)
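Editor's note: these contrib backends are not wired in automatically; they are meant to be registered by hand when `cryptography` is unavailable but `ecdsa` or PyCrypto is. A hedged sketch of that wiring (it assumes PyCrypto is installed; `register_algorithm()` raises `ValueError` for an id that is already registered, hence the unregister step):

```python
import jwt
from jwt.contrib.algorithms.pycrypto import RSAAlgorithm

try:
    jwt.unregister_algorithm('RS256')   # present only if cryptography registered it
except KeyError:
    pass
jwt.register_algorithm('RS256', RSAAlgorithm(RSAAlgorithm.SHA256))
```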
diff --git a/lib/jwt/exceptions.py b/lib/jwt/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..31177a0a2f8fda4f25fe408a3965f4c9ea4e5d5a
--- /dev/null
+++ b/lib/jwt/exceptions.py
@@ -0,0 +1,48 @@
+class InvalidTokenError(Exception):
+    pass
+
+
+class DecodeError(InvalidTokenError):
+    pass
+
+
+class ExpiredSignatureError(InvalidTokenError):
+    pass
+
+
+class InvalidAudienceError(InvalidTokenError):
+    pass
+
+
+class InvalidIssuerError(InvalidTokenError):
+    pass
+
+
+class InvalidIssuedAtError(InvalidTokenError):
+    pass
+
+
+class ImmatureSignatureError(InvalidTokenError):
+    pass
+
+
+class InvalidKeyError(Exception):
+    pass
+
+
+class InvalidAlgorithmError(InvalidTokenError):
+    pass
+
+
+class MissingRequiredClaimError(InvalidTokenError):
+    def __init__(self, claim):
+        self.claim = claim
+
+    def __str__(self):
+        return 'Token is missing the "%s" claim' % self.claim
+
+
+# Compatibility aliases (deprecated)
+ExpiredSignature = ExpiredSignatureError
+InvalidAudience = InvalidAudienceError
+InvalidIssuer = InvalidIssuerError
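Editor's note: everything raised during decoding derives from `InvalidTokenError`, so callers can catch specific failures first and fall back to the base class. For example, with an obviously malformed token and a placeholder key:

```python
import jwt
from jwt.exceptions import DecodeError, InvalidTokenError

try:
    jwt.decode('not.a.token', 'placeholder-secret', algorithms=['HS256'])
except DecodeError as err:
    print('rejected: %s' % err)
except InvalidTokenError:
    pass   # catch-all for any other decode-time failure
```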
diff --git a/lib/jwt/utils.py b/lib/jwt/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b33c7a2d4551b41cbd01b1ed3989a06cff05462d
--- /dev/null
+++ b/lib/jwt/utils.py
@@ -0,0 +1,113 @@
+import base64
+import binascii
+import struct
+
+from .compat import binary_type, bytes_from_int, text_type
+
+try:
+    from cryptography.hazmat.primitives.asymmetric.utils import (
+        decode_dss_signature, encode_dss_signature
+    )
+except ImportError:
+    pass
+
+
+def force_unicode(value):
+    if isinstance(value, binary_type):
+        return value.decode('utf-8')
+    elif isinstance(value, text_type):
+        return value
+    else:
+        raise TypeError('Expected a string value')
+
+
+def force_bytes(value):
+    if isinstance(value, text_type):
+        return value.encode('utf-8')
+    elif isinstance(value, binary_type):
+        return value
+    else:
+        raise TypeError('Expected a string value')
+
+
+def base64url_decode(input):
+    if isinstance(input, text_type):
+        input = input.encode('ascii')
+
+    rem = len(input) % 4
+
+    if rem > 0:
+        input += b'=' * (4 - rem)
+
+    return base64.urlsafe_b64decode(input)
+
+
+def base64url_encode(input):
+    return base64.urlsafe_b64encode(input).replace(b'=', b'')
+
+
+def to_base64url_uint(val):
+    if val < 0:
+        raise ValueError('Must be a positive integer')
+
+    int_bytes = bytes_from_int(val)
+
+    if len(int_bytes) == 0:
+        int_bytes = b'\x00'
+
+    return base64url_encode(int_bytes)
+
+
+def from_base64url_uint(val):
+    if isinstance(val, text_type):
+        val = val.encode('ascii')
+
+    data = base64url_decode(val)
+
+    buf = struct.unpack('%sB' % len(data), data)
+    return int(''.join(["%02x" % byte for byte in buf]), 16)
+
+
+def merge_dict(original, updates):
+    if not updates:
+        return original
+
+    try:
+        merged_options = original.copy()
+        merged_options.update(updates)
+    except (AttributeError, ValueError) as e:
+        raise TypeError('original and updates must be a dictionary: %s' % e)
+
+    return merged_options
+
+
+def number_to_bytes(num, num_bytes):
+    padded_hex = '%0*x' % (2 * num_bytes, num)
+    big_endian = binascii.a2b_hex(padded_hex.encode('ascii'))
+    return big_endian
+
+
+def bytes_to_number(string):
+    return int(binascii.b2a_hex(string), 16)
+
+
+def der_to_raw_signature(der_sig, curve):
+    num_bits = curve.key_size
+    num_bytes = (num_bits + 7) // 8
+
+    r, s = decode_dss_signature(der_sig)
+
+    return number_to_bytes(r, num_bytes) + number_to_bytes(s, num_bytes)
+
+
+def raw_to_der_signature(raw_sig, curve):
+    num_bits = curve.key_size
+    num_bytes = (num_bits + 7) // 8
+
+    if len(raw_sig) != 2 * num_bytes:
+        raise ValueError('Invalid signature')
+
+    r = bytes_to_number(raw_sig[:num_bytes])
+    s = bytes_to_number(raw_sig[num_bytes:])
+
+    return encode_dss_signature(r, s)
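Editor's note: the base64url helpers differ from plain `base64.urlsafe_b64encode` only in that padding is stripped on encode and restored on decode, as the JOSE specs require:

```python
from jwt.utils import base64url_decode, base64url_encode

encoded = base64url_encode(b'any bytes\xff')
assert b'=' not in encoded                             # padding stripped
assert base64url_decode(encoded) == b'any bytes\xff'   # padding restored
```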
diff --git a/lib/markdown2.py b/lib/markdown2.py
index 41c8f19ddf9baad678be872a640dd64496214e34..0c77519a818ee0a71b00b3fda5c4a9604e71d489 100644
--- a/lib/markdown2.py
+++ b/lib/markdown2.py
@@ -56,6 +56,8 @@ see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
   string to use for a "class" tag attribute. Currently only supports "img",
   "table", "pre" and "code" tags. Add an issue if you require this for other
   tags.
+* link-patterns: Auto-link given regex patterns in text (e.g. bug number
+  references, revision number references).
 * markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
   have markdown processing be done on its contents. Similar to
   <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
@@ -64,23 +66,28 @@ see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
   See <https://github.com/trentm/python-markdown2/issues/77> for details.
 * nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
   <http://en.wikipedia.org/wiki/Nofollow>.
+* numbering: Support for generic counters. A non-standard extension that
+  allows sequential numbering of figures, tables, equations, exhibits, etc.
 * pyshell: Treats unindented Python interactive shell sessions as <code>
   blocks.
-* link-patterns: Auto-link given regex patterns in text (e.g. bug number
-  references, revision number references).
 * smarty-pants: Replaces ' and " with curly quotation marks or curly
   apostrophes.  Replaces --, ---, ..., and . . . with en dashes, em dashes,
   and ellipses.
 * spoiler: A special kind of blockquote commonly hidden behind a
   click on SO. Syntax per <http://meta.stackexchange.com/a/72878>.
-* toc: The returned HTML string gets a new "toc_html" attribute which is
-  a Table of Contents for the document. (experimental)
-* xml: Passes one-liner processing instructions and namespaced XML tags.
+* tag-friendly: Requires atx style headers to have a space between the # and
+  the header text. Useful for applications that require twitter style tags to
+  pass through the parser.
 * tables: Tables using the same format as GFM
   <https://help.github.com/articles/github-flavored-markdown#tables> and
   PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
+* toc: The returned HTML string gets a new "toc_html" attribute which is
+  a Table of Contents for the document. (experimental)
+* use-file-vars: Look for an Emacs-style markdown-extras file variable to turn
+  on Extras.
 * wiki-tables: Google Code Wiki-style tables. See
   <http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
+* xml: Passes one-liner processing instructions and namespaced XML tags.
 """
 
 # Dev Notes:
@@ -88,13 +95,11 @@ see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
 #   not yet sure if there are implications with this. Compare 'pydoc sre'
 #   and 'perldoc perlre'.
 
-__version_info__ = (2, 3, 1)
+__version_info__ = (2, 3, 4)
 __version__ = '.'.join(map(str, __version_info__))
 __author__ = "Trent Mick"
 
-import os
 import sys
-from pprint import pprint, pformat
 import re
 import logging
 try:
@@ -104,17 +109,15 @@ except ImportError:
 import optparse
 from random import random, randint
 import codecs
-
-
-#---- Python version compat
-
 try:
-    from urllib.parse import quote # python3
+    from urllib import quote_plus
 except ImportError:
-    from urllib import quote # python2
+    from urllib.parse import quote_plus
+
 
-if sys.version_info[:2] < (2,4):
-    from sets import Set as set
+# ---- Python version compat
+
+if sys.version_info[:2] < (2, 4):
     def reversed(sequence):
         for i in sequence[::-1]:
             yield i
@@ -132,9 +135,7 @@ elif sys.version_info[0] >= 3:
     unicode = str
     base_string_type = str
 
-
-
-#---- globals
+# ---- globals
 
 DEBUG = False
 log = logging.getLogger("markdown")
@@ -151,19 +152,17 @@ g_escape_table = dict([(ch, _hash_text(ch))
     for ch in '\\`*_{}[]()>#+-.!'])
 
 
-
-#---- exceptions
-
+# ---- exceptions
 class MarkdownError(Exception):
     pass
 
 
-
-#---- public api
+# ---- public api
 
 def markdown_path(path, encoding="utf-8",
                   html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
                   safe_mode=None, extras=None, link_patterns=None,
+                  footnote_title=None, footnote_return_symbol=None,
                   use_file_vars=False):
     fp = codecs.open(path, 'r', encoding)
     text = fp.read()
@@ -171,16 +170,23 @@ def markdown_path(path, encoding="utf-8",
     return Markdown(html4tags=html4tags, tab_width=tab_width,
                     safe_mode=safe_mode, extras=extras,
                     link_patterns=link_patterns,
+                    footnote_title=footnote_title,
+                    footnote_return_symbol=footnote_return_symbol,
                     use_file_vars=use_file_vars).convert(text)
 
+
 def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
              safe_mode=None, extras=None, link_patterns=None,
+             footnote_title=None, footnote_return_symbol=None,
              use_file_vars=False):
     return Markdown(html4tags=html4tags, tab_width=tab_width,
                     safe_mode=safe_mode, extras=extras,
                     link_patterns=link_patterns,
+                    footnote_title=footnote_title,
+                    footnote_return_symbol=footnote_return_symbol,
                     use_file_vars=use_file_vars).convert(text)
 
+
 class Markdown(object):
     # The dict of "extras" to enable in processing -- a mapping of
     # extra name to argument for the extra. Most extras do not have an
@@ -203,7 +209,9 @@ class Markdown(object):
     _ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
 
     def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
-                 extras=None, link_patterns=None, use_file_vars=False):
+                 extras=None, link_patterns=None,
+                 footnote_title=None, footnote_return_symbol=None,
+                 use_file_vars=False):
         if html4tags:
             self.empty_element_suffix = ">"
         else:
@@ -228,11 +236,13 @@ class Markdown(object):
                 extras = dict([(e, None) for e in extras])
             self.extras.update(extras)
         assert isinstance(self.extras, dict)
-        if "toc" in self.extras and not "header-ids" in self.extras:
+        if "toc" in self.extras and "header-ids" not in self.extras:
             self.extras["header-ids"] = None   # "toc" implies "header-ids"
         self._instance_extras = self.extras.copy()
 
         self.link_patterns = link_patterns
+        self.footnote_title = footnote_title
+        self.footnote_return_symbol = footnote_return_symbol
         self.use_file_vars = use_file_vars
         self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
 
@@ -252,13 +262,28 @@ class Markdown(object):
             self.footnotes = {}
             self.footnote_ids = []
         if "header-ids" in self.extras:
-            self._count_from_header_id = {} # no `defaultdict` in Python 2.4
+            self._count_from_header_id = {}  # no `defaultdict` in Python 2.4
         if "metadata" in self.extras:
             self.metadata = {}
 
     # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
     # should only be used in <a> tags with an "href" attribute.
-    _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
+    _a_nofollow = re.compile(r"""
+        <(a)
+        (
+            [^>]*
+            href=   # href is required
+            ['"]?   # HTML5 attribute values do not have to be quoted
+            [^#'"]  # We don't want to match href values that start with # (like footnotes)
+        )
+        """,
+        re.IGNORECASE | re.VERBOSE
+    )
+
+    # Opens the linked document in a new window or tab;
+    # should only be used in <a> tags with an "href" attribute,
+    # same as _a_nofollow above.
+    _a_blank = _a_nofollow
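The "target-blank-links" extra reuses this href-matching pattern; a hedged example of what it is expected to produce:

```python
import markdown2

html = markdown2.markdown(
    "[docs](http://example.com/)",
    extras=["target-blank-links"],
)
# Expected to contain: <a target="_blank" href="http://example.com/">docs</a>
```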
 
     def convert(self, text):
         """Convert the given text."""
@@ -274,7 +299,7 @@ class Markdown(object):
         self.reset()
 
         if not isinstance(text, unicode):
-            #TODO: perhaps shouldn't presume UTF-8 for string input?
+            # TODO: perhaps shouldn't presume UTF-8 for string input?
             text = unicode(text, 'utf-8')
 
         if self.use_file_vars:
@@ -294,7 +319,8 @@ class Markdown(object):
                     self.extras[ename] = earg
 
         # Standardize line endings:
-        text = re.sub("\r\n|\r", "\n", text)
+        text = text.replace("\r\n", "\n")
+        text = text.replace("\r", "\n")
 
         # Make sure $text ends with a couple of newlines:
         text += "\n\n"
@@ -326,6 +352,11 @@ class Markdown(object):
         if "fenced-code-blocks" in self.extras and self.safe_mode:
             text = self._do_fenced_code_blocks(text)
 
+        # Because numbering references aren't links (yet?), we can handle
+        # everything associated with counters before we get started.
+        if "numbering" in self.extras:
+            text = self._do_numbering(text)
+
         # Strip link definitions, store in hashes.
         if "footnotes" in self.extras:
             # Must do footnotes first because an unlucky footnote defn
@@ -349,6 +380,9 @@ class Markdown(object):
         if "nofollow" in self.extras:
             text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
 
+        if "target-blank-links" in self.extras:
+            text = self._a_blank.sub(r'<\1 target="_blank"\2', text)
+
         text += "\n"
 
         rv = UnicodeWithAttrs(text)
@@ -372,30 +406,53 @@ class Markdown(object):
         """
         return text
 
-    # Is metadata if the content starts with '---'-fenced `key: value`
+    # Is metadata if the content starts with optional '---'-fenced `key: value`
     # pairs. E.g. (indented for presentation):
     #   ---
     #   foo: bar
     #   another-var: blah blah
     #   ---
-    _metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
+    #   # header
+    # or:
+    #   foo: bar
+    #   another-var: blah blah
+    #
+    #   # header
+    _meta_data_pattern = re.compile(r'^(?:---[\ \t]*\n)?(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)|([\S\w]+\s*:(?! >)[ \t]*.*\n?)(?:---[\ \t]*\n)?', re.MULTILINE)
+    _key_val_pat = re.compile("[\S\w]+\s*:(?! >)[ \t]*.*\n?", re.MULTILINE)
+    # this allows key: >
+    #                   value
+    #                   continues over multiple lines
+    _key_val_block_pat = re.compile(
+        "(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)", re.MULTILINE)
+    _meta_data_fence_pattern = re.compile(r'^---[\ \t]*\n', re.MULTILINE)
+    _meta_data_newline = re.compile("^\n", re.MULTILINE)
 
     def _extract_metadata(self, text):
-        # fast test
-        if not text.startswith("---"):
-            return text
-        match = self._metadata_pat.match(text)
-        if not match:
-            return text
+        if text.startswith("---"):
+            fence_splits = re.split(self._meta_data_fence_pattern, text, maxsplit=2)
+            metadata_content = fence_splits[1]
+            match = re.findall(self._meta_data_pattern, metadata_content)
+            if not match:
+                return text
+            tail = fence_splits[2]
+        else:
+            metadata_split = re.split(self._meta_data_newline, text, maxsplit=1)
+            metadata_content = metadata_split[0]
+            match = re.findall(self._meta_data_pattern, metadata_content)
+            if not match:
+                return text
+            tail = metadata_split[1]
 
-        tail = text[len(match.group(0)):]
-        metadata_str = match.group(1).strip()
-        for line in metadata_str.split('\n'):
-            key, value = line.split(':', 1)
-            self.metadata[key.strip()] = value.strip()
+        kv = re.findall(self._key_val_pat, text)
+        kvm = re.findall(self._key_val_block_pat, text)
+        kvm = [item.replace(": >\n", ":", 1) for item in kvm]
 
-        return tail
+        for item in kv + kvm:
+            k, v = item.split(":", 1)
+            self.metadata[k.strip()] = v.strip()
 
+        return tail
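A short sketch of the reworked "metadata" extra: the `---` fence is now optional, and simple `key: value` pairs land in the `.metadata` attribute of the result:

```python
import markdown2

doc = "title: Release notes\nauthor: Jane Doe\n\n# Heading\n"
html = markdown2.markdown(doc, extras=["metadata"])
print(html.metadata)   # {'title': 'Release notes', 'author': 'Jane Doe'}
```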
 
     _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
     # This regular expression is intended to match blocks like this:
@@ -421,7 +478,7 @@ class Markdown(object):
         http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
         """
         emacs_vars = {}
-        SIZE = pow(2, 13) # 8kB
+        SIZE = pow(2, 13)  # 8kB
 
         # Search near the start for a '-*-'-style one-liner of variables.
         head = text[:SIZE]
@@ -457,7 +514,7 @@ class Markdown(object):
                 prefix = match.group("prefix")
                 suffix = match.group("suffix")
                 lines = match.group("content").splitlines(0)
-                #print "prefix=%r, suffix=%r, content=%r, lines: %s"\
+                # print "prefix=%r, suffix=%r, content=%r, lines: %s"\
                 #      % (prefix, suffix, match.group("content"), lines)
 
                 # Validate the Local Variables block: proper prefix and suffix
@@ -478,9 +535,9 @@ class Markdown(object):
 
                 # Parse out one emacs var per line.
                 continued_for = None
-                for line in lines[:-1]: # no var on the last line ("PREFIX End:")
-                    if prefix: line = line[len(prefix):] # strip prefix
-                    if suffix: line = line[:-len(suffix)] # strip suffix
+                for line in lines[:-1]:  # no var on the last line ("PREFIX End:")
+                    if prefix: line = line[len(prefix):]  # strip prefix
+                    if suffix: line = line[:-len(suffix)]  # strip suffix
                     line = line.strip()
                     if continued_for:
                         variable = continued_for
@@ -514,14 +571,19 @@ class Markdown(object):
 
         return emacs_vars
 
-    # Cribbed from a post by Bart Lateur:
-    # <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
-    _detab_re = re.compile(r'(.*?)\t', re.M)
-    def _detab_sub(self, match):
-        g1 = match.group(1)
-        return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
+    def _detab_line(self, line):
+        r"""Recusively convert tabs to spaces in a single line.
+
+        Called from _detab()."""
+        if '\t' not in line:
+            return line
+        chunk1, chunk2 = line.split('\t', 1)
+        chunk1 += (' ' * (self.tab_width - len(chunk1) % self.tab_width))
+        output = chunk1 + chunk2
+        return self._detab_line(output)
+
     def _detab(self, text):
-        r"""Remove (leading?) tabs from a file.
+        r"""Iterate text line by line and convert tabs to spaces.
 
             >>> m = Markdown()
             >>> m._detab("\tfoo")
@@ -537,7 +599,10 @@ class Markdown(object):
         """
         if '\t' not in text:
             return text
-        return self._detab_re.subn(self._detab_sub, text)[0]
+        output = []
+        for line in text.splitlines():
+            output.append(self._detab_line(line))
+        return '\n'.join(output)
 
     # I broke out the html5 tags here and add them to _block_tags_a and
     # _block_tags_b.  This way html5 tags are easy to keep track of.
@@ -743,6 +808,64 @@ class Markdown(object):
             self.titles[key] = title
         return ""
 
+    def _do_numbering(self, text):
+        '''Handle the special "numbering" extension: generic counters for
+            sequential numbering of tables, figures, etc.
+        '''
+        # First pass to define all the references
+        self.regex_defns = re.compile(r'''
+            \[\#(\w+)\s* # the counter.  Open square plus hash plus a word \1
+            ([^@]*)\s*   # Some optional characters, that aren't an @. \2
+            @(\w+)       # the id.  Should this be normed? \3
+            ([^\]]*)\]   # The rest of the text up to the terminating ] \4
+            ''', re.VERBOSE)
+        self.regex_subs = re.compile(r"\[@(\w+)\s*\]")  # [@ref_id]
+        counters = {}
+        references = {}
+        replacements = []
+        definition_html = '<figcaption class="{}" id="counter-ref-{}">{}{}{}</figcaption>'
+        reference_html = '<a class="{}" href="#counter-ref-{}">{}</a>'
+        for match in self.regex_defns.finditer(text):
+            # We must have four match groups otherwise this isn't a numbering reference
+            if len(match.groups()) != 4:
+                continue
+            counter = match.group(1)
+            text_before = match.group(2)
+            ref_id = match.group(3)
+            text_after = match.group(4)
+            number = counters.get(counter, 1)
+            references[ref_id] = (number, counter)
+            replacements.append((match.start(0),
+                                 definition_html.format(counter,
+                                                        ref_id,
+                                                        text_before,
+                                                        number,
+                                                        text_after),
+                                 match.end(0)))
+            counters[counter] = number + 1
+        for repl in reversed(replacements):
+            text = text[:repl[0]] + repl[1] + text[repl[2]:]
+
+        # Second pass to replace the references with the right
+        # value of the counter
+        # Fwiw, it's vaguely annoying to have to turn the iterator into
+        # a list and then reverse it but I can't think of a better thing to do.
+        for match in reversed(list(self.regex_subs.finditer(text))):
+            number, counter = references.get(match.group(1), (None, None))
+            if number is not None:
+                repl = reference_html.format(counter,
+                                             match.group(1),
+                                             number)
+            else:
+                repl = reference_html.format(match.group(1),
+                                             'countererror',
+                                             '?' + match.group(1) + '?')
+            if "smarty-pants" in self.extras:
+                repl = repl.replace('"', self._escape_table['"'])
+
+            text = text[:match.start()] + repl + text[match.end():]
+        return text
+
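A hedged sketch of the "numbering" extra implemented above: `[#counter text @id more]` defines a numbered caption and `[@id]` references it (the HTML in the comments is what the substitution templates above produce):

```python
import markdown2

doc = (
    "[#figure Figure @fig1: cats sleeping]\n\n"
    "See [@fig1] for details.\n"
)
print(markdown2.markdown(doc, extras=["numbering"]))
# Definition -> <figcaption class="figure" id="counter-ref-fig1">Figure 1: cats sleeping</figcaption>
# Reference  -> <a class="figure" href="#counter-ref-fig1">1</a>
```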
     def _extract_footnote_def_sub(self, match):
         id, text = match.groups()
         text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
@@ -831,7 +954,7 @@ class Markdown(object):
         lines = match.group(0).splitlines(0)
         _dedentlines(lines)
         indent = ' ' * self.tab_width
-        s = ('\n' # separate from possible cuddled paragraph
+        s = ('\n'  # separate from possible cuddled paragraph
              + indent + ('\n'+indent).join(lines)
              + '\n\n')
         return s
@@ -853,10 +976,15 @@ class Markdown(object):
         return _pyshell_block_re.sub(self._pyshell_block_sub, text)
 
     def _table_sub(self, match):
+        trim_space_re = '^[ \t\n]+|[ \t\n]+$'
+        trim_bar_re = '^\||\|$'
+        split_bar_re = '^\||(?<!\\\\)\|'
+        escape_bar_re = '\\\\\|'
+
         head, underline, body = match.groups()
 
         # Determine aligns for columns.
-        cols = [cell.strip() for cell in underline.strip('| \t\n').split('|')]
+        cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", underline)))]
         align_from_col_idx = {}
         for col_idx, col in enumerate(cols):
             if col[0] == ':' and col[-1] == ':':
@@ -868,7 +996,7 @@ class Markdown(object):
 
         # thead
         hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<thead>', '<tr>']
-        cols = [cell.strip() for cell in head.strip('| \t\n').split('|')]
+        cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", head)))]
         for col_idx, col in enumerate(cols):
             hlines.append('  <th%s>%s</th>' % (
                 align_from_col_idx.get(col_idx, ''),
@@ -881,7 +1009,7 @@ class Markdown(object):
         hlines.append('<tbody>')
         for line in body.strip('\n').split('\n'):
             hlines.append('<tr>')
-            cols = [cell.strip() for cell in line.strip('| \t\n').split('|')]
+            cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", line)))]
             for col_idx, col in enumerate(cols):
                 hlines.append('  <td%s>%s</td>' % (
                     align_from_col_idx.get(col_idx, ''),
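The new table-cell splitting honours escaped pipes, so a literal `|` can be written as `\|` inside a cell. A minimal sketch:

```python
import markdown2

table = (
    "| expression | result |\n"
    "|------------|--------|\n"
    "| `a \\| b`   | union  |\n"
)
print(markdown2.markdown(table, extras=["tables"]))
# The first body cell should render as <code>a | b</code>, not split into two cells.
```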
@@ -924,13 +1052,13 @@ class Markdown(object):
 
     def _wiki_table_sub(self, match):
         ttext = match.group(0).strip()
-        #print 'wiki table: %r' % match.group(0)
+        # print 'wiki table: %r' % match.group(0)
         rows = []
         for line in ttext.splitlines(0):
             line = line.strip()[2:-2].strip()
             row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
             rows.append(row)
-        #pprint(rows)
+        # pprint(rows)
         hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<tbody>']
         for row in rows:
             hrow = ['<tr>']
@@ -977,6 +1105,9 @@ class Markdown(object):
 
         text = self._encode_amps_and_angles(text)
 
+        if "strike" in self.extras:
+            text = self._do_strike(text)
+
         text = self._do_italics_and_bold(text)
 
         if "smarty-pants" in self.extras:
@@ -1140,6 +1271,7 @@ class Markdown(object):
             url = self._strip_anglebrackets.sub(r'\1', url)
         return url, title, end_idx
 
+    _safe_protocols = re.compile(r'(https?|ftp):', re.I)
     def _do_links(self, text):
         """Turn Markdown link shortcuts into XHTML <a> and <img> tags.
 
@@ -1157,7 +1289,7 @@ class Markdown(object):
         anchor_allowed_pos = 0
 
         curr_pos = 0
-        while True: # Handle the next link.
+        while True:  # Handle the next link.
             # The next '[' is the start of:
             # - an inline anchor:   [text](url "title")
             # - a reference anchor: [text][id]
@@ -1221,7 +1353,7 @@ class Markdown(object):
                 return text
 
             # Inline anchor or img?
-            if text[p] == '(': # attempt at perf improvement
+            if text[p] == '(':  # attempt at perf improvement
                 url, title, url_end_idx = self._extract_url_and_title(text, p)
                 if url is not None:
                     # Handle an inline anchor or img.
@@ -1243,16 +1375,21 @@ class Markdown(object):
                     if is_img:
                         img_class_str = self._html_class_str_from_tag("img")
                         result = '<img src="%s" alt="%s"%s%s%s' \
-                            % (url.replace('"', '&quot;'),
+                            % (_html_escape_url(url, safe_mode=self.safe_mode),
                                _xml_escape_attr(link_text),
-                               title_str, img_class_str, self.empty_element_suffix)
+                               title_str,
+                               img_class_str,
+                               self.empty_element_suffix)
                         if "smarty-pants" in self.extras:
                             result = result.replace('"', self._escape_table['"'])
                         curr_pos = start_idx + len(result)
                         text = text[:start_idx] + result + text[url_end_idx:]
                     elif start_idx >= anchor_allowed_pos:
-                        result_head = '<a href="%s"%s>' % (url, title_str)
-                        result = '%s%s</a>' % (result_head, link_text)
+                        if self.safe_mode and not self._safe_protocols.match(url):
+                            result_head = '<a href="#"%s>' % (title_str)
+                        else:
+                            result_head = '<a href="%s"%s>' % (_html_escape_url(url, safe_mode=self.safe_mode), title_str)
+                        result = '%s%s</a>' % (result_head, _xml_escape_attr(link_text))
                         if "smarty-pants" in self.extras:
                             result = result.replace('"', self._escape_table['"'])
                         # <img> allowed from curr_pos on, <a> from
@@ -1284,7 +1421,6 @@ class Markdown(object):
                                  .replace('_', self._escape_table['_'])
                         title = self.titles.get(link_id)
                         if title:
-                            before = title
                             title = _xml_escape_attr(title) \
                                 .replace('*', self._escape_table['*']) \
                                 .replace('_', self._escape_table['_'])
@@ -1294,17 +1430,20 @@ class Markdown(object):
                         if is_img:
                             img_class_str = self._html_class_str_from_tag("img")
                             result = '<img src="%s" alt="%s"%s%s%s' \
-                                % (url.replace('"', '&quot;'),
-                                   link_text.replace('"', '&quot;'),
-                                   title_str, img_class_str, self.empty_element_suffix)
+                                % (_html_escape_url(url, safe_mode=self.safe_mode),
+                                   _xml_escape_attr(link_text),
+                                   title_str,
+                                   img_class_str,
+                                   self.empty_element_suffix)
                             if "smarty-pants" in self.extras:
                                 result = result.replace('"', self._escape_table['"'])
                             curr_pos = start_idx + len(result)
                             text = text[:start_idx] + result + text[match.end():]
                         elif start_idx >= anchor_allowed_pos:
-                            result = '<a href="%s"%s>%s</a>' \
-                                % (url, title_str, link_text)
-                            result_head = '<a href="%s"%s>' % (url, title_str)
+                            if self.safe_mode and not self._safe_protocols.match(url):
+                                result_head = '<a href="#"%s>' % (title_str)
+                            else:
+                                result_head = '<a href="%s"%s>' % (_html_escape_url(url, safe_mode=self.safe_mode), title_str)
                             result = '%s%s</a>' % (result_head, link_text)
                             if "smarty-pants" in self.extras:
                                 result = result.replace('"', self._escape_table['"'])
@@ -1349,6 +1488,9 @@ class Markdown(object):
             header_id += '-%s' % self._count_from_header_id[header_id]
         else:
             self._count_from_header_id[header_id] = 1
+            if 0 == len(header_id):
+                header_id += '-%s' % self._count_from_header_id[header_id]
+
         return header_id
 
     _toc = None
@@ -1416,7 +1558,7 @@ class Markdown(object):
             return self._h_re_tag_friendly.sub(self._h_sub, text)
         return self._h_re.sub(self._h_sub, text)
 
-    _marker_ul_chars  = '*+-'
+    _marker_ul_chars = '*+-'
     _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
     _marker_ul = '(?:[%s])' % _marker_ul_chars
     _marker_ol = r'(?:\d+\.)'
@@ -1478,7 +1620,7 @@ class Markdown(object):
             start, end = match.span()
             middle = self._list_sub(match)
             text = text[:start] + middle + text[end:]
-            pos = start + len(middle) # start pos for next attempted match
+            pos = start + len(middle)  # start pos for next attempted match
 
         return text
 
@@ -1487,16 +1629,30 @@ class Markdown(object):
         (^[ \t]*)               # leading whitespace = \2
         (?P<marker>%s) [ \t]+   # list marker = \3
         ((?:.+?)                # list item text = \4
-         (\n{1,2}))             # eols = \5
+        (\n{1,2}))              # eols = \5
         (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
         ''' % (_marker_any, _marker_any),
         re.M | re.X | re.S)
 
+    _task_list_item_re = re.compile(r'''
+        (\[[\ x]\])[ \t]+       # tasklist marker = \1
+        (.*)                   # list item text = \2
+    ''', re.M | re.X | re.S)
+
+    _task_list_warpper_str = r'<input type="checkbox" class="task-list-item-checkbox" %sdisabled> %s'
+
+    def _task_list_item_sub(self, match):
+        marker = match.group(1)
+        item_text = match.group(2)
+        if marker == '[x]':
+            return self._task_list_warpper_str % ('checked ', item_text)
+        elif marker == '[ ]':
+            return self._task_list_warpper_str % ('', item_text)
+
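A hedged sketch of the "task_list" extra wired in below (note the extra name uses an underscore): GitHub-style checkboxes inside list items.

```python
import markdown2

todo = "- [x] write docs\n- [ ] ship release\n"
print(markdown2.markdown(todo, extras=["task_list"]))
# Each item is expected to become an <input type="checkbox" ... disabled> inside its <li>,
# with the first one checked.
```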
     _last_li_endswith_two_eols = False
     def _list_item_sub(self, match):
         item = match.group(4)
         leading_line = match.group(1)
-        leading_space = match.group(2)
         if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
             item = self._run_block_gamut(self._outdent(item))
         else:
@@ -1506,6 +1662,10 @@ class Markdown(object):
                 item = item[:-1]
             item = self._run_span_gamut(item)
         self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
+
+        if "task_list" in self.extras:
+            item = self._task_list_item_re.sub(self._task_list_item_sub, item)
+
         return "<li>%s</li>\n" % item
 
     def _process_list_items(self, list_str):
@@ -1594,7 +1754,7 @@ class Markdown(object):
                 formatter_opts = self.extras['code-color'] or {}
 
         if lexer_name:
-            def unhash_code( codeblock ):
+            def unhash_code(codeblock):
                 for key, sanitized in list(self.html_spans.items()):
                     codeblock = codeblock.replace(key, sanitized)
                 replacements = [
@@ -1652,14 +1812,14 @@ class Markdown(object):
         return code_block_re.sub(self._code_block_sub, text)
 
     _fenced_code_block_re = re.compile(r'''
-        (?:\n\n|\A\n?)
+        (?:\n+|\A\n?)
         ^```([\w+-]+)?[ \t]*\n      # opening fence, $1 = optional lang
         (.*?)                       # $2 = code block content
         ^```[ \t]*\n                # closing fence
         ''', re.M | re.X | re.S)
 
     def _fenced_code_block_sub(self, match):
-        return self._code_block_sub(match, is_fenced_code_block=True);
+        return self._code_block_sub(match, is_fenced_code_block=True)
 
     def _do_fenced_code_blocks(self, text):
         """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
@@ -1732,6 +1892,11 @@ class Markdown(object):
         self._escape_table[text] = hashed
         return hashed
 
+    _strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S)
+    def _do_strike(self, text):
+        text = self._strike_re.sub(r"<strike>\1</strike>", text)
+        return text
+
     _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
     _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
     _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
@@ -1776,12 +1941,12 @@ class Markdown(object):
         <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
         discussion of some diversion from the original SmartyPants.
         """
-        if "'" in text: # guard for perf
+        if "'" in text:  # guard for perf
             text = self._do_smart_contractions(text)
             text = self._opening_single_quote_re.sub("&#8216;", text)
             text = self._closing_single_quote_re.sub("&#8217;", text)
 
-        if '"' in text: # guard for perf
+        if '"' in text:  # guard for perf
             text = self._opening_double_quote_re.sub("&#8220;", text)
             text = self._closing_double_quote_re.sub("&#8221;", text)
 
@@ -1804,8 +1969,8 @@ class Markdown(object):
     '''
     _block_quote_re = re.compile(_block_quote_base % '', re.M | re.X)
     _block_quote_re_spoiler = re.compile(_block_quote_base % '[ \t]*?!?', re.M | re.X)
-    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M);
-    _bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M);
+    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
+    _bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M)
     _bq_all_lines_spoilers = re.compile(r'\A(?:^[ \t]*>[ \t]*?!.*[\n\r]*)+\Z', re.M)
     _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
     def _dedent_two_spaces_sub(self, match):
@@ -1884,15 +2049,31 @@ class Markdown(object):
                 '<hr' + self.empty_element_suffix,
                 '<ol>',
             ]
+
+            if not self.footnote_title:
+                self.footnote_title = "Jump back to footnote %d in the text."
+            if not self.footnote_return_symbol:
+                self.footnote_return_symbol = "&#8617;"
+
             for i, id in enumerate(self.footnote_ids):
                 if i != 0:
                     footer.append('')
                 footer.append('<li id="fn-%s">' % id)
                 footer.append(self._run_block_gamut(self.footnotes[id]))
-                backlink = ('<a href="#fnref-%s" '
-                    'class="footnoteBackLink" '
-                    'title="Jump back to footnote %d in the text.">'
-                    '&#8617;</a>' % (id, i+1))
+                try:
+                    backlink = ('<a href="#fnref-%s" ' +
+                            'class="footnoteBackLink" ' +
+                            'title="' + self.footnote_title + '">' +
+                            self.footnote_return_symbol +
+                            '</a>') % (id, i+1)
+                except TypeError:
+                    log.debug("Footnote error. `footnote_title` "
+                              "must include a parameter. Using defaults.")
+                    backlink = ('<a href="#fnref-%s" '
+                        'class="footnoteBackLink" '
+                        'title="Jump back to footnote %d in the text.">'
+                        '&#8617;</a>' % (id, i+1))
+
                 if footer[-1].endswith("</p>"):
                     footer[-1] = footer[-1][:-len("</p>")] \
                         + '&#160;' + backlink + "</p>"
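The footnote footer can now be customised; `footnote_title` should contain a `%d` placeholder for the footnote number, otherwise the defaults are used. A minimal sketch:

```python
import markdown2

text = "A claim.[^1]\n\n[^1]: Where the claim comes from.\n"
html = markdown2.markdown(
    text,
    extras=["footnotes"],
    footnote_title="Back to reference %d",
    footnote_return_symbol="&#8593;",
)
print(html)   # back-links use the custom title and arrow instead of the defaults
```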
@@ -2031,7 +2212,7 @@ class MarkdownWithExtras(Markdown):
     extras = ["footnotes", "code-color"]
 
 
-#---- internal support functions
+# ---- internal support functions
 
 class UnicodeWithAttrs(unicode):
     """A subclass of unicode used for the return value of conversion to
@@ -2100,6 +2281,7 @@ def _curry(*args, **kwargs):
         return function(*args + rest, **combined)
     return result
 
+
 # Recipe: regex_from_encoded_pattern (1.0)
 def _regex_from_encoded_pattern(s):
     """'foo'    -> re.compile(re.escape('foo'))
@@ -2126,9 +2308,10 @@ def _regex_from_encoded_pattern(s):
                                  "(must be one of '%s')"
                                  % (char, s, ''.join(list(flag_from_char.keys()))))
         return re.compile(s[1:idx], flags)
-    else: # not an encoded regex
+    else:  # not an encoded regex
         return re.compile(re.escape(s))
 
+
 # Recipe: dedent (0.1.2)
 def _dedentlines(lines, tabsize=8, skip_first_line=False):
     """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
@@ -2146,7 +2329,6 @@ def _dedentlines(lines, tabsize=8, skip_first_line=False):
     if DEBUG:
         print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
               % (tabsize, skip_first_line))
-    indents = []
     margin = None
     for i, line in enumerate(lines):
         if i == 0 and skip_first_line: continue
@@ -2157,11 +2339,11 @@ def _dedentlines(lines, tabsize=8, skip_first_line=False):
             elif ch == '\t':
                 indent += tabsize - (indent % tabsize)
             elif ch in '\r\n':
-                continue # skip all-whitespace lines
+                continue  # skip all-whitespace lines
             else:
                 break
         else:
-            continue # skip all-whitespace lines
+            continue  # skip all-whitespace lines
         if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
         if margin is None:
             margin = indent
@@ -2200,6 +2382,7 @@ def _dedentlines(lines, tabsize=8, skip_first_line=False):
                     lines[i] = lines[i][removed:]
     return lines
 
+
 def _dedent(text, tabsize=8, skip_first_line=False):
     """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
 
@@ -2217,28 +2400,30 @@ def _dedent(text, tabsize=8, skip_first_line=False):
 
 
 class _memoized(object):
-   """Decorator that caches a function's return value each time it is called.
-   If called later with the same arguments, the cached value is returned, and
-   not re-evaluated.
-
-   http://wiki.python.org/moin/PythonDecoratorLibrary
-   """
-   def __init__(self, func):
-      self.func = func
-      self.cache = {}
-   def __call__(self, *args):
-      try:
-         return self.cache[args]
-      except KeyError:
-         self.cache[args] = value = self.func(*args)
-         return value
-      except TypeError:
-         # uncachable -- for instance, passing a list as an argument.
-         # Better to not cache than to blow up entirely.
-         return self.func(*args)
-   def __repr__(self):
-      """Return the function's docstring."""
-      return self.func.__doc__
+    """Decorator that caches a function's return value each time it is called.
+    If called later with the same arguments, the cached value is returned, and
+    not re-evaluated.
+
+    http://wiki.python.org/moin/PythonDecoratorLibrary
+    """
+    def __init__(self, func):
+        self.func = func
+        self.cache = {}
+
+    def __call__(self, *args):
+        try:
+            return self.cache[args]
+        except KeyError:
+            self.cache[args] = value = self.func(*args)
+            return value
+        except TypeError:
+            # uncachable -- for instance, passing a list as an argument.
+            # Better to not cache than to blow up entirely.
+            return self.func(*args)
+
+    def __repr__(self):
+        """Return the function's docstring."""
+        return self.func.__doc__
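An illustrative use of the memoizing helper above (it is applied just below to cache the tab-width-dependent regexes):

```python
from markdown2 import _memoized

@_memoized
def squares_up_to(n):
    return [i * i for i in range(n)]

# Repeated calls with the same hashable argument return the cached object.
assert squares_up_to(4) is squares_up_to(4)
```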
 
 
 def _xml_oneliner_re_from_tab_width(tab_width):
@@ -2262,8 +2447,9 @@ def _xml_oneliner_re_from_tab_width(tab_width):
         """ % (tab_width - 1), re.X)
 _xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
 
+
 def _hr_tag_re_from_tab_width(tab_width):
-     return re.compile(r"""
+    return re.compile(r"""
         (?:
             (?<=\n\n)       # Starting after a blank line
             |               # or
@@ -2312,18 +2498,31 @@ def _xml_encode_email_char_at_random(ch):
         return '&#%s;' % ord(ch)
 
 
+def _html_escape_url(attr, safe_mode=False):
+    """Replace special characters that are potentially malicious in url string."""
+    escaped = (attr
+        .replace('"', '&quot;')
+        .replace('<', '&lt;')
+        .replace('>', '&gt;'))
+    if safe_mode:
+        escaped = escaped.replace('+', ' ')
+        escaped = escaped.replace("'", "&#39;")
+    return escaped
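With the stricter link handling above, safe mode rejects href values outside http/https/ftp and escapes the rest. A hedged example:

```python
import markdown2

unsafe = "[local file](file:///etc/passwd)"
print(markdown2.markdown(unsafe, safe_mode="escape"))
# Expected: the anchor's href is replaced with "#" because file: is not a safe protocol.
```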
+
 
-#---- mainline
+# ---- mainline
 
 class _NoReflowFormatter(optparse.IndentedHelpFormatter):
     """An optparse formatter that does NOT reflow the description."""
     def format_description(self, description):
         return description or ""
 
+
 def _test():
     import doctest
     doctest.testmod()
 
+
 def main(argv=None):
     if argv is None:
         argv = sys.argv
@@ -2440,7 +2639,7 @@ def main(argv=None):
                 sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
         if extras and "toc" in extras:
             log.debug("toc_html: " +
-                html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+                str(html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')))
         if opts.compare:
             test_dir = join(dirname(dirname(abspath(__file__))), "test")
             if exists(join(test_dir, "test_markdown2.py")):
@@ -2455,4 +2654,4 @@ def main(argv=None):
 
 
 if __name__ == "__main__":
-    sys.exit( main(sys.argv) )
+    sys.exit(main(sys.argv))
diff --git a/lib/simplejson/__init__.py b/lib/simplejson/__init__.py
deleted file mode 100644
index d5b4d39913770fad1b415caf6cc08f08a5d38bac..0000000000000000000000000000000000000000
--- a/lib/simplejson/__init__.py
+++ /dev/null
@@ -1,318 +0,0 @@
-r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
-JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
-interchange format.
-
-:mod:`simplejson` exposes an API familiar to users of the standard library
-:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
-version of the :mod:`json` library contained in Python 2.6, but maintains
-compatibility with Python 2.4 and Python 2.5 and (currently) has
-significant performance advantages, even without using the optional C
-extension for speedups.
-
-Encoding basic Python object hierarchies::
-
-    >>> import simplejson as json
-    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
-    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
-    >>> print json.dumps("\"foo\bar")
-    "\"foo\bar"
-    >>> print json.dumps(u'\u1234')
-    "\u1234"
-    >>> print json.dumps('\\')
-    "\\"
-    >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
-    {"a": 0, "b": 0, "c": 0}
-    >>> from StringIO import StringIO
-    >>> io = StringIO()
-    >>> json.dump(['streaming API'], io)
-    >>> io.getvalue()
-    '["streaming API"]'
-
-Compact encoding::
-
-    >>> import simplejson as json
-    >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
-    '[1,2,3,{"4":5,"6":7}]'
-
-Pretty printing::
-
-    >>> import simplejson as json
-    >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
-    >>> print '\n'.join([l.rstrip() for l in  s.splitlines()])
-    {
-        "4": 5,
-        "6": 7
-    }
-
-Decoding JSON::
-
-    >>> import simplejson as json
-    >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
-    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
-    True
-    >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
-    True
-    >>> from StringIO import StringIO
-    >>> io = StringIO('["streaming API"]')
-    >>> json.load(io)[0] == 'streaming API'
-    True
-
-Specializing JSON object decoding::
-
-    >>> import simplejson as json
-    >>> def as_complex(dct):
-    ...     if '__complex__' in dct:
-    ...         return complex(dct['real'], dct['imag'])
-    ...     return dct
-    ...
-    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
-    ...     object_hook=as_complex)
-    (1+2j)
-    >>> import decimal
-    >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
-    True
-
-Specializing JSON object encoding::
-
-    >>> import simplejson as json
-    >>> def encode_complex(obj):
-    ...     if isinstance(obj, complex):
-    ...         return [obj.real, obj.imag]
-    ...     raise TypeError(repr(o) + " is not JSON serializable")
-    ...
-    >>> json.dumps(2 + 1j, default=encode_complex)
-    '[2.0, 1.0]'
-    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
-    '[2.0, 1.0]'
-    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
-    '[2.0, 1.0]'
-
-
-Using simplejson.tool from the shell to validate and pretty-print::
-
-    $ echo '{"json":"obj"}' | python -m simplejson.tool
-    {
-        "json": "obj"
-    }
-    $ echo '{ 1.2:3.4}' | python -m simplejson.tool
-    Expecting property name: line 1 column 2 (char 2)
-"""
-__version__ = '2.0.9'
-__all__ = [
-    'dump', 'dumps', 'load', 'loads',
-    'JSONDecoder', 'JSONEncoder',
-]
-
-__author__ = 'Bob Ippolito <bob@redivi.com>'
-
-from decoder import JSONDecoder
-from encoder import JSONEncoder
-
-_default_encoder = JSONEncoder(
-    skipkeys=False,
-    ensure_ascii=True,
-    check_circular=True,
-    allow_nan=True,
-    indent=None,
-    separators=None,
-    encoding='utf-8',
-    default=None,
-)
-
-def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
-        allow_nan=True, cls=None, indent=None, separators=None,
-        encoding='utf-8', default=None, **kw):
-    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
-    ``.write()``-supporting file-like object).
-
-    If ``skipkeys`` is true then ``dict`` keys that are not basic types
-    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
-    will be skipped instead of raising a ``TypeError``.
-
-    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
-    may be ``unicode`` instances, subject to normal Python ``str`` to
-    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
-    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
-    to cause an error.
-
-    If ``check_circular`` is false, then the circular reference check
-    for container types will be skipped and a circular reference will
-    result in an ``OverflowError`` (or worse).
-
-    If ``allow_nan`` is false, then it will be a ``ValueError`` to
-    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
-    in strict compliance of the JSON specification, instead of using the
-    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
-
-    If ``indent`` is a non-negative integer, then JSON array elements and object
-    members will be pretty-printed with that indent level. An indent level
-    of 0 will only insert newlines. ``None`` is the most compact representation.
-
-    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
-    then it will be used instead of the default ``(', ', ': ')`` separators.
-    ``(',', ':')`` is the most compact JSON representation.
-
-    ``encoding`` is the character encoding for str instances, default is UTF-8.
-
-    ``default(obj)`` is a function that should return a serializable version
-    of obj or raise TypeError. The default simply raises TypeError.
-
-    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
-    ``.default()`` method to serialize additional types), specify it with
-    the ``cls`` kwarg.
-
-    """
-    # cached encoder
-    if (not skipkeys and ensure_ascii and
-        check_circular and allow_nan and
-        cls is None and indent is None and separators is None and
-        encoding == 'utf-8' and default is None and not kw):
-        iterable = _default_encoder.iterencode(obj)
-    else:
-        if cls is None:
-            cls = JSONEncoder
-        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
-            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
-            separators=separators, encoding=encoding,
-            default=default, **kw).iterencode(obj)
-    # could accelerate with writelines in some versions of Python, at
-    # a debuggability cost
-    for chunk in iterable:
-        fp.write(chunk)
-
-
-def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
-        allow_nan=True, cls=None, indent=None, separators=None,
-        encoding='utf-8', default=None, **kw):
-    """Serialize ``obj`` to a JSON formatted ``str``.
-
-    If ``skipkeys`` is false then ``dict`` keys that are not basic types
-    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
-    will be skipped instead of raising a ``TypeError``.
-
-    If ``ensure_ascii`` is false, then the return value will be a
-    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
-    coercion rules instead of being escaped to an ASCII ``str``.
-
-    If ``check_circular`` is false, then the circular reference check
-    for container types will be skipped and a circular reference will
-    result in an ``OverflowError`` (or worse).
-
-    If ``allow_nan`` is false, then it will be a ``ValueError`` to
-    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
-    strict compliance of the JSON specification, instead of using the
-    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
-
-    If ``indent`` is a non-negative integer, then JSON array elements and
-    object members will be pretty-printed with that indent level. An indent
-    level of 0 will only insert newlines. ``None`` is the most compact
-    representation.
-
-    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
-    then it will be used instead of the default ``(', ', ': ')`` separators.
-    ``(',', ':')`` is the most compact JSON representation.
-
-    ``encoding`` is the character encoding for str instances, default is UTF-8.
-
-    ``default(obj)`` is a function that should return a serializable version
-    of obj or raise TypeError. The default simply raises TypeError.
-
-    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
-    ``.default()`` method to serialize additional types), specify it with
-    the ``cls`` kwarg.
-
-    """
-    # cached encoder
-    if (not skipkeys and ensure_ascii and
-        check_circular and allow_nan and
-        cls is None and indent is None and separators is None and
-        encoding == 'utf-8' and default is None and not kw):
-        return _default_encoder.encode(obj)
-    if cls is None:
-        cls = JSONEncoder
-    return cls(
-        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
-        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
-        separators=separators, encoding=encoding, default=default,
-        **kw).encode(obj)
-
-
-_default_decoder = JSONDecoder(encoding=None, object_hook=None)
-
-
-def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
-        parse_int=None, parse_constant=None, **kw):
-    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
-    a JSON document) to a Python object.
-
-    If the contents of ``fp`` is encoded with an ASCII based encoding other
-    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
-    be specified. Encodings that are not ASCII based (such as UCS-2) are
-    not allowed, and should be wrapped with
-    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
-    object and passed to ``loads()``
-
-    ``object_hook`` is an optional function that will be called with the
-    result of any object literal decode (a ``dict``). The return value of
-    ``object_hook`` will be used instead of the ``dict``. This feature
-    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
-
-    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
-    kwarg.
-
-    """
-    return loads(fp.read(),
-        encoding=encoding, cls=cls, object_hook=object_hook,
-        parse_float=parse_float, parse_int=parse_int,
-        parse_constant=parse_constant, **kw)
-
-
-def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
-        parse_int=None, parse_constant=None, **kw):
-    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
-    document) to a Python object.
-
-    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
-    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
-    must be specified. Encodings that are not ASCII based (such as UCS-2)
-    are not allowed and should be decoded to ``unicode`` first.
-
-    ``object_hook`` is an optional function that will be called with the
-    result of any object literal decode (a ``dict``). The return value of
-    ``object_hook`` will be used instead of the ``dict``. This feature
-    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
-
-    ``parse_float``, if specified, will be called with the string
-    of every JSON float to be decoded. By default this is equivalent to
-    float(num_str). This can be used to use another datatype or parser
-    for JSON floats (e.g. decimal.Decimal).
-
-    ``parse_int``, if specified, will be called with the string
-    of every JSON int to be decoded. By default this is equivalent to
-    int(num_str). This can be used to use another datatype or parser
-    for JSON integers (e.g. float).
-
-    ``parse_constant``, if specified, will be called with one of the
-    following strings: -Infinity, Infinity, NaN, null, true, false.
-    This can be used to raise an exception if invalid JSON numbers
-    are encountered.
-
-    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
-    kwarg.
-
-    """
-    if (cls is None and encoding is None and object_hook is None and
-            parse_int is None and parse_float is None and
-            parse_constant is None and not kw):
-        return _default_decoder.decode(s)
-    if cls is None:
-        cls = JSONDecoder
-    if object_hook is not None:
-        kw['object_hook'] = object_hook
-    if parse_float is not None:
-        kw['parse_float'] = parse_float
-    if parse_int is not None:
-        kw['parse_int'] = parse_int
-    if parse_constant is not None:
-        kw['parse_constant'] = parse_constant
-    return cls(encoding=encoding, **kw).decode(s)
diff --git a/lib/simplejson/_speedups.c b/lib/simplejson/_speedups.c
deleted file mode 100644
index 23b5f4a6e6fdebf424317fbb9b0076a3d6ca25d8..0000000000000000000000000000000000000000
--- a/lib/simplejson/_speedups.c
+++ /dev/null
@@ -1,2329 +0,0 @@
-#include "Python.h"
-#include "structmember.h"
-#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TYPE)
-#define Py_TYPE(ob)     (((PyObject*)(ob))->ob_type)
-#endif
-#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
-typedef int Py_ssize_t;
-#define PY_SSIZE_T_MAX INT_MAX
-#define PY_SSIZE_T_MIN INT_MIN
-#define PyInt_FromSsize_t PyInt_FromLong
-#define PyInt_AsSsize_t PyInt_AsLong
-#endif
-#ifndef Py_IS_FINITE
-#define Py_IS_FINITE(X) (!Py_IS_INFINITY(X) && !Py_IS_NAN(X))
-#endif
-
-#ifdef __GNUC__
-#define UNUSED __attribute__((__unused__))
-#else
-#define UNUSED
-#endif
-
-#define DEFAULT_ENCODING "utf-8"
-
-#define PyScanner_Check(op) PyObject_TypeCheck(op, &PyScannerType)
-#define PyScanner_CheckExact(op) (Py_TYPE(op) == &PyScannerType)
-#define PyEncoder_Check(op) PyObject_TypeCheck(op, &PyEncoderType)
-#define PyEncoder_CheckExact(op) (Py_TYPE(op) == &PyEncoderType)
-
-static PyTypeObject PyScannerType;
-static PyTypeObject PyEncoderType;
-
-typedef struct _PyScannerObject {
-    PyObject_HEAD
-    PyObject *encoding;
-    PyObject *strict;
-    PyObject *object_hook;
-    PyObject *parse_float;
-    PyObject *parse_int;
-    PyObject *parse_constant;
-} PyScannerObject;
-
-static PyMemberDef scanner_members[] = {
-    {"encoding", T_OBJECT, offsetof(PyScannerObject, encoding), READONLY, "encoding"},
-    {"strict", T_OBJECT, offsetof(PyScannerObject, strict), READONLY, "strict"},
-    {"object_hook", T_OBJECT, offsetof(PyScannerObject, object_hook), READONLY, "object_hook"},
-    {"parse_float", T_OBJECT, offsetof(PyScannerObject, parse_float), READONLY, "parse_float"},
-    {"parse_int", T_OBJECT, offsetof(PyScannerObject, parse_int), READONLY, "parse_int"},
-    {"parse_constant", T_OBJECT, offsetof(PyScannerObject, parse_constant), READONLY, "parse_constant"},
-    {NULL}
-};
-
-typedef struct _PyEncoderObject {
-    PyObject_HEAD
-    PyObject *markers;
-    PyObject *defaultfn;
-    PyObject *encoder;
-    PyObject *indent;
-    PyObject *key_separator;
-    PyObject *item_separator;
-    PyObject *sort_keys;
-    PyObject *skipkeys;
-    int fast_encode;
-    int allow_nan;
-} PyEncoderObject;
-
-static PyMemberDef encoder_members[] = {
-    {"markers", T_OBJECT, offsetof(PyEncoderObject, markers), READONLY, "markers"},
-    {"default", T_OBJECT, offsetof(PyEncoderObject, defaultfn), READONLY, "default"},
-    {"encoder", T_OBJECT, offsetof(PyEncoderObject, encoder), READONLY, "encoder"},
-    {"indent", T_OBJECT, offsetof(PyEncoderObject, indent), READONLY, "indent"},
-    {"key_separator", T_OBJECT, offsetof(PyEncoderObject, key_separator), READONLY, "key_separator"},
-    {"item_separator", T_OBJECT, offsetof(PyEncoderObject, item_separator), READONLY, "item_separator"},
-    {"sort_keys", T_OBJECT, offsetof(PyEncoderObject, sort_keys), READONLY, "sort_keys"},
-    {"skipkeys", T_OBJECT, offsetof(PyEncoderObject, skipkeys), READONLY, "skipkeys"},
-    {NULL}
-};
-
-static Py_ssize_t
-ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars);
-static PyObject *
-ascii_escape_unicode(PyObject *pystr);
-static PyObject *
-ascii_escape_str(PyObject *pystr);
-static PyObject *
-py_encode_basestring_ascii(PyObject* self UNUSED, PyObject *pystr);
-void init_speedups(void);
-static PyObject *
-scan_once_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr);
-static PyObject *
-scan_once_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr);
-static PyObject *
-_build_rval_index_tuple(PyObject *rval, Py_ssize_t idx);
-static PyObject *
-scanner_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
-static int
-scanner_init(PyObject *self, PyObject *args, PyObject *kwds);
-static void
-scanner_dealloc(PyObject *self);
-static int
-scanner_clear(PyObject *self);
-static PyObject *
-encoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
-static int
-encoder_init(PyObject *self, PyObject *args, PyObject *kwds);
-static void
-encoder_dealloc(PyObject *self);
-static int
-encoder_clear(PyObject *self);
-static int
-encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ssize_t indent_level);
-static int
-encoder_listencode_obj(PyEncoderObject *s, PyObject *rval, PyObject *obj, Py_ssize_t indent_level);
-static int
-encoder_listencode_dict(PyEncoderObject *s, PyObject *rval, PyObject *dct, Py_ssize_t indent_level);
-static PyObject *
-_encoded_const(PyObject *obj);
-static void
-raise_errmsg(char *msg, PyObject *s, Py_ssize_t end);
-static PyObject *
-encoder_encode_string(PyEncoderObject *s, PyObject *obj);
-static int
-_convertPyInt_AsSsize_t(PyObject *o, Py_ssize_t *size_ptr);
-static PyObject *
-_convertPyInt_FromSsize_t(Py_ssize_t *size_ptr);
-static PyObject *
-encoder_encode_float(PyEncoderObject *s, PyObject *obj);
-
-#define S_CHAR(c) (c >= ' ' && c <= '~' && c != '\\' && c != '"')
-#define IS_WHITESPACE(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || ((c) == '\r'))
-
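-/* Worst-case growth when escaping a single character: MIN_EXPANSION is the
-   length of one "\uXXXX" escape; on wide (UCS-4) builds a non-BMP code point
-   is emitted as a surrogate pair "\uXXXX\uXXXX", doubling the bound. */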
-#define MIN_EXPANSION 6
-#ifdef Py_UNICODE_WIDE
-#define MAX_EXPANSION (2 * MIN_EXPANSION)
-#else
-#define MAX_EXPANSION MIN_EXPANSION
-#endif
-
-static int
-_convertPyInt_AsSsize_t(PyObject *o, Py_ssize_t *size_ptr)
-{
-    /* PyObject to Py_ssize_t converter */
-    *size_ptr = PyInt_AsSsize_t(o);
-    /* "O&" converters must return 1 on success and 0 (with an exception set) on failure */
-    if (*size_ptr == -1 && PyErr_Occurred())
-        return 0;
-    return 1;
-}
-
-static PyObject *
-_convertPyInt_FromSsize_t(Py_ssize_t *size_ptr)
-{
-    /* Py_ssize_t to PyObject converter */
-    return PyInt_FromSsize_t(*size_ptr);
-}
-
-static Py_ssize_t
-ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars)
-{
-    /* Escape unicode code point c to ASCII escape sequences
-    in char *output. output must have at least 12 bytes unused to
-    accommodate an escaped surrogate pair "\uXXXX\uXXXX" */
-    output[chars++] = '\\';
-    switch (c) {
-        case '\\': output[chars++] = (char)c; break;
-        case '"': output[chars++] = (char)c; break;
-        case '\b': output[chars++] = 'b'; break;
-        case '\f': output[chars++] = 'f'; break;
-        case '\n': output[chars++] = 'n'; break;
-        case '\r': output[chars++] = 'r'; break;
-        case '\t': output[chars++] = 't'; break;
-        default:
-#ifdef Py_UNICODE_WIDE
-            if (c >= 0x10000) {
-                /* UTF-16 surrogate pair */
-                Py_UNICODE v = c - 0x10000;
-                c = 0xd800 | ((v >> 10) & 0x3ff);
-                output[chars++] = 'u';
-                output[chars++] = "0123456789abcdef"[(c >> 12) & 0xf];
-                output[chars++] = "0123456789abcdef"[(c >>  8) & 0xf];
-                output[chars++] = "0123456789abcdef"[(c >>  4) & 0xf];
-                output[chars++] = "0123456789abcdef"[(c      ) & 0xf];
-                c = 0xdc00 | (v & 0x3ff);
-                output[chars++] = '\\';
-            }
-#endif
-            output[chars++] = 'u';
-            output[chars++] = "0123456789abcdef"[(c >> 12) & 0xf];
-            output[chars++] = "0123456789abcdef"[(c >>  8) & 0xf];
-            output[chars++] = "0123456789abcdef"[(c >>  4) & 0xf];
-            output[chars++] = "0123456789abcdef"[(c      ) & 0xf];
-    }
-    return chars;
-}
-
-static PyObject *
-ascii_escape_unicode(PyObject *pystr)
-{
-    /* Take a PyUnicode pystr and return a new ASCII-only escaped PyString */
-    Py_ssize_t i;
-    Py_ssize_t input_chars;
-    Py_ssize_t output_size;
-    Py_ssize_t max_output_size;
-    Py_ssize_t chars;
-    PyObject *rval;
-    char *output;
-    Py_UNICODE *input_unicode;
-
-    input_chars = PyUnicode_GET_SIZE(pystr);
-    input_unicode = PyUnicode_AS_UNICODE(pystr);
-
-    /* One char input can be up to 6 chars output, estimate 4 of these */
-    output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
-    max_output_size = 2 + (input_chars * MAX_EXPANSION);
-    rval = PyString_FromStringAndSize(NULL, output_size);
-    if (rval == NULL) {
-        return NULL;
-    }
-    output = PyString_AS_STRING(rval);
-    chars = 0;
-    output[chars++] = '"';
-    for (i = 0; i < input_chars; i++) {
-        Py_UNICODE c = input_unicode[i];
-        if (S_CHAR(c)) {
-            output[chars++] = (char)c;
-        }
-        else {
-            chars = ascii_escape_char(c, output, chars);
-        }
-        if (output_size - chars < (1 + MAX_EXPANSION)) {
-            /* There's more than four, so let's resize by a lot */
-            Py_ssize_t new_output_size = output_size * 2;
-            /* This is an upper bound */
-            if (new_output_size > max_output_size) {
-                new_output_size = max_output_size;
-            }
-            /* Make sure that the output size changed before resizing */
-            if (new_output_size != output_size) {
-                output_size = new_output_size;
-                if (_PyString_Resize(&rval, output_size) == -1) {
-                    return NULL;
-                }
-                output = PyString_AS_STRING(rval);
-            }
-        }
-    }
-    output[chars++] = '"';
-    if (_PyString_Resize(&rval, chars) == -1) {
-        return NULL;
-    }
-    return rval;
-}
-
-static PyObject *
-ascii_escape_str(PyObject *pystr)
-{
-    /* Take a PyString pystr and return a new ASCII-only escaped PyString */
-    Py_ssize_t i;
-    Py_ssize_t input_chars;
-    Py_ssize_t output_size;
-    Py_ssize_t chars;
-    PyObject *rval;
-    char *output;
-    char *input_str;
-
-    input_chars = PyString_GET_SIZE(pystr);
-    input_str = PyString_AS_STRING(pystr);
-
-    /* Fast path for a string that's already ASCII */
-    for (i = 0; i < input_chars; i++) {
-        Py_UNICODE c = (Py_UNICODE)(unsigned char)input_str[i];
-        if (!S_CHAR(c)) {
-            /* If we have to escape something, scan the string for unicode */
-            Py_ssize_t j;
-            for (j = i; j < input_chars; j++) {
-                c = (Py_UNICODE)(unsigned char)input_str[j];
-                if (c > 0x7f) {
-                    /* We hit a non-ASCII character, bail to unicode mode */
-                    PyObject *uni;
-                    uni = PyUnicode_DecodeUTF8(input_str, input_chars, "strict");
-                    if (uni == NULL) {
-                        return NULL;
-                    }
-                    rval = ascii_escape_unicode(uni);
-                    Py_DECREF(uni);
-                    return rval;
-                }
-            }
-            break;
-        }
-    }
-
-    if (i == input_chars) {
-        /* Input is already ASCII */
-        output_size = 2 + input_chars;
-    }
-    else {
-        /* One char input can be up to 6 chars output, estimate 4 of these */
-        output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
-    }
-    rval = PyString_FromStringAndSize(NULL, output_size);
-    if (rval == NULL) {
-        return NULL;
-    }
-    output = PyString_AS_STRING(rval);
-    output[0] = '"';
-
-    /* We know that everything up to i is ASCII already */
-    chars = i + 1;
-    memcpy(&output[1], input_str, i);
-
-    for (; i < input_chars; i++) {
-        Py_UNICODE c = (Py_UNICODE)(unsigned char)input_str[i];
-        if (S_CHAR(c)) {
-            output[chars++] = (char)c;
-        }
-        else {
-            chars = ascii_escape_char(c, output, chars);
-        }
-        /* An ASCII char can't possibly expand to a surrogate! */
-        if (output_size - chars < (1 + MIN_EXPANSION)) {
-            /* There's more than four, so let's resize by a lot */
-            output_size *= 2;
-            if (output_size > 2 + (input_chars * MIN_EXPANSION)) {
-                output_size = 2 + (input_chars * MIN_EXPANSION);
-            }
-            if (_PyString_Resize(&rval, output_size) == -1) {
-                return NULL;
-            }
-            output = PyString_AS_STRING(rval);
-        }
-    }
-    output[chars++] = '"';
-    if (_PyString_Resize(&rval, chars) == -1) {
-        return NULL;
-    }
-    return rval;
-}
-
-static void
-raise_errmsg(char *msg, PyObject *s, Py_ssize_t end)
-{
-    /* Use the Python function simplejson.decoder.errmsg to raise a nice
-    looking ValueError exception */
-    static PyObject *errmsg_fn = NULL;
-    PyObject *pymsg;
-    if (errmsg_fn == NULL) {
-        PyObject *decoder = PyImport_ImportModule("simplejson.decoder");
-        if (decoder == NULL)
-            return;
-        errmsg_fn = PyObject_GetAttrString(decoder, "errmsg");
-        Py_DECREF(decoder);
-        if (errmsg_fn == NULL)
-            return;
-    }
-    pymsg = PyObject_CallFunction(errmsg_fn, "(zOO&)", msg, s, _convertPyInt_FromSsize_t, &end);
-    if (pymsg) {
-        PyErr_SetObject(PyExc_ValueError, pymsg);
-        Py_DECREF(pymsg);
-    }
-}
-
-static PyObject *
-join_list_unicode(PyObject *lst)
-{
-    /* return u''.join(lst) */
-    static PyObject *joinfn = NULL;
-    if (joinfn == NULL) {
-        PyObject *ustr = PyUnicode_FromUnicode(NULL, 0);
-        if (ustr == NULL)
-            return NULL;
-
-        joinfn = PyObject_GetAttrString(ustr, "join");
-        Py_DECREF(ustr);
-        if (joinfn == NULL)
-            return NULL;
-    }
-    return PyObject_CallFunctionObjArgs(joinfn, lst, NULL);
-}
-
-static PyObject *
-join_list_string(PyObject *lst)
-{
-    /* return ''.join(lst) */
-    static PyObject *joinfn = NULL;
-    if (joinfn == NULL) {
-        PyObject *ustr = PyString_FromStringAndSize(NULL, 0);
-        if (ustr == NULL)
-            return NULL;
-
-        joinfn = PyObject_GetAttrString(ustr, "join");
-        Py_DECREF(ustr);
-        if (joinfn == NULL)
-            return NULL;
-    }
-    return PyObject_CallFunctionObjArgs(joinfn, lst, NULL);
-}
-
-static PyObject *
-_build_rval_index_tuple(PyObject *rval, Py_ssize_t idx) {
-    /* return (rval, idx) tuple, stealing reference to rval */
-    PyObject *tpl;
-    PyObject *pyidx;
-    /*
-    steal a reference to rval, returns (rval, idx)
-    */
-    if (rval == NULL) {
-        return NULL;
-    }
-    pyidx = PyInt_FromSsize_t(idx);
-    if (pyidx == NULL) {
-        Py_DECREF(rval);
-        return NULL;
-    }
-    tpl = PyTuple_New(2);
-    if (tpl == NULL) {
-        Py_DECREF(pyidx);
-        Py_DECREF(rval);
-        return NULL;
-    }
-    PyTuple_SET_ITEM(tpl, 0, rval);
-    PyTuple_SET_ITEM(tpl, 1, pyidx);
-    return tpl;
-}
-
-static PyObject *
-scanstring_str(PyObject *pystr, Py_ssize_t end, char *encoding, int strict, Py_ssize_t *next_end_ptr)
-{
-    /* Read the JSON string from PyString pystr.
-    end is the index of the first character after the quote.
-    encoding is the encoding of pystr (must be an ASCII superset)
-    if strict is zero then literal control characters are allowed
-    *next_end_ptr is a return-by-reference index of the character
-        after the end quote
-
-    Return value is a new PyString (if ASCII-only) or PyUnicode
-    */
-    PyObject *rval;
-    Py_ssize_t len = PyString_GET_SIZE(pystr);
-    Py_ssize_t begin = end - 1;
-    Py_ssize_t next = begin;
-    int has_unicode = 0;
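-    /* has_unicode is set once a character above 0x7f is seen; raw chunks are
-       then decoded with `encoding` so the joined result comes back as unicode */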
-    char *buf = PyString_AS_STRING(pystr);
-    PyObject *chunks = PyList_New(0);
-    if (chunks == NULL) {
-        goto bail;
-    }
-    if (end < 0 || len <= end) {
-        PyErr_SetString(PyExc_ValueError, "end is out of bounds");
-        goto bail;
-    }
-    while (1) {
-        /* Find the end of the string or the next escape */
-        Py_UNICODE c = 0;
-        PyObject *chunk = NULL;
-        for (next = end; next < len; next++) {
-            c = (unsigned char)buf[next];
-            if (c == '"' || c == '\\') {
-                break;
-            }
-            else if (strict && c <= 0x1f) {
-                raise_errmsg("Invalid control character at", pystr, next);
-                goto bail;
-            }
-            else if (c > 0x7f) {
-                has_unicode = 1;
-            }
-        }
-        if (!(c == '"' || c == '\\')) {
-            raise_errmsg("Unterminated string starting at", pystr, begin);
-            goto bail;
-        }
-        /* Pick up this chunk if it's not zero length */
-        if (next != end) {
-            PyObject *strchunk = PyString_FromStringAndSize(&buf[end], next - end);
-            if (strchunk == NULL) {
-                goto bail;
-            }
-            if (has_unicode) {
-                chunk = PyUnicode_FromEncodedObject(strchunk, encoding, NULL);
-                Py_DECREF(strchunk);
-                if (chunk == NULL) {
-                    goto bail;
-                }
-            }
-            else {
-                chunk = strchunk;
-            }
-            if (PyList_Append(chunks, chunk)) {
-                Py_DECREF(chunk);
-                goto bail;
-            }
-            Py_DECREF(chunk);
-        }
-        next++;
-        if (c == '"') {
-            end = next;
-            break;
-        }
-        if (next == len) {
-            raise_errmsg("Unterminated string starting at", pystr, begin);
-            goto bail;
-        }
-        c = buf[next];
-        if (c != 'u') {
-            /* Non-unicode backslash escapes */
-            end = next + 1;
-            switch (c) {
-                case '"': break;
-                case '\\': break;
-                case '/': break;
-                case 'b': c = '\b'; break;
-                case 'f': c = '\f'; break;
-                case 'n': c = '\n'; break;
-                case 'r': c = '\r'; break;
-                case 't': c = '\t'; break;
-                default: c = 0;
-            }
-            if (c == 0) {
-                raise_errmsg("Invalid \\escape", pystr, end - 2);
-                goto bail;
-            }
-        }
-        else {
-            c = 0;
-            next++;
-            end = next + 4;
-            if (end >= len) {
-                raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1);
-                goto bail;
-            }
-            /* Decode 4 hex digits */
-            for (; next < end; next++) {
-                Py_UNICODE digit = buf[next];
-                c <<= 4;
-                switch (digit) {
-                    case '0': case '1': case '2': case '3': case '4':
-                    case '5': case '6': case '7': case '8': case '9':
-                        c |= (digit - '0'); break;
-                    case 'a': case 'b': case 'c': case 'd': case 'e':
-                    case 'f':
-                        c |= (digit - 'a' + 10); break;
-                    case 'A': case 'B': case 'C': case 'D': case 'E':
-                    case 'F':
-                        c |= (digit - 'A' + 10); break;
-                    default:
-                        raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
-                        goto bail;
-                }
-            }
-#ifdef Py_UNICODE_WIDE
-            /* Surrogate pair */
-            if ((c & 0xfc00) == 0xd800) {
-                Py_UNICODE c2 = 0;
-                if (end + 6 >= len) {
-                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
-                    goto bail;
-                }
-                if (buf[next++] != '\\' || buf[next++] != 'u') {
-                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
-                    goto bail;
-                }
-                end += 6;
-                /* Decode 4 hex digits */
-                for (; next < end; next++) {
-                    Py_UNICODE digit = buf[next];
-                    c2 <<= 4;
-                    switch (digit) {
-                        case '0': case '1': case '2': case '3': case '4':
-                        case '5': case '6': case '7': case '8': case '9':
-                            c2 |= (digit - '0'); break;
-                        case 'a': case 'b': case 'c': case 'd': case 'e':
-                        case 'f':
-                            c2 |= (digit - 'a' + 10); break;
-                        case 'A': case 'B': case 'C': case 'D': case 'E':
-                        case 'F':
-                            c2 |= (digit - 'A' + 10); break;
-                        default:
-                            raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
-                            goto bail;
-                    }
-                }
-                if ((c2 & 0xfc00) != 0xdc00) {
-                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
-                    goto bail;
-                }
-                c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
-            }
-            else if ((c & 0xfc00) == 0xdc00) {
-                raise_errmsg("Unpaired low surrogate", pystr, end - 5);
-                goto bail;
-            }
-#endif
-        }
-        if (c > 0x7f) {
-            has_unicode = 1;
-        }
-        if (has_unicode) {
-            chunk = PyUnicode_FromUnicode(&c, 1);
-            if (chunk == NULL) {
-                goto bail;
-            }
-        }
-        else {
-            char c_char = Py_CHARMASK(c);
-            chunk = PyString_FromStringAndSize(&c_char, 1);
-            if (chunk == NULL) {
-                goto bail;
-            }
-        }
-        if (PyList_Append(chunks, chunk)) {
-            Py_DECREF(chunk);
-            goto bail;
-        }
-        Py_DECREF(chunk);
-    }
-
-    rval = join_list_string(chunks);
-    if (rval == NULL) {
-        goto bail;
-    }
-    Py_CLEAR(chunks);
-    *next_end_ptr = end;
-    return rval;
-bail:
-    *next_end_ptr = -1;
-    Py_XDECREF(chunks);
-    return NULL;
-}
-
-
-static PyObject *
-scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict, Py_ssize_t *next_end_ptr)
-{
-    /* Read the JSON string from PyUnicode pystr.
-    end is the index of the first character after the quote.
-    if strict is zero then literal control characters are allowed
-    *next_end_ptr is a return-by-reference index of the character
-        after the end quote
-
-    Return value is a new PyUnicode
-    */
-    PyObject *rval;
-    Py_ssize_t len = PyUnicode_GET_SIZE(pystr);
-    Py_ssize_t begin = end - 1;
-    Py_ssize_t next = begin;
-    const Py_UNICODE *buf = PyUnicode_AS_UNICODE(pystr);
-    PyObject *chunks = PyList_New(0);
-    if (chunks == NULL) {
-        goto bail;
-    }
-    if (end < 0 || len <= end) {
-        PyErr_SetString(PyExc_ValueError, "end is out of bounds");
-        goto bail;
-    }
-    while (1) {
-        /* Find the end of the string or the next escape */
-        Py_UNICODE c = 0;
-        PyObject *chunk = NULL;
-        for (next = end; next < len; next++) {
-            c = buf[next];
-            if (c == '"' || c == '\\') {
-                break;
-            }
-            else if (strict && c <= 0x1f) {
-                raise_errmsg("Invalid control character at", pystr, next);
-                goto bail;
-            }
-        }
-        if (!(c == '"' || c == '\\')) {
-            raise_errmsg("Unterminated string starting at", pystr, begin);
-            goto bail;
-        }
-        /* Pick up this chunk if it's not zero length */
-        if (next != end) {
-            chunk = PyUnicode_FromUnicode(&buf[end], next - end);
-            if (chunk == NULL) {
-                goto bail;
-            }
-            if (PyList_Append(chunks, chunk)) {
-                Py_DECREF(chunk);
-                goto bail;
-            }
-            Py_DECREF(chunk);
-        }
-        next++;
-        if (c == '"') {
-            end = next;
-            break;
-        }
-        if (next == len) {
-            raise_errmsg("Unterminated string starting at", pystr, begin);
-            goto bail;
-        }
-        c = buf[next];
-        if (c != 'u') {
-            /* Non-unicode backslash escapes */
-            end = next + 1;
-            switch (c) {
-                case '"': break;
-                case '\\': break;
-                case '/': break;
-                case 'b': c = '\b'; break;
-                case 'f': c = '\f'; break;
-                case 'n': c = '\n'; break;
-                case 'r': c = '\r'; break;
-                case 't': c = '\t'; break;
-                default: c = 0;
-            }
-            if (c == 0) {
-                raise_errmsg("Invalid \\escape", pystr, end - 2);
-                goto bail;
-            }
-        }
-        else {
-            c = 0;
-            next++;
-            end = next + 4;
-            if (end >= len) {
-                raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1);
-                goto bail;
-            }
-            /* Decode 4 hex digits */
-            for (; next < end; next++) {
-                Py_UNICODE digit = buf[next];
-                c <<= 4;
-                switch (digit) {
-                    case '0': case '1': case '2': case '3': case '4':
-                    case '5': case '6': case '7': case '8': case '9':
-                        c |= (digit - '0'); break;
-                    case 'a': case 'b': case 'c': case 'd': case 'e':
-                    case 'f':
-                        c |= (digit - 'a' + 10); break;
-                    case 'A': case 'B': case 'C': case 'D': case 'E':
-                    case 'F':
-                        c |= (digit - 'A' + 10); break;
-                    default:
-                        raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
-                        goto bail;
-                }
-            }
-#ifdef Py_UNICODE_WIDE
-            /* Surrogate pair */
-            if ((c & 0xfc00) == 0xd800) {
-                Py_UNICODE c2 = 0;
-                if (end + 6 >= len) {
-                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
-                    goto bail;
-                }
-                if (buf[next++] != '\\' || buf[next++] != 'u') {
-                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
-                    goto bail;
-                }
-                end += 6;
-                /* Decode 4 hex digits */
-                for (; next < end; next++) {
-                    Py_UNICODE digit = buf[next];
-                    c2 <<= 4;
-                    switch (digit) {
-                        case '0': case '1': case '2': case '3': case '4':
-                        case '5': case '6': case '7': case '8': case '9':
-                            c2 |= (digit - '0'); break;
-                        case 'a': case 'b': case 'c': case 'd': case 'e':
-                        case 'f':
-                            c2 |= (digit - 'a' + 10); break;
-                        case 'A': case 'B': case 'C': case 'D': case 'E':
-                        case 'F':
-                            c2 |= (digit - 'A' + 10); break;
-                        default:
-                            raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
-                            goto bail;
-                    }
-                }
-                if ((c2 & 0xfc00) != 0xdc00) {
-                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
-                    goto bail;
-                }
-                c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
-            }
-            else if ((c & 0xfc00) == 0xdc00) {
-                raise_errmsg("Unpaired low surrogate", pystr, end - 5);
-                goto bail;
-            }
-#endif
-        }
-        chunk = PyUnicode_FromUnicode(&c, 1);
-        if (chunk == NULL) {
-            goto bail;
-        }
-        if (PyList_Append(chunks, chunk)) {
-            Py_DECREF(chunk);
-            goto bail;
-        }
-        Py_DECREF(chunk);
-    }
-
-    rval = join_list_unicode(chunks);
-    if (rval == NULL) {
-        goto bail;
-    }
-    Py_DECREF(chunks);
-    *next_end_ptr = end;
-    return rval;
-bail:
-    *next_end_ptr = -1;
-    Py_XDECREF(chunks);
-    return NULL;
-}
-
-PyDoc_STRVAR(pydoc_scanstring,
-    "scanstring(basestring, end, encoding, strict=True) -> (str, end)\n"
-    "\n"
-    "Scan the string s for a JSON string. End is the index of the\n"
-    "character in s after the quote that started the JSON string.\n"
-    "Unescapes all valid JSON string escape sequences and raises ValueError\n"
-    "on attempt to decode an invalid string. If strict is False then literal\n"
-    "control characters are allowed in the string.\n"
-    "\n"
-    "Returns a tuple of the decoded string and the index of the character in s\n"
-    "after the end quote."
-);
-
-static PyObject *
-py_scanstring(PyObject* self UNUSED, PyObject *args)
-{
-    PyObject *pystr;
-    PyObject *rval;
-    Py_ssize_t end;
-    Py_ssize_t next_end = -1;
-    char *encoding = NULL;
-    int strict = 1;
-    if (!PyArg_ParseTuple(args, "OO&|zi:scanstring", &pystr, _convertPyInt_AsSsize_t, &end, &encoding, &strict)) {
-        return NULL;
-    }
-    if (encoding == NULL) {
-        encoding = DEFAULT_ENCODING;
-    }
-    if (PyString_Check(pystr)) {
-        rval = scanstring_str(pystr, end, encoding, strict, &next_end);
-    }
-    else if (PyUnicode_Check(pystr)) {
-        rval = scanstring_unicode(pystr, end, strict, &next_end);
-    }
-    else {
-        PyErr_Format(PyExc_TypeError,
-                     "first argument must be a string, not %.80s",
-                     Py_TYPE(pystr)->tp_name);
-        return NULL;
-    }
-    return _build_rval_index_tuple(rval, next_end);
-}
-
-PyDoc_STRVAR(pydoc_encode_basestring_ascii,
-    "encode_basestring_ascii(basestring) -> str\n"
-    "\n"
-    "Return an ASCII-only JSON representation of a Python string"
-);
-
-static PyObject *
-py_encode_basestring_ascii(PyObject* self UNUSED, PyObject *pystr)
-{
-    /* Return an ASCII-only JSON representation of a Python string */
-    /* METH_O */
-    if (PyString_Check(pystr)) {
-        return ascii_escape_str(pystr);
-    }
-    else if (PyUnicode_Check(pystr)) {
-        return ascii_escape_unicode(pystr);
-    }
-    else {
-        PyErr_Format(PyExc_TypeError,
-                     "first argument must be a string, not %.80s",
-                     Py_TYPE(pystr)->tp_name);
-        return NULL;
-    }
-}
-
-static void
-scanner_dealloc(PyObject *self)
-{
-    /* Deallocate scanner object */
-    scanner_clear(self);
-    Py_TYPE(self)->tp_free(self);
-}
-
-static int
-scanner_traverse(PyObject *self, visitproc visit, void *arg)
-{
-    PyScannerObject *s;
-    assert(PyScanner_Check(self));
-    s = (PyScannerObject *)self;
-    Py_VISIT(s->encoding);
-    Py_VISIT(s->strict);
-    Py_VISIT(s->object_hook);
-    Py_VISIT(s->parse_float);
-    Py_VISIT(s->parse_int);
-    Py_VISIT(s->parse_constant);
-    return 0;
-}
-
-static int
-scanner_clear(PyObject *self)
-{
-    PyScannerObject *s;
-    assert(PyScanner_Check(self));
-    s = (PyScannerObject *)self;
-    Py_CLEAR(s->encoding);
-    Py_CLEAR(s->strict);
-    Py_CLEAR(s->object_hook);
-    Py_CLEAR(s->parse_float);
-    Py_CLEAR(s->parse_int);
-    Py_CLEAR(s->parse_constant);
-    return 0;
-}
-
-static PyObject *
-_parse_object_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
-    /* Read a JSON object from PyString pystr.
-    idx is the index of the first character after the opening curly brace.
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the closing curly brace.
-
-    Returns a new PyObject (usually a dict, but object_hook can change that)
-    */
-    char *str = PyString_AS_STRING(pystr);
-    Py_ssize_t end_idx = PyString_GET_SIZE(pystr) - 1;
-    PyObject *rval = PyDict_New();
-    PyObject *key = NULL;
-    PyObject *val = NULL;
-    char *encoding = PyString_AS_STRING(s->encoding);
-    int strict = PyObject_IsTrue(s->strict);
-    Py_ssize_t next_idx;
-    if (rval == NULL)
-        return NULL;
-
-    /* skip whitespace after { */
-    while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-    /* only loop if the object is non-empty */
-    if (idx <= end_idx && str[idx] != '}') {
-        while (idx <= end_idx) {
-            /* read key */
-            if (str[idx] != '"') {
-                raise_errmsg("Expecting property name", pystr, idx);
-                goto bail;
-            }
-            key = scanstring_str(pystr, idx + 1, encoding, strict, &next_idx);
-            if (key == NULL)
-                goto bail;
-            idx = next_idx;
-
-            /* skip whitespace between key and : delimiter, read :, skip whitespace */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-            if (idx > end_idx || str[idx] != ':') {
-                raise_errmsg("Expecting : delimiter", pystr, idx);
-                goto bail;
-            }
-            idx++;
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-            /* read any JSON data type */
-            val = scan_once_str(s, pystr, idx, &next_idx);
-            if (val == NULL)
-                goto bail;
-
-            if (PyDict_SetItem(rval, key, val) == -1)
-                goto bail;
-
-            Py_CLEAR(key);
-            Py_CLEAR(val);
-            idx = next_idx;
-
-            /* skip whitespace before } or , */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-            /* bail if the object is closed or we didn't get the , delimiter */
-            if (idx > end_idx) break;
-            if (str[idx] == '}') {
-                break;
-            }
-            else if (str[idx] != ',') {
-                raise_errmsg("Expecting , delimiter", pystr, idx);
-                goto bail;
-            }
-            idx++;
-
-            /* skip whitespace after , delimiter */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-        }
-    }
-    /* verify that idx < end_idx, str[idx] should be '}' */
-    if (idx > end_idx || str[idx] != '}') {
-        raise_errmsg("Expecting object", pystr, end_idx);
-        goto bail;
-    }
-    /* if object_hook is not None: rval = object_hook(rval) */
-    if (s->object_hook != Py_None) {
-        val = PyObject_CallFunctionObjArgs(s->object_hook, rval, NULL);
-        if (val == NULL)
-            goto bail;
-        Py_DECREF(rval);
-        rval = val;
-        val = NULL;
-    }
-    *next_idx_ptr = idx + 1;
-    return rval;
-bail:
-    Py_XDECREF(key);
-    Py_XDECREF(val);
-    Py_DECREF(rval);
-    return NULL;
-}
-
-static PyObject *
-_parse_object_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
-    /* Read a JSON object from PyUnicode pystr.
-    idx is the index of the first character after the opening curly brace.
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the closing curly brace.
-
-    Returns a new PyObject (usually a dict, but object_hook can change that)
-    */
-    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
-    Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
-    PyObject *val = NULL;
-    PyObject *rval = PyDict_New();
-    PyObject *key = NULL;
-    int strict = PyObject_IsTrue(s->strict);
-    Py_ssize_t next_idx;
-    if (rval == NULL)
-        return NULL;
-
-    /* skip whitespace after { */
-    while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-    /* only loop if the object is non-empty */
-    if (idx <= end_idx && str[idx] != '}') {
-        while (idx <= end_idx) {
-            /* read key */
-            if (str[idx] != '"') {
-                raise_errmsg("Expecting property name", pystr, idx);
-                goto bail;
-            }
-            key = scanstring_unicode(pystr, idx + 1, strict, &next_idx);
-            if (key == NULL)
-                goto bail;
-            idx = next_idx;
-
-            /* skip whitespace between key and : delimiter, read :, skip whitespace */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-            if (idx > end_idx || str[idx] != ':') {
-                raise_errmsg("Expecting : delimiter", pystr, idx);
-                goto bail;
-            }
-            idx++;
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-            /* read any JSON term */
-            val = scan_once_unicode(s, pystr, idx, &next_idx);
-            if (val == NULL)
-                goto bail;
-
-            if (PyDict_SetItem(rval, key, val) == -1)
-                goto bail;
-
-            Py_CLEAR(key);
-            Py_CLEAR(val);
-            idx = next_idx;
-
-            /* skip whitespace before } or , */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-            /* bail if the object is closed or we didn't get the , delimiter */
-            if (idx > end_idx) break;
-            if (str[idx] == '}') {
-                break;
-            }
-            else if (str[idx] != ',') {
-                raise_errmsg("Expecting , delimiter", pystr, idx);
-                goto bail;
-            }
-            idx++;
-
-            /* skip whitespace after , delimiter */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-        }
-    }
-
-    /* verify that idx < end_idx, str[idx] should be '}' */
-    if (idx > end_idx || str[idx] != '}') {
-        raise_errmsg("Expecting object", pystr, end_idx);
-        goto bail;
-    }
-
-    /* if object_hook is not None: rval = object_hook(rval) */
-    if (s->object_hook != Py_None) {
-        val = PyObject_CallFunctionObjArgs(s->object_hook, rval, NULL);
-        if (val == NULL)
-            goto bail;
-        Py_DECREF(rval);
-        rval = val;
-        val = NULL;
-    }
-    *next_idx_ptr = idx + 1;
-    return rval;
-bail:
-    Py_XDECREF(key);
-    Py_XDECREF(val);
-    Py_DECREF(rval);
-    return NULL;
-}
-
-static PyObject *
-_parse_array_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
-    /* Read a JSON array from PyString pystr.
-    idx is the index of the first character after the opening bracket.
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the closing bracket.
-
-    Returns a new PyList
-    */
-    char *str = PyString_AS_STRING(pystr);
-    Py_ssize_t end_idx = PyString_GET_SIZE(pystr) - 1;
-    PyObject *val = NULL;
-    PyObject *rval = PyList_New(0);
-    Py_ssize_t next_idx;
-    if (rval == NULL)
-        return NULL;
-
-    /* skip whitespace after [ */
-    while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-    /* only loop if the array is non-empty */
-    if (idx <= end_idx && str[idx] != ']') {
-        while (idx <= end_idx) {
-
-            /* read any JSON term */
-            val = scan_once_str(s, pystr, idx, &next_idx);
-            if (val == NULL)
-                goto bail;
-
-            if (PyList_Append(rval, val) == -1)
-                goto bail;
-
-            Py_CLEAR(val);
-            idx = next_idx;
-
-            /* skip whitespace between term and , */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-            /* bail if the array is closed or we didn't get the , delimiter */
-            if (idx > end_idx) break;
-            if (str[idx] == ']') {
-                break;
-            }
-            else if (str[idx] != ',') {
-                raise_errmsg("Expecting , delimiter", pystr, idx);
-                goto bail;
-            }
-            idx++;
-
-            /* skip whitespace after , */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-        }
-    }
-
-    /* verify that idx < end_idx, str[idx] should be ']' */
-    if (idx > end_idx || str[idx] != ']') {
-        raise_errmsg("Expecting object", pystr, end_idx);
-        goto bail;
-    }
-    *next_idx_ptr = idx + 1;
-    return rval;
-bail:
-    Py_XDECREF(val);
-    Py_DECREF(rval);
-    return NULL;
-}
-
-static PyObject *
-_parse_array_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
-    /* Read a JSON array from PyUnicode pystr.
-    idx is the index of the first character after the opening bracket.
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the closing bracket.
-
-    Returns a new PyList
-    */
-    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
-    Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
-    PyObject *val = NULL;
-    PyObject *rval = PyList_New(0);
-    Py_ssize_t next_idx;
-    if (rval == NULL)
-        return NULL;
-
-    /* skip whitespace after [ */
-    while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-    /* only loop if the array is non-empty */
-    if (idx <= end_idx && str[idx] != ']') {
-        while (idx <= end_idx) {
-
-            /* read any JSON term  */
-            val = scan_once_unicode(s, pystr, idx, &next_idx);
-            if (val == NULL)
-                goto bail;
-
-            if (PyList_Append(rval, val) == -1)
-                goto bail;
-
-            Py_CLEAR(val);
-            idx = next_idx;
-
-            /* skip whitespace between term and , */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-
-            /* bail if the array is closed or we didn't get the , delimiter */
-            if (idx > end_idx) break;
-            if (str[idx] == ']') {
-                break;
-            }
-            else if (str[idx] != ',') {
-                raise_errmsg("Expecting , delimiter", pystr, idx);
-                goto bail;
-            }
-            idx++;
-
-            /* skip whitespace after , */
-            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
-        }
-    }
-
-    /* verify that idx < end_idx, str[idx] should be ']' */
-    if (idx > end_idx || str[idx] != ']') {
-        raise_errmsg("Expecting object", pystr, end_idx);
-        goto bail;
-    }
-    *next_idx_ptr = idx + 1;
-    return rval;
-bail:
-    Py_XDECREF(val);
-    Py_DECREF(rval);
-    return NULL;
-}
-
-static PyObject *
-_parse_constant(PyScannerObject *s, char *constant, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
-    /* Read a JSON constant from PyString pystr.
-    constant is the constant string that was found
-        ("NaN", "Infinity", "-Infinity").
-    idx is the index of the first character of the constant
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the constant.
-
-    Returns the result of parse_constant
-    */
-    PyObject *cstr;
-    PyObject *rval;
-    /* constant is "NaN", "Infinity", or "-Infinity" */
-    cstr = PyString_InternFromString(constant);
-    if (cstr == NULL)
-        return NULL;
-
-    /* rval = parse_constant(constant) */
-    rval = PyObject_CallFunctionObjArgs(s->parse_constant, cstr, NULL);
-    idx += PyString_GET_SIZE(cstr);
-    Py_DECREF(cstr);
-    *next_idx_ptr = idx;
-    return rval;
-}
-
-static PyObject *
-_match_number_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t start, Py_ssize_t *next_idx_ptr) {
-    /* Read a JSON number from PyString pystr.
-    idx is the index of the first character of the number
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the number.
-
-    Returns a new PyObject representation of that number:
-        PyInt, PyLong, or PyFloat.
-        May return other types if parse_int or parse_float are set
-    */
-    char *str = PyString_AS_STRING(pystr);
-    Py_ssize_t end_idx = PyString_GET_SIZE(pystr) - 1;
-    Py_ssize_t idx = start;
-    int is_float = 0;
-    PyObject *rval;
-    PyObject *numstr;
-
-    /* read a sign if it's there, make sure it's not the end of the string */
-    if (str[idx] == '-') {
-        idx++;
-        if (idx > end_idx) {
-            PyErr_SetNone(PyExc_StopIteration);
-            return NULL;
-        }
-    }
-
-    /* read as many integer digits as we find as long as it doesn't start with 0 */
-    if (str[idx] >= '1' && str[idx] <= '9') {
-        idx++;
-        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
-    }
-    /* if it starts with 0 we only expect one integer digit */
-    else if (str[idx] == '0') {
-        idx++;
-    }
-    /* no integer digits, error */
-    else {
-        PyErr_SetNone(PyExc_StopIteration);
-        return NULL;
-    }
-
-    /* if the next char is '.' followed by a digit then read all float digits */
-    if (idx < end_idx && str[idx] == '.' && str[idx + 1] >= '0' && str[idx + 1] <= '9') {
-        is_float = 1;
-        idx += 2;
-        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
-    }
-
-    /* if the next char is 'e' or 'E' then maybe read the exponent (or backtrack) */
-    if (idx < end_idx && (str[idx] == 'e' || str[idx] == 'E')) {
-
-        /* save the index of the 'e' or 'E' just in case we need to backtrack */
-        Py_ssize_t e_start = idx;
-        idx++;
-
-        /* read an exponent sign if present */
-        if (idx < end_idx && (str[idx] == '-' || str[idx] == '+')) idx++;
-
-        /* read all digits */
-        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
-
-        /* if we got a digit, then parse as float. if not, backtrack */
-        if (str[idx - 1] >= '0' && str[idx - 1] <= '9') {
-            is_float = 1;
-        }
-        else {
-            idx = e_start;
-        }
-    }
-
-    /* copy the section we determined to be a number */
-    numstr = PyString_FromStringAndSize(&str[start], idx - start);
-    if (numstr == NULL)
-        return NULL;
-    if (is_float) {
-        /* parse as a float using a fast path if available, otherwise call user defined method */
-        if (s->parse_float != (PyObject *)&PyFloat_Type) {
-            rval = PyObject_CallFunctionObjArgs(s->parse_float, numstr, NULL);
-        }
-        else {
-            rval = PyFloat_FromDouble(PyOS_ascii_atof(PyString_AS_STRING(numstr)));
-        }
-    }
-    else {
-        /* parse as an int using a fast path if available, otherwise call user defined method */
-        if (s->parse_int != (PyObject *)&PyInt_Type) {
-            rval = PyObject_CallFunctionObjArgs(s->parse_int, numstr, NULL);
-        }
-        else {
-            rval = PyInt_FromString(PyString_AS_STRING(numstr), NULL, 10);
-        }
-    }
-    Py_DECREF(numstr);
-    *next_idx_ptr = idx;
-    return rval;
-}
-
-static PyObject *
-_match_number_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t start, Py_ssize_t *next_idx_ptr) {
-    /* Read a JSON number from PyUnicode pystr.
-    idx is the index of the first character of the number
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the number.
-
-    Returns a new PyObject representation of that number:
-        PyInt, PyLong, or PyFloat.
-        May return other types if parse_int or parse_float are set
-    */
-    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
-    Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
-    Py_ssize_t idx = start;
-    int is_float = 0;
-    PyObject *rval;
-    PyObject *numstr;
-
-    /* read a sign if it's there, make sure it's not the end of the string */
-    if (str[idx] == '-') {
-        idx++;
-        if (idx > end_idx) {
-            PyErr_SetNone(PyExc_StopIteration);
-            return NULL;
-        }
-    }
-
-    /* read as many integer digits as we find as long as it doesn't start with 0 */
-    if (str[idx] >= '1' && str[idx] <= '9') {
-        idx++;
-        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
-    }
-    /* if it starts with 0 we only expect one integer digit */
-    else if (str[idx] == '0') {
-        idx++;
-    }
-    /* no integer digits, error */
-    else {
-        PyErr_SetNone(PyExc_StopIteration);
-        return NULL;
-    }
-
-    /* if the next char is '.' followed by a digit then read all float digits */
-    if (idx < end_idx && str[idx] == '.' && str[idx + 1] >= '0' && str[idx + 1] <= '9') {
-        is_float = 1;
-        idx += 2;
-        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
-    }
-
-    /* if the next char is 'e' or 'E' then maybe read the exponent (or backtrack) */
-    if (idx < end_idx && (str[idx] == 'e' || str[idx] == 'E')) {
-        Py_ssize_t e_start = idx;
-        idx++;
-
-        /* read an exponent sign if present */
-        if (idx < end_idx && (str[idx] == '-' || str[idx] == '+')) idx++;
-
-        /* read all digits */
-        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
-
-        /* if we got a digit, then parse as float. if not, backtrack */
-        if (str[idx - 1] >= '0' && str[idx - 1] <= '9') {
-            is_float = 1;
-        }
-        else {
-            idx = e_start;
-        }
-    }
-
-    /* copy the section we determined to be a number */
-    numstr = PyUnicode_FromUnicode(&str[start], idx - start);
-    if (numstr == NULL)
-        return NULL;
-    if (is_float) {
-        /* parse as a float using a fast path if available, otherwise call user defined method */
-        if (s->parse_float != (PyObject *)&PyFloat_Type) {
-            rval = PyObject_CallFunctionObjArgs(s->parse_float, numstr, NULL);
-        }
-        else {
-            rval = PyFloat_FromString(numstr, NULL);
-        }
-    }
-    else {
-        /* no fast path for unicode -> int, just call */
-        rval = PyObject_CallFunctionObjArgs(s->parse_int, numstr, NULL);
-    }
-    Py_DECREF(numstr);
-    *next_idx_ptr = idx;
-    return rval;
-}
-
-static PyObject *
-scan_once_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr)
-{
-    /* Read one JSON term (of any kind) from PyString pystr.
-    idx is the index of the first character of the term
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the number.
-
-    Returns a new PyObject representation of the term.
-    */
-    char *str = PyString_AS_STRING(pystr);
-    Py_ssize_t length = PyString_GET_SIZE(pystr);
-    if (idx >= length) {
-        PyErr_SetNone(PyExc_StopIteration);
-        return NULL;
-    }
-    switch (str[idx]) {
-        case '"':
-            /* string */
-            return scanstring_str(pystr, idx + 1,
-                PyString_AS_STRING(s->encoding),
-                PyObject_IsTrue(s->strict),
-                next_idx_ptr);
-        case '{':
-            /* object */
-            return _parse_object_str(s, pystr, idx + 1, next_idx_ptr);
-        case '[':
-            /* array */
-            return _parse_array_str(s, pystr, idx + 1, next_idx_ptr);
-        case 'n':
-            /* null */
-            if ((idx + 3 < length) && str[idx + 1] == 'u' && str[idx + 2] == 'l' && str[idx + 3] == 'l') {
-                Py_INCREF(Py_None);
-                *next_idx_ptr = idx + 4;
-                return Py_None;
-            }
-            break;
-        case 't':
-            /* true */
-            if ((idx + 3 < length) && str[idx + 1] == 'r' && str[idx + 2] == 'u' && str[idx + 3] == 'e') {
-                Py_INCREF(Py_True);
-                *next_idx_ptr = idx + 4;
-                return Py_True;
-            }
-            break;
-        case 'f':
-            /* false */
-            if ((idx + 4 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'l' && str[idx + 3] == 's' && str[idx + 4] == 'e') {
-                Py_INCREF(Py_False);
-                *next_idx_ptr = idx + 5;
-                return Py_False;
-            }
-            break;
-        case 'N':
-            /* NaN */
-            if ((idx + 2 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'N') {
-                return _parse_constant(s, "NaN", idx, next_idx_ptr);
-            }
-            break;
-        case 'I':
-            /* Infinity */
-            if ((idx + 7 < length) && str[idx + 1] == 'n' && str[idx + 2] == 'f' && str[idx + 3] == 'i' && str[idx + 4] == 'n' && str[idx + 5] == 'i' && str[idx + 6] == 't' && str[idx + 7] == 'y') {
-                return _parse_constant(s, "Infinity", idx, next_idx_ptr);
-            }
-            break;
-        case '-':
-            /* -Infinity */
-            if ((idx + 8 < length) && str[idx + 1] == 'I' && str[idx + 2] == 'n' && str[idx + 3] == 'f' && str[idx + 4] == 'i' && str[idx + 5] == 'n' && str[idx + 6] == 'i' && str[idx + 7] == 't' && str[idx + 8] == 'y') {
-                return _parse_constant(s, "-Infinity", idx, next_idx_ptr);
-            }
-            break;
-    }
-    /* Didn't find a string, object, array, or named constant. Look for a number. */
-    return _match_number_str(s, pystr, idx, next_idx_ptr);
-}
-
-static PyObject *
-scan_once_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr)
-{
-    /* Read one JSON term (of any kind) from PyUnicode pystr.
-    idx is the index of the first character of the term
-    *next_idx_ptr is a return-by-reference index to the first character after
-        the number.
-
-    Returns a new PyObject representation of the term.
-    */
-    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
-    Py_ssize_t length = PyUnicode_GET_SIZE(pystr);
-    if (idx >= length) {
-        PyErr_SetNone(PyExc_StopIteration);
-        return NULL;
-    }
-    switch (str[idx]) {
-        case '"':
-            /* string */
-            return scanstring_unicode(pystr, idx + 1,
-                PyObject_IsTrue(s->strict),
-                next_idx_ptr);
-        case '{':
-            /* object */
-            return _parse_object_unicode(s, pystr, idx + 1, next_idx_ptr);
-        case '[':
-            /* array */
-            return _parse_array_unicode(s, pystr, idx + 1, next_idx_ptr);
-        case 'n':
-            /* null */
-            if ((idx + 3 < length) && str[idx + 1] == 'u' && str[idx + 2] == 'l' && str[idx + 3] == 'l') {
-                Py_INCREF(Py_None);
-                *next_idx_ptr = idx + 4;
-                return Py_None;
-            }
-            break;
-        case 't':
-            /* true */
-            if ((idx + 3 < length) && str[idx + 1] == 'r' && str[idx + 2] == 'u' && str[idx + 3] == 'e') {
-                Py_INCREF(Py_True);
-                *next_idx_ptr = idx + 4;
-                return Py_True;
-            }
-            break;
-        case 'f':
-            /* false */
-            if ((idx + 4 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'l' && str[idx + 3] == 's' && str[idx + 4] == 'e') {
-                Py_INCREF(Py_False);
-                *next_idx_ptr = idx + 5;
-                return Py_False;
-            }
-            break;
-        case 'N':
-            /* NaN */
-            if ((idx + 2 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'N') {
-                return _parse_constant(s, "NaN", idx, next_idx_ptr);
-            }
-            break;
-        case 'I':
-            /* Infinity */
-            if ((idx + 7 < length) && str[idx + 1] == 'n' && str[idx + 2] == 'f' && str[idx + 3] == 'i' && str[idx + 4] == 'n' && str[idx + 5] == 'i' && str[idx + 6] == 't' && str[idx + 7] == 'y') {
-                return _parse_constant(s, "Infinity", idx, next_idx_ptr);
-            }
-            break;
-        case '-':
-            /* -Infinity */
-            if ((idx + 8 < length) && str[idx + 1] == 'I' && str[idx + 2] == 'n' && str[idx + 3] == 'f' && str[idx + 4] == 'i' && str[idx + 5] == 'n' && str[idx + 6] == 'i' && str[idx + 7] == 't' && str[idx + 8] == 'y') {
-                return _parse_constant(s, "-Infinity", idx, next_idx_ptr);
-            }
-            break;
-    }
-    /* Didn't find a string, object, array, or named constant. Look for a number. */
-    return _match_number_unicode(s, pystr, idx, next_idx_ptr);
-}
-
-static PyObject *
-scanner_call(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    /* Python callable interface to scan_once_{str,unicode} */
-    PyObject *pystr;
-    PyObject *rval;
-    Py_ssize_t idx;
-    Py_ssize_t next_idx = -1;
-    static char *kwlist[] = {"string", "idx", NULL};
-    PyScannerObject *s;
-    assert(PyScanner_Check(self));
-    s = (PyScannerObject *)self;
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&:scan_once", kwlist, &pystr, _convertPyInt_AsSsize_t, &idx))
-        return NULL;
-
-    if (PyString_Check(pystr)) {
-        rval = scan_once_str(s, pystr, idx, &next_idx);
-    }
-    else if (PyUnicode_Check(pystr)) {
-        rval = scan_once_unicode(s, pystr, idx, &next_idx);
-    }
-    else {
-        PyErr_Format(PyExc_TypeError,
-                 "first argument must be a string, not %.80s",
-                 Py_TYPE(pystr)->tp_name);
-        return NULL;
-    }
-    return _build_rval_index_tuple(rval, next_idx);
-}
-
-static PyObject *
-scanner_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
-{
-    PyScannerObject *s;
-    s = (PyScannerObject *)type->tp_alloc(type, 0);
-    if (s != NULL) {
-        s->encoding = NULL;
-        s->strict = NULL;
-        s->object_hook = NULL;
-        s->parse_float = NULL;
-        s->parse_int = NULL;
-        s->parse_constant = NULL;
-    }
-    return (PyObject *)s;
-}
-
-static int
-scanner_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    /* Initialize Scanner object */
-    PyObject *ctx;
-    static char *kwlist[] = {"context", NULL};
-    PyScannerObject *s;
-
-    assert(PyScanner_Check(self));
-    s = (PyScannerObject *)self;
-
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:make_scanner", kwlist, &ctx))
-        return -1;
-
-    /* PyString_AS_STRING is used on encoding */
-    s->encoding = PyObject_GetAttrString(ctx, "encoding");
-    if (s->encoding == Py_None) {
-        Py_DECREF(Py_None);
-        s->encoding = PyString_InternFromString(DEFAULT_ENCODING);
-    }
-    else if (PyUnicode_Check(s->encoding)) {
-        PyObject *tmp = PyUnicode_AsEncodedString(s->encoding, NULL, NULL);
-        Py_DECREF(s->encoding);
-        s->encoding = tmp;
-    }
-    if (s->encoding == NULL || !PyString_Check(s->encoding))
-        goto bail;
-
-    /* All of these will fail "gracefully" so we don't need to verify them */
-    s->strict = PyObject_GetAttrString(ctx, "strict");
-    if (s->strict == NULL)
-        goto bail;
-    s->object_hook = PyObject_GetAttrString(ctx, "object_hook");
-    if (s->object_hook == NULL)
-        goto bail;
-    s->parse_float = PyObject_GetAttrString(ctx, "parse_float");
-    if (s->parse_float == NULL)
-        goto bail;
-    s->parse_int = PyObject_GetAttrString(ctx, "parse_int");
-    if (s->parse_int == NULL)
-        goto bail;
-    s->parse_constant = PyObject_GetAttrString(ctx, "parse_constant");
-    if (s->parse_constant == NULL)
-        goto bail;
-
-    return 0;
-
-bail:
-    Py_CLEAR(s->encoding);
-    Py_CLEAR(s->strict);
-    Py_CLEAR(s->object_hook);
-    Py_CLEAR(s->parse_float);
-    Py_CLEAR(s->parse_int);
-    Py_CLEAR(s->parse_constant);
-    return -1;
-}
-
-PyDoc_STRVAR(scanner_doc, "JSON scanner object");
-
-static
-PyTypeObject PyScannerType = {
-    PyObject_HEAD_INIT(NULL)
-    0,                    /* ob_size */
-    "simplejson._speedups.Scanner",       /* tp_name */
-    sizeof(PyScannerObject), /* tp_basicsize */
-    0,                    /* tp_itemsize */
-    scanner_dealloc, /* tp_dealloc */
-    0,                    /* tp_print */
-    0,                    /* tp_getattr */
-    0,                    /* tp_setattr */
-    0,                    /* tp_compare */
-    0,                    /* tp_repr */
-    0,                    /* tp_as_number */
-    0,                    /* tp_as_sequence */
-    0,                    /* tp_as_mapping */
-    0,                    /* tp_hash */
-    scanner_call,         /* tp_call */
-    0,                    /* tp_str */
-    0,/* PyObject_GenericGetAttr, */                    /* tp_getattro */
-    0,/* PyObject_GenericSetAttr, */                    /* tp_setattro */
-    0,                    /* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,   /* tp_flags */
-    scanner_doc,          /* tp_doc */
-    scanner_traverse,                    /* tp_traverse */
-    scanner_clear,                    /* tp_clear */
-    0,                    /* tp_richcompare */
-    0,                    /* tp_weaklistoffset */
-    0,                    /* tp_iter */
-    0,                    /* tp_iternext */
-    0,                    /* tp_methods */
-    scanner_members,                    /* tp_members */
-    0,                    /* tp_getset */
-    0,                    /* tp_base */
-    0,                    /* tp_dict */
-    0,                    /* tp_descr_get */
-    0,                    /* tp_descr_set */
-    0,                    /* tp_dictoffset */
-    scanner_init,                    /* tp_init */
-    0,/* PyType_GenericAlloc, */        /* tp_alloc */
-    scanner_new,          /* tp_new */
-    0,/* PyObject_GC_Del, */              /* tp_free */
-};
-
-static PyObject *
-encoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
-{
-    PyEncoderObject *s;
-    s = (PyEncoderObject *)type->tp_alloc(type, 0);
-    if (s != NULL) {
-        s->markers = NULL;
-        s->defaultfn = NULL;
-        s->encoder = NULL;
-        s->indent = NULL;
-        s->key_separator = NULL;
-        s->item_separator = NULL;
-        s->sort_keys = NULL;
-        s->skipkeys = NULL;
-    }
-    return (PyObject *)s;
-}
-
-static int
-encoder_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    /* initialize Encoder object */
-    static char *kwlist[] = {"markers", "default", "encoder", "indent", "key_separator", "item_separator", "sort_keys", "skipkeys", "allow_nan", NULL};
-
-    PyEncoderObject *s;
-    PyObject *allow_nan;
-
-    assert(PyEncoder_Check(self));
-    s = (PyEncoderObject *)self;
-
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOOOOOO:make_encoder", kwlist,
-        &s->markers, &s->defaultfn, &s->encoder, &s->indent, &s->key_separator, &s->item_separator, &s->sort_keys, &s->skipkeys, &allow_nan))
-        return -1;
-
-    Py_INCREF(s->markers);
-    Py_INCREF(s->defaultfn);
-    Py_INCREF(s->encoder);
-    Py_INCREF(s->indent);
-    Py_INCREF(s->key_separator);
-    Py_INCREF(s->item_separator);
-    Py_INCREF(s->sort_keys);
-    Py_INCREF(s->skipkeys);
-    s->fast_encode = (PyCFunction_Check(s->encoder) && PyCFunction_GetFunction(s->encoder) == (PyCFunction)py_encode_basestring_ascii);
-    s->allow_nan = PyObject_IsTrue(allow_nan);
-    return 0;
-}
-
-static PyObject *
-encoder_call(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    /* Python callable interface to encoder_listencode_obj */
-    static char *kwlist[] = {"obj", "_current_indent_level", NULL};
-    PyObject *obj;
-    PyObject *rval;
-    Py_ssize_t indent_level;
-    PyEncoderObject *s;
-    assert(PyEncoder_Check(self));
-    s = (PyEncoderObject *)self;
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&:_iterencode", kwlist,
-        &obj, _convertPyInt_AsSsize_t, &indent_level))
-        return NULL;
-    rval = PyList_New(0);
-    if (rval == NULL)
-        return NULL;
-    if (encoder_listencode_obj(s, rval, obj, indent_level)) {
-        Py_DECREF(rval);
-        return NULL;
-    }
-    return rval;
-}
-
-static PyObject *
-_encoded_const(PyObject *obj)
-{
-    /* Return the JSON string representation of None, True, False */
-    if (obj == Py_None) {
-        static PyObject *s_null = NULL;
-        if (s_null == NULL) {
-            s_null = PyString_InternFromString("null");
-        }
-        Py_INCREF(s_null);
-        return s_null;
-    }
-    else if (obj == Py_True) {
-        static PyObject *s_true = NULL;
-        if (s_true == NULL) {
-            s_true = PyString_InternFromString("true");
-        }
-        Py_INCREF(s_true);
-        return s_true;
-    }
-    else if (obj == Py_False) {
-        static PyObject *s_false = NULL;
-        if (s_false == NULL) {
-            s_false = PyString_InternFromString("false");
-        }
-        Py_INCREF(s_false);
-        return s_false;
-    }
-    else {
-        PyErr_SetString(PyExc_ValueError, "not a const");
-        return NULL;
-    }
-}
-
-static PyObject *
-encoder_encode_float(PyEncoderObject *s, PyObject *obj)
-{
-    /* Return the JSON representation of a PyFloat */
-    double i = PyFloat_AS_DOUBLE(obj);
-    if (!Py_IS_FINITE(i)) {
-        if (!s->allow_nan) {
-            PyErr_SetString(PyExc_ValueError, "Out of range float values are not JSON compliant");
-            return NULL;
-        }
-        if (i > 0) {
-            return PyString_FromString("Infinity");
-        }
-        else if (i < 0) {
-            return PyString_FromString("-Infinity");
-        }
-        else {
-            return PyString_FromString("NaN");
-        }
-    }
-    /* Use a better float format here? */
-    return PyObject_Repr(obj);
-}
-
-static PyObject *
-encoder_encode_string(PyEncoderObject *s, PyObject *obj)
-{
-    /* Return the JSON representation of a string */
-    if (s->fast_encode)
-        return py_encode_basestring_ascii(NULL, obj);
-    else
-        return PyObject_CallFunctionObjArgs(s->encoder, obj, NULL);
-}
-
-static int
-_steal_list_append(PyObject *lst, PyObject *stolen)
-{
-    /* Append stolen and then decrement its reference count */
-    int rval = PyList_Append(lst, stolen);
-    Py_DECREF(stolen);
-    return rval;
-}
-
-static int
-encoder_listencode_obj(PyEncoderObject *s, PyObject *rval, PyObject *obj, Py_ssize_t indent_level)
-{
-    /* Encode Python object obj to a JSON term, rval is a PyList */
-    PyObject *newobj;
-    int rv;
-
-    if (obj == Py_None || obj == Py_True || obj == Py_False) {
-        PyObject *cstr = _encoded_const(obj);
-        if (cstr == NULL)
-            return -1;
-        return _steal_list_append(rval, cstr);
-    }
-    else if (PyString_Check(obj) || PyUnicode_Check(obj))
-    {
-        PyObject *encoded = encoder_encode_string(s, obj);
-        if (encoded == NULL)
-            return -1;
-        return _steal_list_append(rval, encoded);
-    }
-    else if (PyInt_Check(obj) || PyLong_Check(obj)) {
-        PyObject *encoded = PyObject_Str(obj);
-        if (encoded == NULL)
-            return -1;
-        return _steal_list_append(rval, encoded);
-    }
-    else if (PyFloat_Check(obj)) {
-        PyObject *encoded = encoder_encode_float(s, obj);
-        if (encoded == NULL)
-            return -1;
-        return _steal_list_append(rval, encoded);
-    }
-    else if (PyList_Check(obj) || PyTuple_Check(obj)) {
-        return encoder_listencode_list(s, rval, obj, indent_level);
-    }
-    else if (PyDict_Check(obj)) {
-        return encoder_listencode_dict(s, rval, obj, indent_level);
-    }
-    else {
-        PyObject *ident = NULL;
-        if (s->markers != Py_None) {
-            int has_key;
-            ident = PyLong_FromVoidPtr(obj);
-            if (ident == NULL)
-                return -1;
-            has_key = PyDict_Contains(s->markers, ident);
-            if (has_key) {
-                if (has_key != -1)
-                    PyErr_SetString(PyExc_ValueError, "Circular reference detected");
-                Py_DECREF(ident);
-                return -1;
-            }
-            if (PyDict_SetItem(s->markers, ident, obj)) {
-                Py_DECREF(ident);
-                return -1;
-            }
-        }
-        newobj = PyObject_CallFunctionObjArgs(s->defaultfn, obj, NULL);
-        if (newobj == NULL) {
-            Py_XDECREF(ident);
-            return -1;
-        }
-        rv = encoder_listencode_obj(s, rval, newobj, indent_level);
-        Py_DECREF(newobj);
-        if (rv) {
-            Py_XDECREF(ident);
-            return -1;
-        }
-        if (ident != NULL) {
-            if (PyDict_DelItem(s->markers, ident)) {
-                Py_XDECREF(ident);
-                return -1;
-            }
-            Py_XDECREF(ident);
-        }
-        return rv;
-    }
-}
-
-static int
-encoder_listencode_dict(PyEncoderObject *s, PyObject *rval, PyObject *dct, Py_ssize_t indent_level)
-{
-    /* Encode Python dict dct to a JSON term, rval is a PyList */
-    static PyObject *open_dict = NULL;
-    static PyObject *close_dict = NULL;
-    static PyObject *empty_dict = NULL;
-    PyObject *kstr = NULL;
-    PyObject *ident = NULL;
-    PyObject *key, *value;
-    Py_ssize_t pos;
-    int skipkeys;
-    Py_ssize_t idx;
-
-    if (open_dict == NULL || close_dict == NULL || empty_dict == NULL) {
-        open_dict = PyString_InternFromString("{");
-        close_dict = PyString_InternFromString("}");
-        empty_dict = PyString_InternFromString("{}");
-        if (open_dict == NULL || close_dict == NULL || empty_dict == NULL)
-            return -1;
-    }
-    if (PyDict_Size(dct) == 0)
-        return PyList_Append(rval, empty_dict);
-
-    if (s->markers != Py_None) {
-        int has_key;
-        ident = PyLong_FromVoidPtr(dct);
-        if (ident == NULL)
-            goto bail;
-        has_key = PyDict_Contains(s->markers, ident);
-        if (has_key) {
-            if (has_key != -1)
-                PyErr_SetString(PyExc_ValueError, "Circular reference detected");
-            goto bail;
-        }
-        if (PyDict_SetItem(s->markers, ident, dct)) {
-            goto bail;
-        }
-    }
-
-    if (PyList_Append(rval, open_dict))
-        goto bail;
-
-    if (s->indent != Py_None) {
-        /* TODO: DOES NOT RUN */
-        indent_level += 1;
-        /*
-            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
-            separator = _item_separator + newline_indent
-            buf += newline_indent
-        */
-    }
-
-    /* TODO: C speedup not implemented for sort_keys */
-
-    pos = 0;
-    skipkeys = PyObject_IsTrue(s->skipkeys);
-    idx = 0;
-    while (PyDict_Next(dct, &pos, &key, &value)) {
-        PyObject *encoded;
-
-        if (PyString_Check(key) || PyUnicode_Check(key)) {
-            Py_INCREF(key);
-            kstr = key;
-        }
-        else if (PyFloat_Check(key)) {
-            kstr = encoder_encode_float(s, key);
-            if (kstr == NULL)
-                goto bail;
-        }
-        else if (PyInt_Check(key) || PyLong_Check(key)) {
-            kstr = PyObject_Str(key);
-            if (kstr == NULL)
-                goto bail;
-        }
-        else if (key == Py_True || key == Py_False || key == Py_None) {
-            kstr = _encoded_const(key);
-            if (kstr == NULL)
-                goto bail;
-        }
-        else if (skipkeys) {
-            continue;
-        }
-        else {
-            /* TODO: include repr of key */
-            PyErr_SetString(PyExc_ValueError, "keys must be a string");
-            goto bail;
-        }
-
-        if (idx) {
-            if (PyList_Append(rval, s->item_separator))
-                goto bail;
-        }
-
-        encoded = encoder_encode_string(s, kstr);
-        Py_CLEAR(kstr);
-        if (encoded == NULL)
-            goto bail;
-        if (PyList_Append(rval, encoded)) {
-            Py_DECREF(encoded);
-            goto bail;
-        }
-        Py_DECREF(encoded);
-        if (PyList_Append(rval, s->key_separator))
-            goto bail;
-        if (encoder_listencode_obj(s, rval, value, indent_level))
-            goto bail;
-        idx += 1;
-    }
-    if (ident != NULL) {
-        if (PyDict_DelItem(s->markers, ident))
-            goto bail;
-        Py_CLEAR(ident);
-    }
-    if (s->indent != Py_None) {
-        /* TODO: DOES NOT RUN */
-        indent_level -= 1;
-        /*
-            yield '\n' + (' ' * (_indent * _current_indent_level))
-        */
-    }
-    if (PyList_Append(rval, close_dict))
-        goto bail;
-    return 0;
-
-bail:
-    Py_XDECREF(kstr);
-    Py_XDECREF(ident);
-    return -1;
-}
-
-
-static int
-encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ssize_t indent_level)
-{
-    /* Encode Python list seq to a JSON term, rval is a PyList */
-    static PyObject *open_array = NULL;
-    static PyObject *close_array = NULL;
-    static PyObject *empty_array = NULL;
-    PyObject *ident = NULL;
-    PyObject *s_fast = NULL;
-    Py_ssize_t num_items;
-    PyObject **seq_items;
-    Py_ssize_t i;
-
-    if (open_array == NULL || close_array == NULL || empty_array == NULL) {
-        open_array = PyString_InternFromString("[");
-        close_array = PyString_InternFromString("]");
-        empty_array = PyString_InternFromString("[]");
-        if (open_array == NULL || close_array == NULL || empty_array == NULL)
-            return -1;
-    }
-    ident = NULL;
-    s_fast = PySequence_Fast(seq, "_iterencode_list needs a sequence");
-    if (s_fast == NULL)
-        return -1;
-    num_items = PySequence_Fast_GET_SIZE(s_fast);
-    if (num_items == 0) {
-        Py_DECREF(s_fast);
-        return PyList_Append(rval, empty_array);
-    }
-
-    if (s->markers != Py_None) {
-        int has_key;
-        ident = PyLong_FromVoidPtr(seq);
-        if (ident == NULL)
-            goto bail;
-        has_key = PyDict_Contains(s->markers, ident);
-        if (has_key) {
-            if (has_key != -1)
-                PyErr_SetString(PyExc_ValueError, "Circular reference detected");
-            goto bail;
-        }
-        if (PyDict_SetItem(s->markers, ident, seq)) {
-            goto bail;
-        }
-    }
-
-    seq_items = PySequence_Fast_ITEMS(s_fast);
-    if (PyList_Append(rval, open_array))
-        goto bail;
-    if (s->indent != Py_None) {
-        /* TODO: DOES NOT RUN */
-        indent_level += 1;
-        /*
-            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
-            separator = _item_separator + newline_indent
-            buf += newline_indent
-        */
-    }
-    for (i = 0; i < num_items; i++) {
-        PyObject *obj = seq_items[i];
-        if (i) {
-            if (PyList_Append(rval, s->item_separator))
-                goto bail;
-        }
-        if (encoder_listencode_obj(s, rval, obj, indent_level))
-            goto bail;
-    }
-    if (ident != NULL) {
-        if (PyDict_DelItem(s->markers, ident))
-            goto bail;
-        Py_CLEAR(ident);
-    }
-    if (s->indent != Py_None) {
-        /* TODO: DOES NOT RUN */
-        indent_level -= 1;
-        /*
-            yield '\n' + (' ' * (_indent * _current_indent_level))
-        */
-    }
-    if (PyList_Append(rval, close_array))
-        goto bail;
-    Py_DECREF(s_fast);
-    return 0;
-
-bail:
-    Py_XDECREF(ident);
-    Py_DECREF(s_fast);
-    return -1;
-}
-
-static void
-encoder_dealloc(PyObject *self)
-{
-    /* Deallocate Encoder */
-    encoder_clear(self);
-    Py_TYPE(self)->tp_free(self);
-}
-
-static int
-encoder_traverse(PyObject *self, visitproc visit, void *arg)
-{
-    PyEncoderObject *s;
-    assert(PyEncoder_Check(self));
-    s = (PyEncoderObject *)self;
-    Py_VISIT(s->markers);
-    Py_VISIT(s->defaultfn);
-    Py_VISIT(s->encoder);
-    Py_VISIT(s->indent);
-    Py_VISIT(s->key_separator);
-    Py_VISIT(s->item_separator);
-    Py_VISIT(s->sort_keys);
-    Py_VISIT(s->skipkeys);
-    return 0;
-}
-
-static int
-encoder_clear(PyObject *self)
-{
-    /* Clear the Encoder's references (used by GC and dealloc) */
-    PyEncoderObject *s;
-    assert(PyEncoder_Check(self));
-    s = (PyEncoderObject *)self;
-    Py_CLEAR(s->markers);
-    Py_CLEAR(s->defaultfn);
-    Py_CLEAR(s->encoder);
-    Py_CLEAR(s->indent);
-    Py_CLEAR(s->key_separator);
-    Py_CLEAR(s->item_separator);
-    Py_CLEAR(s->sort_keys);
-    Py_CLEAR(s->skipkeys);
-    return 0;
-}
-
-PyDoc_STRVAR(encoder_doc, "_iterencode(obj, _current_indent_level) -> iterable");
-
-static
-PyTypeObject PyEncoderType = {
-    PyObject_HEAD_INIT(NULL)
-    0,                    /* tp_internal */
-    "simplejson._speedups.Encoder",       /* tp_name */
-    sizeof(PyEncoderObject), /* tp_basicsize */
-    0,                    /* tp_itemsize */
-    encoder_dealloc, /* tp_dealloc */
-    0,                    /* tp_print */
-    0,                    /* tp_getattr */
-    0,                    /* tp_setattr */
-    0,                    /* tp_compare */
-    0,                    /* tp_repr */
-    0,                    /* tp_as_number */
-    0,                    /* tp_as_sequence */
-    0,                    /* tp_as_mapping */
-    0,                    /* tp_hash */
-    encoder_call,         /* tp_call */
-    0,                    /* tp_str */
-    0,                    /* tp_getattro */
-    0,                    /* tp_setattro */
-    0,                    /* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,   /* tp_flags */
-    encoder_doc,          /* tp_doc */
-    encoder_traverse,     /* tp_traverse */
-    encoder_clear,        /* tp_clear */
-    0,                    /* tp_richcompare */
-    0,                    /* tp_weaklistoffset */
-    0,                    /* tp_iter */
-    0,                    /* tp_iternext */
-    0,                    /* tp_methods */
-    encoder_members,      /* tp_members */
-    0,                    /* tp_getset */
-    0,                    /* tp_base */
-    0,                    /* tp_dict */
-    0,                    /* tp_descr_get */
-    0,                    /* tp_descr_set */
-    0,                    /* tp_dictoffset */
-    encoder_init,         /* tp_init */
-    0,                    /* tp_alloc */
-    encoder_new,          /* tp_new */
-    0,                    /* tp_free */
-};
-
-static PyMethodDef speedups_methods[] = {
-    {"encode_basestring_ascii",
-        (PyCFunction)py_encode_basestring_ascii,
-        METH_O,
-        pydoc_encode_basestring_ascii},
-    {"scanstring",
-        (PyCFunction)py_scanstring,
-        METH_VARARGS,
-        pydoc_scanstring},
-    {NULL, NULL, 0, NULL}
-};
-
-PyDoc_STRVAR(module_doc,
-"simplejson speedups\n");
-
-void
-init_speedups(void)
-{
-    PyObject *m;
-    PyScannerType.tp_new = PyType_GenericNew;
-    if (PyType_Ready(&PyScannerType) < 0)
-        return;
-    PyEncoderType.tp_new = PyType_GenericNew;
-    if (PyType_Ready(&PyEncoderType) < 0)
-        return;
-    m = Py_InitModule3("_speedups", speedups_methods, module_doc);
-    Py_INCREF((PyObject*)&PyScannerType);
-    PyModule_AddObject(m, "make_scanner", (PyObject*)&PyScannerType);
-    Py_INCREF((PyObject*)&PyEncoderType);
-    PyModule_AddObject(m, "make_encoder", (PyObject*)&PyEncoderType);
-}
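
For context on the deleted extension above: `make_scanner` and `make_encoder` are the Scanner/Encoder types exposed by `init_speedups`, and the pure-Python modules removed below call them with exactly the argument order of `scanner_init`/`encoder_init`. A rough Python 2 sketch of driving the C encoder directly, with illustrative values only and assuming the extension has been built:

```
from simplejson._speedups import make_encoder
from simplejson.encoder import encode_basestring_ascii

def _default(o):
    # mirrors JSONEncoder.default(): unknown types are an error
    raise TypeError(repr(o) + " is not JSON serializable")

_iterencode = make_encoder(
    {},                        # markers dict, used for circular-reference checks
    _default,                  # default() hook for unknown types
    encode_basestring_ascii,   # string encoder
    None,                      # indent (the C path only supports None)
    ': ', ', ',                # key_separator, item_separator
    False, False, True)        # sort_keys, skipkeys, allow_nan

print ''.join(_iterencode({'a': [1, 2.5, None]}, 0))   # {"a": [1, 2.5, null]}
```
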
diff --git a/lib/simplejson/decoder.py b/lib/simplejson/decoder.py
deleted file mode 100644
index b769ea486ca932cd83b1689ef8e055ae2658aa72..0000000000000000000000000000000000000000
--- a/lib/simplejson/decoder.py
+++ /dev/null
@@ -1,354 +0,0 @@
-"""Implementation of JSONDecoder
-"""
-import re
-import sys
-import struct
-
-from simplejson.scanner import make_scanner
-try:
-    from simplejson._speedups import scanstring as c_scanstring
-except ImportError:
-    c_scanstring = None
-
-__all__ = ['JSONDecoder']
-
-FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
-
-def _floatconstants():
-    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
-    if sys.byteorder != 'big':
-        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
-    nan, inf = struct.unpack('dd', _BYTES)
-    return nan, inf, -inf
-
-NaN, PosInf, NegInf = _floatconstants()
-
-
-def linecol(doc, pos):
-    lineno = doc.count('\n', 0, pos) + 1
-    if lineno == 1:
-        colno = pos
-    else:
-        colno = pos - doc.rindex('\n', 0, pos)
-    return lineno, colno
-
-
-def errmsg(msg, doc, pos, end=None):
-    # Note that this function is called from _speedups
-    lineno, colno = linecol(doc, pos)
-    if end is None:
-        #fmt = '{0}: line {1} column {2} (char {3})'
-        #return fmt.format(msg, lineno, colno, pos)
-        fmt = '%s: line %d column %d (char %d)'
-        return fmt % (msg, lineno, colno, pos)
-    endlineno, endcolno = linecol(doc, end)
-    #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
-    #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
-    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
-    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
-
-
-_CONSTANTS = {
-    '-Infinity': NegInf,
-    'Infinity': PosInf,
-    'NaN': NaN,
-}
-
-STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
-BACKSLASH = {
-    '"': u'"', '\\': u'\\', '/': u'/',
-    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
-}
-
-DEFAULT_ENCODING = "utf-8"
-
-def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
-    """Scan the string s for a JSON string. End is the index of the
-    character in s after the quote that started the JSON string.
-    Unescapes all valid JSON string escape sequences and raises ValueError
-    on attempt to decode an invalid string. If strict is False then literal
-    control characters are allowed in the string.
-    
-    Returns a tuple of the decoded string and the index of the character in s
-    after the end quote."""
-    if encoding is None:
-        encoding = DEFAULT_ENCODING
-    chunks = []
-    _append = chunks.append
-    begin = end - 1
-    while 1:
-        chunk = _m(s, end)
-        if chunk is None:
-            raise ValueError(
-                errmsg("Unterminated string starting at", s, begin))
-        end = chunk.end()
-        content, terminator = chunk.groups()
-        # Content contains zero or more unescaped string characters
-        if content:
-            if not isinstance(content, unicode):
-                content = unicode(content, encoding)
-            _append(content)
-        # Terminator is the end of string, a literal control character,
-        # or a backslash denoting that an escape sequence follows
-        if terminator == '"':
-            break
-        elif terminator != '\\':
-            if strict:
-                msg = "Invalid control character %r at" % (terminator,)
-                #msg = "Invalid control character {0!r} at".format(terminator)
-                raise ValueError(errmsg(msg, s, end))
-            else:
-                _append(terminator)
-                continue
-        try:
-            esc = s[end]
-        except IndexError:
-            raise ValueError(
-                errmsg("Unterminated string starting at", s, begin))
-        # If not a unicode escape sequence, must be in the lookup table
-        if esc != 'u':
-            try:
-                char = _b[esc]
-            except KeyError:
-                msg = "Invalid \\escape: " + repr(esc)
-                raise ValueError(errmsg(msg, s, end))
-            end += 1
-        else:
-            # Unicode escape sequence
-            esc = s[end + 1:end + 5]
-            next_end = end + 5
-            if len(esc) != 4:
-                msg = "Invalid \\uXXXX escape"
-                raise ValueError(errmsg(msg, s, end))
-            uni = int(esc, 16)
-            # Check for surrogate pair on UCS-4 systems
-            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
-                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
-                if not s[end + 5:end + 7] == '\\u':
-                    raise ValueError(errmsg(msg, s, end))
-                esc2 = s[end + 7:end + 11]
-                if len(esc2) != 4:
-                    raise ValueError(errmsg(msg, s, end))
-                uni2 = int(esc2, 16)
-                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
-                next_end += 6
-            char = unichr(uni)
-            end = next_end
-        # Append the unescaped character
-        _append(char)
-    return u''.join(chunks), end
-
-
-# Use speedup if available
-scanstring = c_scanstring or py_scanstring
-
-WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
-WHITESPACE_STR = ' \t\n\r'
-
-def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
-    pairs = {}
-    # Use a slice to prevent IndexError from being raised, the following
-    # check will raise a more specific ValueError if the string is empty
-    nextchar = s[end:end + 1]
-    # Normally we expect nextchar == '"'
-    if nextchar != '"':
-        if nextchar in _ws:
-            end = _w(s, end).end()
-            nextchar = s[end:end + 1]
-        # Trivial empty object
-        if nextchar == '}':
-            return pairs, end + 1
-        elif nextchar != '"':
-            raise ValueError(errmsg("Expecting property name", s, end))
-    end += 1
-    while True:
-        key, end = scanstring(s, end, encoding, strict)
-
-        # To skip some function call overhead we optimize the fast paths where
-        # the JSON key separator is ": " or just ":".
-        if s[end:end + 1] != ':':
-            end = _w(s, end).end()
-            if s[end:end + 1] != ':':
-                raise ValueError(errmsg("Expecting : delimiter", s, end))
-
-        end += 1
-
-        try:
-            if s[end] in _ws:
-                end += 1
-                if s[end] in _ws:
-                    end = _w(s, end + 1).end()
-        except IndexError:
-            pass
-
-        try:
-            value, end = scan_once(s, end)
-        except StopIteration:
-            raise ValueError(errmsg("Expecting object", s, end))
-        pairs[key] = value
-
-        try:
-            nextchar = s[end]
-            if nextchar in _ws:
-                end = _w(s, end + 1).end()
-                nextchar = s[end]
-        except IndexError:
-            nextchar = ''
-        end += 1
-
-        if nextchar == '}':
-            break
-        elif nextchar != ',':
-            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
-
-        try:
-            nextchar = s[end]
-            if nextchar in _ws:
-                end += 1
-                nextchar = s[end]
-                if nextchar in _ws:
-                    end = _w(s, end + 1).end()
-                    nextchar = s[end]
-        except IndexError:
-            nextchar = ''
-
-        end += 1
-        if nextchar != '"':
-            raise ValueError(errmsg("Expecting property name", s, end - 1))
-
-    if object_hook is not None:
-        pairs = object_hook(pairs)
-    return pairs, end
-
-def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
-    values = []
-    nextchar = s[end:end + 1]
-    if nextchar in _ws:
-        end = _w(s, end + 1).end()
-        nextchar = s[end:end + 1]
-    # Look-ahead for trivial empty array
-    if nextchar == ']':
-        return values, end + 1
-    _append = values.append
-    while True:
-        try:
-            value, end = scan_once(s, end)
-        except StopIteration:
-            raise ValueError(errmsg("Expecting object", s, end))
-        _append(value)
-        nextchar = s[end:end + 1]
-        if nextchar in _ws:
-            end = _w(s, end + 1).end()
-            nextchar = s[end:end + 1]
-        end += 1
-        if nextchar == ']':
-            break
-        elif nextchar != ',':
-            raise ValueError(errmsg("Expecting , delimiter", s, end))
-
-        try:
-            if s[end] in _ws:
-                end += 1
-                if s[end] in _ws:
-                    end = _w(s, end + 1).end()
-        except IndexError:
-            pass
-
-    return values, end
-
-class JSONDecoder(object):
-    """Simple JSON <http://json.org> decoder
-
-    Performs the following translations in decoding by default:
-
-    +---------------+-------------------+
-    | JSON          | Python            |
-    +===============+===================+
-    | object        | dict              |
-    +---------------+-------------------+
-    | array         | list              |
-    +---------------+-------------------+
-    | string        | unicode           |
-    +---------------+-------------------+
-    | number (int)  | int, long         |
-    +---------------+-------------------+
-    | number (real) | float             |
-    +---------------+-------------------+
-    | true          | True              |
-    +---------------+-------------------+
-    | false         | False             |
-    +---------------+-------------------+
-    | null          | None              |
-    +---------------+-------------------+
-
-    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
-    their corresponding ``float`` values, which is outside the JSON spec.
-
-    """
-
-    def __init__(self, encoding=None, object_hook=None, parse_float=None,
-            parse_int=None, parse_constant=None, strict=True):
-        """``encoding`` determines the encoding used to interpret any ``str``
-        objects decoded by this instance (utf-8 by default).  It has no
-        effect when decoding ``unicode`` objects.
-
-        Note that currently only encodings that are a superset of ASCII work,
-        strings of other encodings should be passed in as ``unicode``.
-
-        ``object_hook``, if specified, will be called with the result
-        of every JSON object decoded and its return value will be used in
-        place of the given ``dict``.  This can be used to provide custom
-        deserializations (e.g. to support JSON-RPC class hinting).
-
-        ``parse_float``, if specified, will be called with the string
-        of every JSON float to be decoded. By default this is equivalent to
-        float(num_str). This can be used to use another datatype or parser
-        for JSON floats (e.g. decimal.Decimal).
-
-        ``parse_int``, if specified, will be called with the string
-        of every JSON int to be decoded. By default this is equivalent to
-        int(num_str). This can be used to use another datatype or parser
-        for JSON integers (e.g. float).
-
-        ``parse_constant``, if specified, will be called with one of the
-        following strings: -Infinity, Infinity, NaN.
-        This can be used to raise an exception if invalid JSON numbers
-        are encountered.
-
-        """
-        self.encoding = encoding
-        self.object_hook = object_hook
-        self.parse_float = parse_float or float
-        self.parse_int = parse_int or int
-        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
-        self.strict = strict
-        self.parse_object = JSONObject
-        self.parse_array = JSONArray
-        self.parse_string = scanstring
-        self.scan_once = make_scanner(self)
-
-    def decode(self, s, _w=WHITESPACE.match):
-        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
-        instance containing a JSON document)
-
-        """
-        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
-        end = _w(s, end).end()
-        if end != len(s):
-            raise ValueError(errmsg("Extra data", s, end, len(s)))
-        return obj
-
-    def raw_decode(self, s, idx=0):
-        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
-        with a JSON document) and return a 2-tuple of the Python
-        representation and the index in ``s`` where the document ended.
-
-        This can be used to decode a JSON document from a string that may
-        have extraneous data at the end.
-
-        """
-        try:
-            obj, end = self.scan_once(s, idx)
-        except StopIteration:
-            raise ValueError("No JSON object could be decoded")
-        return obj, end
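
A brief usage sketch for the decoder deleted above (Python 2, following its own docstrings): `object_hook` replaces each decoded dict, and `raw_decode` also returns the index where the document ended, so trailing data can be handled by the caller.

```
from simplejson.decoder import JSONDecoder

decoder = JSONDecoder(object_hook=lambda d: sorted(d.keys()))
print decoder.decode('{"b": 1, "a": 2}')      # [u'a', u'b']

plain = JSONDecoder()
obj, end = plain.raw_decode('{"x": true} trailing data')
print obj, end                                # {u'x': True} 11
```
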
diff --git a/lib/simplejson/encoder.py b/lib/simplejson/encoder.py
deleted file mode 100644
index cf58290366b4e33351a73123c391eabc8ebc45fa..0000000000000000000000000000000000000000
--- a/lib/simplejson/encoder.py
+++ /dev/null
@@ -1,440 +0,0 @@
-"""Implementation of JSONEncoder
-"""
-import re
-
-try:
-    from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
-except ImportError:
-    c_encode_basestring_ascii = None
-try:
-    from simplejson._speedups import make_encoder as c_make_encoder
-except ImportError:
-    c_make_encoder = None
-
-ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
-ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
-HAS_UTF8 = re.compile(r'[\x80-\xff]')
-ESCAPE_DCT = {
-    '\\': '\\\\',
-    '"': '\\"',
-    '\b': '\\b',
-    '\f': '\\f',
-    '\n': '\\n',
-    '\r': '\\r',
-    '\t': '\\t',
-}
-for i in range(0x20):
-    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
-    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
-
-# Assume this produces an infinity on all machines (probably not guaranteed)
-INFINITY = float('1e66666')
-FLOAT_REPR = repr
-
-def encode_basestring(s):
-    """Return a JSON representation of a Python string
-
-    """
-    def replace(match):
-        return ESCAPE_DCT[match.group(0)]
-    return '"' + ESCAPE.sub(replace, s) + '"'
-
-
-def py_encode_basestring_ascii(s):
-    """Return an ASCII-only JSON representation of a Python string
-
-    """
-    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
-        s = s.decode('utf-8')
-    def replace(match):
-        s = match.group(0)
-        try:
-            return ESCAPE_DCT[s]
-        except KeyError:
-            n = ord(s)
-            if n < 0x10000:
-                #return '\\u{0:04x}'.format(n)
-                return '\\u%04x' % (n,)
-            else:
-                # surrogate pair
-                n -= 0x10000
-                s1 = 0xd800 | ((n >> 10) & 0x3ff)
-                s2 = 0xdc00 | (n & 0x3ff)
-                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
-                return '\\u%04x\\u%04x' % (s1, s2)
-    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
-
-
-encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
-
-class JSONEncoder(object):
-    """Extensible JSON <http://json.org> encoder for Python data structures.
-
-    Supports the following objects and types by default:
-
-    +-------------------+---------------+
-    | Python            | JSON          |
-    +===================+===============+
-    | dict              | object        |
-    +-------------------+---------------+
-    | list, tuple       | array         |
-    +-------------------+---------------+
-    | str, unicode      | string        |
-    +-------------------+---------------+
-    | int, long, float  | number        |
-    +-------------------+---------------+
-    | True              | true          |
-    +-------------------+---------------+
-    | False             | false         |
-    +-------------------+---------------+
-    | None              | null          |
-    +-------------------+---------------+
-
-    To extend this to recognize other objects, subclass and implement a
-    ``.default()`` method that returns a serializable object for ``o`` if
-    possible; otherwise it should call the superclass implementation (to
-    raise ``TypeError``).
-
-    """
-    item_separator = ', '
-    key_separator = ': '
-    def __init__(self, skipkeys=False, ensure_ascii=True,
-            check_circular=True, allow_nan=True, sort_keys=False,
-            indent=None, separators=None, encoding='utf-8', default=None):
-        """Constructor for JSONEncoder, with sensible defaults.
-
-        If skipkeys is false, then it is a TypeError to attempt
-        encoding of keys that are not str, int, long, float or None.  If
-        skipkeys is True, such items are simply skipped.
-
-        If ensure_ascii is true, the output is guaranteed to be str
-        objects with all incoming unicode characters escaped.  If
-        ensure_ascii is false, the output will be a unicode object.
-
-        If check_circular is true, then lists, dicts, and custom encoded
-        objects will be checked for circular references during encoding to
-        prevent an infinite recursion (which would cause an OverflowError).
-        Otherwise, no such check takes place.
-
-        If allow_nan is true, then NaN, Infinity, and -Infinity will be
-        encoded as such.  This behavior is not JSON specification compliant,
-        but is consistent with most JavaScript based encoders and decoders.
-        Otherwise, it will be a ValueError to encode such floats.
-
-        If sort_keys is true, then the output of dictionaries will be
-        sorted by key; this is useful for regression tests to ensure
-        that JSON serializations can be compared on a day-to-day basis.
-
-        If indent is a non-negative integer, then JSON array
-        elements and object members will be pretty-printed with that
-        indent level.  An indent level of 0 will only insert newlines.
-        None is the most compact representation.
-
-        If specified, separators should be a (item_separator, key_separator)
-        tuple.  The default is (', ', ': ').  To get the most compact JSON
-        representation you should specify (',', ':') to eliminate whitespace.
-
-        If specified, default is a function that gets called for objects
-        that can't otherwise be serialized.  It should return a JSON encodable
-        version of the object or raise a ``TypeError``.
-
-        If encoding is not None, then all input strings will be
-        transformed into unicode using that encoding prior to JSON-encoding.
-        The default is UTF-8.
-
-        """
-
-        self.skipkeys = skipkeys
-        self.ensure_ascii = ensure_ascii
-        self.check_circular = check_circular
-        self.allow_nan = allow_nan
-        self.sort_keys = sort_keys
-        self.indent = indent
-        if separators is not None:
-            self.item_separator, self.key_separator = separators
-        if default is not None:
-            self.default = default
-        self.encoding = encoding
-
-    def default(self, o):
-        """Implement this method in a subclass such that it returns
-        a serializable object for ``o``, or calls the base implementation
-        (to raise a ``TypeError``).
-
-        For example, to support arbitrary iterators, you could
-        implement default like this::
-
-            def default(self, o):
-                try:
-                    iterable = iter(o)
-                except TypeError:
-                    pass
-                else:
-                    return list(iterable)
-                return JSONEncoder.default(self, o)
-
-        """
-        raise TypeError(repr(o) + " is not JSON serializable")
-
-    def encode(self, o):
-        """Return a JSON string representation of a Python data structure.
-
-        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
-        '{"foo": ["bar", "baz"]}'
-
-        """
-        # This is for extremely simple cases and benchmarks.
-        if isinstance(o, basestring):
-            if isinstance(o, str):
-                _encoding = self.encoding
-                if (_encoding is not None
-                        and not (_encoding == 'utf-8')):
-                    o = o.decode(_encoding)
-            if self.ensure_ascii:
-                return encode_basestring_ascii(o)
-            else:
-                return encode_basestring(o)
-        # This doesn't pass the iterator directly to ''.join() because the
-        # exceptions aren't as detailed.  The list call should be roughly
-        # equivalent to the PySequence_Fast that ''.join() would do.
-        chunks = self.iterencode(o, _one_shot=True)
-        if not isinstance(chunks, (list, tuple)):
-            chunks = list(chunks)
-        return ''.join(chunks)
-
-    def iterencode(self, o, _one_shot=False):
-        """Encode the given object and yield each string
-        representation as available.
-
-        For example::
-
-            for chunk in JSONEncoder().iterencode(bigobject):
-                mysocket.write(chunk)
-
-        """
-        if self.check_circular:
-            markers = {}
-        else:
-            markers = None
-        if self.ensure_ascii:
-            _encoder = encode_basestring_ascii
-        else:
-            _encoder = encode_basestring
-        if self.encoding != 'utf-8':
-            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
-                if isinstance(o, str):
-                    o = o.decode(_encoding)
-                return _orig_encoder(o)
-
-        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
-            # Check for specials.  Note that this type of test is processor- and/or
-            # platform-specific, so do tests which don't depend on the internals.
-
-            if o != o:
-                text = 'NaN'
-            elif o == _inf:
-                text = 'Infinity'
-            elif o == _neginf:
-                text = '-Infinity'
-            else:
-                return _repr(o)
-
-            if not allow_nan:
-                raise ValueError(
-                    "Out of range float values are not JSON compliant: " +
-                    repr(o))
-
-            return text
-
-
-        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
-            _iterencode = c_make_encoder(
-                markers, self.default, _encoder, self.indent,
-                self.key_separator, self.item_separator, self.sort_keys,
-                self.skipkeys, self.allow_nan)
-        else:
-            _iterencode = _make_iterencode(
-                markers, self.default, _encoder, self.indent, floatstr,
-                self.key_separator, self.item_separator, self.sort_keys,
-                self.skipkeys, _one_shot)
-        return _iterencode(o, 0)
-
-def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
-        ## HACK: hand-optimized bytecode; turn globals into locals
-        False=False,
-        True=True,
-        ValueError=ValueError,
-        basestring=basestring,
-        dict=dict,
-        float=float,
-        id=id,
-        int=int,
-        isinstance=isinstance,
-        list=list,
-        long=long,
-        str=str,
-        tuple=tuple,
-    ):
-
-    def _iterencode_list(lst, _current_indent_level):
-        if not lst:
-            yield '[]'
-            return
-        if markers is not None:
-            markerid = id(lst)
-            if markerid in markers:
-                raise ValueError("Circular reference detected")
-            markers[markerid] = lst
-        buf = '['
-        if _indent is not None:
-            _current_indent_level += 1
-            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
-            separator = _item_separator + newline_indent
-            buf += newline_indent
-        else:
-            newline_indent = None
-            separator = _item_separator
-        first = True
-        for value in lst:
-            if first:
-                first = False
-            else:
-                buf = separator
-            if isinstance(value, basestring):
-                yield buf + _encoder(value)
-            elif value is None:
-                yield buf + 'null'
-            elif value is True:
-                yield buf + 'true'
-            elif value is False:
-                yield buf + 'false'
-            elif isinstance(value, (int, long)):
-                yield buf + str(value)
-            elif isinstance(value, float):
-                yield buf + _floatstr(value)
-            else:
-                yield buf
-                if isinstance(value, (list, tuple)):
-                    chunks = _iterencode_list(value, _current_indent_level)
-                elif isinstance(value, dict):
-                    chunks = _iterencode_dict(value, _current_indent_level)
-                else:
-                    chunks = _iterencode(value, _current_indent_level)
-                for chunk in chunks:
-                    yield chunk
-        if newline_indent is not None:
-            _current_indent_level -= 1
-            yield '\n' + (' ' * (_indent * _current_indent_level))
-        yield ']'
-        if markers is not None:
-            del markers[markerid]
-
-    def _iterencode_dict(dct, _current_indent_level):
-        if not dct:
-            yield '{}'
-            return
-        if markers is not None:
-            markerid = id(dct)
-            if markerid in markers:
-                raise ValueError("Circular reference detected")
-            markers[markerid] = dct
-        yield '{'
-        if _indent is not None:
-            _current_indent_level += 1
-            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
-            item_separator = _item_separator + newline_indent
-            yield newline_indent
-        else:
-            newline_indent = None
-            item_separator = _item_separator
-        first = True
-        if _sort_keys:
-            items = dct.items()
-            items.sort(key=lambda kv: kv[0])
-        else:
-            items = dct.iteritems()
-        for key, value in items:
-            if isinstance(key, basestring):
-                pass
-            # JavaScript is weakly typed for these, so it makes sense to
-            # also allow them.  Many encoders seem to do something like this.
-            elif isinstance(key, float):
-                key = _floatstr(key)
-            elif key is True:
-                key = 'true'
-            elif key is False:
-                key = 'false'
-            elif key is None:
-                key = 'null'
-            elif isinstance(key, (int, long)):
-                key = str(key)
-            elif _skipkeys:
-                continue
-            else:
-                raise TypeError("key " + repr(key) + " is not a string")
-            if first:
-                first = False
-            else:
-                yield item_separator
-            yield _encoder(key)
-            yield _key_separator
-            if isinstance(value, basestring):
-                yield _encoder(value)
-            elif value is None:
-                yield 'null'
-            elif value is True:
-                yield 'true'
-            elif value is False:
-                yield 'false'
-            elif isinstance(value, (int, long)):
-                yield str(value)
-            elif isinstance(value, float):
-                yield _floatstr(value)
-            else:
-                if isinstance(value, (list, tuple)):
-                    chunks = _iterencode_list(value, _current_indent_level)
-                elif isinstance(value, dict):
-                    chunks = _iterencode_dict(value, _current_indent_level)
-                else:
-                    chunks = _iterencode(value, _current_indent_level)
-                for chunk in chunks:
-                    yield chunk
-        if newline_indent is not None:
-            _current_indent_level -= 1
-            yield '\n' + (' ' * (_indent * _current_indent_level))
-        yield '}'
-        if markers is not None:
-            del markers[markerid]
-
-    def _iterencode(o, _current_indent_level):
-        if isinstance(o, basestring):
-            yield _encoder(o)
-        elif o is None:
-            yield 'null'
-        elif o is True:
-            yield 'true'
-        elif o is False:
-            yield 'false'
-        elif isinstance(o, (int, long)):
-            yield str(o)
-        elif isinstance(o, float):
-            yield _floatstr(o)
-        elif isinstance(o, (list, tuple)):
-            for chunk in _iterencode_list(o, _current_indent_level):
-                yield chunk
-        elif isinstance(o, dict):
-            for chunk in _iterencode_dict(o, _current_indent_level):
-                yield chunk
-        else:
-            if markers is not None:
-                markerid = id(o)
-                if markerid in markers:
-                    raise ValueError("Circular reference detected")
-                markers[markerid] = o
-            o = _default(o)
-            for chunk in _iterencode(o, _current_indent_level):
-                yield chunk
-            if markers is not None:
-                del markers[markerid]
-
-    return _iterencode
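
A short sketch of the encoder deleted above (Python 2), reusing the `default()` example from its own class docstring plus the compact `separators` option described in `__init__`; the `IterEncoder` name is purely illustrative.

```
from simplejson.encoder import JSONEncoder

class IterEncoder(JSONEncoder):
    def default(self, o):
        # from the JSONEncoder.default() docstring: serialize any iterable
        try:
            iterable = iter(o)
        except TypeError:
            pass
        else:
            return list(iterable)
        return JSONEncoder.default(self, o)

enc = IterEncoder(separators=(',', ':'), sort_keys=True)
print enc.encode({'nums': xrange(3), 'ok': True})   # {"nums":[0,1,2],"ok":true}
```
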
diff --git a/lib/simplejson/scanner.py b/lib/simplejson/scanner.py
deleted file mode 100644
index adbc6ec979c9f05d54e1556a6fd007499a953ee6..0000000000000000000000000000000000000000
--- a/lib/simplejson/scanner.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""JSON token scanner
-"""
-import re
-try:
-    from simplejson._speedups import make_scanner as c_make_scanner
-except ImportError:
-    c_make_scanner = None
-
-__all__ = ['make_scanner']
-
-NUMBER_RE = re.compile(
-    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
-    (re.VERBOSE | re.MULTILINE | re.DOTALL))
-
-def py_make_scanner(context):
-    parse_object = context.parse_object
-    parse_array = context.parse_array
-    parse_string = context.parse_string
-    match_number = NUMBER_RE.match
-    encoding = context.encoding
-    strict = context.strict
-    parse_float = context.parse_float
-    parse_int = context.parse_int
-    parse_constant = context.parse_constant
-    object_hook = context.object_hook
-
-    def _scan_once(string, idx):
-        try:
-            nextchar = string[idx]
-        except IndexError:
-            raise StopIteration
-
-        if nextchar == '"':
-            return parse_string(string, idx + 1, encoding, strict)
-        elif nextchar == '{':
-            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
-        elif nextchar == '[':
-            return parse_array((string, idx + 1), _scan_once)
-        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
-            return None, idx + 4
-        elif nextchar == 't' and string[idx:idx + 4] == 'true':
-            return True, idx + 4
-        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
-            return False, idx + 5
-
-        m = match_number(string, idx)
-        if m is not None:
-            integer, frac, exp = m.groups()
-            if frac or exp:
-                res = parse_float(integer + (frac or '') + (exp or ''))
-            else:
-                res = parse_int(integer)
-            return res, m.end()
-        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
-            return parse_constant('NaN'), idx + 3
-        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
-            return parse_constant('Infinity'), idx + 8
-        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
-            return parse_constant('-Infinity'), idx + 9
-        else:
-            raise StopIteration
-
-    return _scan_once
-
-make_scanner = c_make_scanner or py_make_scanner
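
For reference, a scanner built from a decoder context returns `(value, end_index)` tuples, which is what `raw_decode` relies on; a minimal Python 2 sketch:

```
from simplejson.decoder import JSONDecoder
from simplejson.scanner import make_scanner

scan_once = make_scanner(JSONDecoder())
print scan_once('[1, 2.5, "x"]', 0)   # ([1, 2.5, u'x'], 13)
print scan_once('NaN and more', 0)    # (nan, 3)
```
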
diff --git a/lib/socks.py b/lib/socks.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c91ea6a81f887e5cc1c7a4ca174d09eab3c2c84
--- /dev/null
+++ b/lib/socks.py
@@ -0,0 +1,831 @@
+"""
+SocksiPy - Python SOCKS module.
+
+Copyright 2006 Dan-Haim. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+3. Neither the name of Dan Haim nor the names of his contributors may be used
+   to endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
+
+
+This module provides a standard socket-like interface for Python
+for tunneling connections through SOCKS proxies.
+
+===============================================================================
+
+Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
+for use in PyLoris (http://pyloris.sourceforge.net/)
+
+Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
+mainly to merge bug fixes found in Sourceforge
+
+Modifications made by Anorov (https://github.com/Anorov)
+-Forked and renamed to PySocks
+-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method)
+-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
+ courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py
+-Re-styled code to make it readable
+    -Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
+    -Improved exception handling and output
+    -Removed irritating use of sequence indexes, replaced with tuple unpacked variables
+    -Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
+    -Other general fixes
+-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies
+-Various small bug fixes
+"""
+
+__version__ = "1.6.7"
+
+import socket
+import struct
+from errno import EOPNOTSUPP, EINVAL, EAGAIN
+from io import BytesIO
+from os import SEEK_CUR
+import os
+import sys
+import functools
+import logging
+from collections import Callable
+from base64 import b64encode
+
+
+if os.name == "nt" and sys.version_info < (3, 0):
+    try:
+        import win_inet_pton
+    except ImportError:
+        raise ImportError("To run PySocks on Windows you must install win_inet_pton")
+
+log = logging.getLogger(__name__)
+
+PROXY_TYPE_SOCKS4 = SOCKS4 = 1
+PROXY_TYPE_SOCKS5 = SOCKS5 = 2
+PROXY_TYPE_HTTP = HTTP = 3
+
+PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
+PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))
+
+_orgsocket = _orig_socket = socket.socket
+
+
+def set_self_blocking(function):
+
+    @functools.wraps(function)
+    def wrapper(*args, **kwargs):
+        self = args[0]
+        try:
+            _is_blocking = self.gettimeout()
+            if _is_blocking == 0:
+                self.setblocking(True)
+            return function(*args, **kwargs)
+        except Exception as e:
+            raise
+        finally:
+            # restore the original non-blocking state
+            if _is_blocking == 0:
+                self.setblocking(False)
+    return wrapper
+
+class ProxyError(IOError):
+    """
+    socket_err contains original socket.error exception.
+    """
+    def __init__(self, msg, socket_err=None):
+        self.msg = msg
+        self.socket_err = socket_err
+
+        if socket_err:
+            self.msg += ": {0}".format(socket_err)
+
+    def __str__(self):
+        return self.msg
+
+class GeneralProxyError(ProxyError): pass
+class ProxyConnectionError(ProxyError): pass
+class SOCKS5AuthError(ProxyError): pass
+class SOCKS5Error(ProxyError): pass
+class SOCKS4Error(ProxyError): pass
+class HTTPError(ProxyError): pass
+
+SOCKS4_ERRORS = { 0x5B: "Request rejected or failed",
+                  0x5C: "Request rejected because SOCKS server cannot connect to identd on the client",
+                  0x5D: "Request rejected because the client program and identd report different user-ids"
+                }
+
+SOCKS5_ERRORS = { 0x01: "General SOCKS server failure",
+                  0x02: "Connection not allowed by ruleset",
+                  0x03: "Network unreachable",
+                  0x04: "Host unreachable",
+                  0x05: "Connection refused",
+                  0x06: "TTL expired",
+                  0x07: "Command not supported, or protocol error",
+                  0x08: "Address type not supported"
+                }
+
+DEFAULT_PORTS = { SOCKS4: 1080,
+                  SOCKS5: 1080,
+                  HTTP: 8080
+                }
+
+def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
+    """
+    set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]])
+
+    Sets a default proxy which all further socksocket objects will use,
+    unless explicitly changed. All parameters are as for socket.set_proxy().
+    """
+    socksocket.default_proxy = (proxy_type, addr, port, rdns,
+                                username.encode() if username else None,
+                                password.encode() if password else None)
+
+def setdefaultproxy(*args, **kwargs):
+    if 'proxytype' in kwargs:
+        kwargs['proxy_type'] = kwargs.pop('proxytype')
+    return set_default_proxy(*args, **kwargs)
+
+def get_default_proxy():
+    """
+    Returns the default proxy, set by set_default_proxy.
+    """
+    return socksocket.default_proxy
+
+getdefaultproxy = get_default_proxy
+
+def wrap_module(module):
+    """
+    Attempts to replace a module's socket library with a SOCKS socket. Must set
+    a default proxy using set_default_proxy(...) first.
+    This will only work on modules that import socket directly into the namespace;
+    most of the Python Standard Library falls into this category.
+    """
+    if socksocket.default_proxy:
+        module.socket.socket = socksocket
+    else:
+        raise GeneralProxyError("No default proxy specified")
+
+wrapmodule = wrap_module
+
+def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
+                      proxy_port=None, proxy_rdns=True,
+                      proxy_username=None, proxy_password=None,
+                      timeout=None, source_address=None,
+                      socket_options=None):
+    """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
+
+    Like socket.create_connection(), but connects to proxy
+    before returning the socket object.
+
+    dest_pair - 2-tuple of (IP/hostname, port).
+    **proxy_args - Same args passed to socksocket.set_proxy() if present.
+    timeout - Optional socket timeout value, in seconds.
+    source_address - tuple (host, port) for the socket to bind to as its source
+    address before connecting (only for compatibility)
+    """
+    # Remove IPv6 brackets on the remote address and proxy address.
+    remote_host, remote_port = dest_pair
+    if remote_host.startswith('['):
+        remote_host = remote_host.strip('[]')
+    if proxy_addr and proxy_addr.startswith('['):
+        proxy_addr = proxy_addr.strip('[]')
+
+    err = None
+
+    # Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
+    for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
+        family, socket_type, proto, canonname, sa = r
+        sock = None
+        try:
+            sock = socksocket(family, socket_type, proto)
+
+            if socket_options:
+                for opt in socket_options:
+                    sock.setsockopt(*opt)
+
+            if isinstance(timeout, (int, float)):
+                sock.settimeout(timeout)
+
+            if proxy_type:
+                sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
+                               proxy_username, proxy_password)
+            if source_address:
+                sock.bind(source_address)
+
+            sock.connect((remote_host, remote_port))
+            return sock
+
+        except (socket.error, ProxyConnectionError) as e:
+            err = e
+            if sock:
+                sock.close()
+                sock = None
+
+    if err:
+        raise err
+
+    raise socket.error("gai returned empty list.")
+
+class _BaseSocket(socket.socket):
+    """Allows Python 2's "delegated" methods such as send() to be overridden
+    """
+    def __init__(self, *pos, **kw):
+        _orig_socket.__init__(self, *pos, **kw)
+
+        self._savedmethods = dict()
+        for name in self._savenames:
+            self._savedmethods[name] = getattr(self, name)
+            delattr(self, name)  # Allows normal overriding mechanism to work
+
+    _savenames = list()
+
+def _makemethod(name):
+    return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
+for name in ("sendto", "send", "recvfrom", "recv"):
+    method = getattr(_BaseSocket, name, None)
+
+    # Determine if the method is not defined the usual way
+    # as a function in the class.
+    # Python 2 uses __slots__, so there are descriptors for each method,
+    # but they are not functions.
+    if not isinstance(method, Callable):
+        _BaseSocket._savenames.append(name)
+        setattr(_BaseSocket, name, _makemethod(name))
+
+class socksocket(_BaseSocket):
+    """socksocket([family[, type[, proto]]]) -> socket object
+
+    Open a SOCKS enabled socket. The parameters are the same as
+    those of the standard socket init. In order for SOCKS to work,
+    you must specify family=AF_INET and proto=0.
+    The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
+    """
+
+    default_proxy = None
+
+    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
+        if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
+            msg = "Socket type must be stream or datagram, not {!r}"
+            raise ValueError(msg.format(type))
+
+        super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
+        self._proxyconn = None  # TCP connection to keep UDP relay alive
+
+        if self.default_proxy:
+            self.proxy = self.default_proxy
+        else:
+            self.proxy = (None, None, None, None, None, None)
+        self.proxy_sockname = None
+        self.proxy_peername = None
+
+        self._timeout = None
+
+    def _readall(self, file, count):
+        """
+        Receive EXACTLY the number of bytes requested from the file object.
+        Blocks until the required number of bytes have been received.
+        """
+        data = b""
+        while len(data) < count:
+            d = file.read(count - len(data))
+            if not d:
+                raise GeneralProxyError("Connection closed unexpectedly")
+            data += d
+        return data
+
+    def settimeout(self, timeout):
+        self._timeout = timeout
+        try:
+            # test if we're connected, if so apply timeout
+            peer = self.get_proxy_peername()
+            super(socksocket, self).settimeout(self._timeout)
+        except socket.error:
+            pass
+
+    def gettimeout(self):
+        return self._timeout
+
+    def setblocking(self, v):
+        if v:
+            self.settimeout(None)
+        else:
+            self.settimeout(0.0)
+
+    def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
+        """set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
+        Sets the proxy to be used.
+
+        proxy_type -    The type of the proxy to be used. Three types
+                        are supported: PROXY_TYPE_SOCKS4 (including socks4a),
+                        PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
+        addr -        The address of the server (IP or DNS).
+        port -        The port of the server. Defaults to 1080 for SOCKS
+                       servers and 8080 for HTTP proxy servers.
+        rdns -        Should DNS queries be performed on the remote side
+                       (rather than the local side). The default is True.
+                       Note: This has no effect with SOCKS4 servers.
+        username -    Username to authenticate with to the server.
+                       The default is no authentication.
+        password -    Password to authenticate with to the server.
+                       Only relevant when username is also provided.
+        """
+        self.proxy = (proxy_type, addr, port, rdns,
+                      username.encode() if username else None,
+                      password.encode() if password else None)
+
+    def setproxy(self, *args, **kwargs):
+        if 'proxytype' in kwargs:
+            kwargs['proxy_type'] = kwargs.pop('proxytype')
+        return self.set_proxy(*args, **kwargs)
+
+    def bind(self, *pos, **kw):
+        """
+        Implements proxy connection for UDP sockets,
+        which happens during the bind() phase.
+        """
+        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
+        if not proxy_type or self.type != socket.SOCK_DGRAM:
+            return _orig_socket.bind(self, *pos, **kw)
+
+        if self._proxyconn:
+            raise socket.error(EINVAL, "Socket already bound to an address")
+        if proxy_type != SOCKS5:
+            msg = "UDP only supported by SOCKS5 proxy type"
+            raise socket.error(EOPNOTSUPP, msg)
+        super(socksocket, self).bind(*pos, **kw)
+
+        # Need to specify actual local port because
+        # some relays drop packets if a port of zero is specified.
+        # Avoid specifying host address in case of NAT though.
+        _, port = self.getsockname()
+        dst = ("0", port)
+
+        self._proxyconn = _orig_socket()
+        proxy = self._proxy_addr()
+        self._proxyconn.connect(proxy)
+
+        UDP_ASSOCIATE = b"\x03"
+        _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
+
+        # The relay is most likely on the same host as the SOCKS proxy,
+        # but some proxies return a private IP address (10.x.y.z)
+        host, _ = proxy
+        _, port = relay
+        super(socksocket, self).connect((host, port))
+        super(socksocket, self).settimeout(self._timeout)
+        self.proxy_sockname = ("0.0.0.0", 0)  # Unknown
+
+    def sendto(self, bytes, *args, **kwargs):
+        if self.type != socket.SOCK_DGRAM:
+            return super(socksocket, self).sendto(bytes, *args, **kwargs)
+        if not self._proxyconn:
+            self.bind(("", 0))
+
+        address = args[-1]
+        flags = args[:-1]
+
+        header = BytesIO()
+        RSV = b"\x00\x00"
+        header.write(RSV)
+        STANDALONE = b"\x00"
+        header.write(STANDALONE)
+        self._write_SOCKS5_address(address, header)
+
+        sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
+        return sent - header.tell()
+
+    def send(self, bytes, flags=0, **kwargs):
+        if self.type == socket.SOCK_DGRAM:
+            return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
+        else:
+            return super(socksocket, self).send(bytes, flags, **kwargs)
+
+    def recvfrom(self, bufsize, flags=0):
+        if self.type != socket.SOCK_DGRAM:
+            return super(socksocket, self).recvfrom(bufsize, flags)
+        if not self._proxyconn:
+            self.bind(("", 0))
+
+        buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
+        buf.seek(2, SEEK_CUR)
+        frag = buf.read(1)
+        if ord(frag):
+            raise NotImplementedError("Received UDP packet fragment")
+        fromhost, fromport = self._read_SOCKS5_address(buf)
+
+        if self.proxy_peername:
+            peerhost, peerport = self.proxy_peername
+            if fromhost != peerhost or peerport not in (0, fromport):
+                raise socket.error(EAGAIN, "Packet filtered")
+
+        return (buf.read(bufsize), (fromhost, fromport))
+
+    def recv(self, *pos, **kw):
+        bytes, _ = self.recvfrom(*pos, **kw)
+        return bytes
+
+    def close(self):
+        if self._proxyconn:
+            self._proxyconn.close()
+        return super(socksocket, self).close()
+
+    def get_proxy_sockname(self):
+        """
+        Returns the bound IP address and port number at the proxy.
+        """
+        return self.proxy_sockname
+
+    getproxysockname = get_proxy_sockname
+
+    def get_proxy_peername(self):
+        """
+        Returns the IP and port number of the proxy.
+        """
+        return super(socksocket, self).getpeername()
+
+    getproxypeername = get_proxy_peername
+
+    def get_peername(self):
+        """
+        Returns the IP address and port number of the destination
+        machine (note: get_proxy_peername returns the proxy)
+        """
+        return self.proxy_peername
+
+    getpeername = get_peername
+
+    def _negotiate_SOCKS5(self, *dest_addr):
+        """
+        Negotiates a stream connection through a SOCKS5 server.
+        """
+        CONNECT = b"\x01"
+        self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
+            CONNECT, dest_addr)
+
+    def _SOCKS5_request(self, conn, cmd, dst):
+        """
+        Send SOCKS5 request with given command (CMD field) and
+        address (DST field). Returns resolved DST address that was used.
+        """
+        proxy_type, addr, port, rdns, username, password = self.proxy
+
+        writer = conn.makefile("wb")
+        reader = conn.makefile("rb", 0)  # buffering=0 renamed in Python 3
+        try:
+            # First we'll send the authentication packages we support.
+            if username and password:
+                # The username/password details were supplied to the
+                # set_proxy method so we support the USERNAME/PASSWORD
+                # authentication (in addition to the standard none).
+                writer.write(b"\x05\x02\x00\x02")
+            else:
+                # No username/password were entered, therefore we
+                # only support connections with no authentication.
+                writer.write(b"\x05\x01\x00")
+
+            # We'll receive the server's response to determine which
+            # method was selected
+            writer.flush()
+            chosen_auth = self._readall(reader, 2)
+
+            if chosen_auth[0:1] != b"\x05":
+                # Note: string[i:i+1] is used because indexing of a bytestring
+                # via bytestring[i] yields an integer in Python 3
+                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
+
+            # Check the chosen authentication method
+
+            if chosen_auth[1:2] == b"\x02":
+                # Okay, we need to perform a basic username/password
+                # authentication.
+                writer.write(b"\x01" + chr(len(username)).encode()
+                             + username
+                             + chr(len(password)).encode()
+                             + password)
+                writer.flush()
+                auth_status = self._readall(reader, 2)
+                if auth_status[0:1] != b"\x01":
+                    # Bad response
+                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
+                if auth_status[1:2] != b"\x00":
+                    # Authentication failed
+                    raise SOCKS5AuthError("SOCKS5 authentication failed")
+
+                # Otherwise, authentication succeeded
+
+            # No authentication is required if 0x00
+            elif chosen_auth[1:2] != b"\x00":
+                # Reaching here is always bad
+                if chosen_auth[1:2] == b"\xFF":
+                    raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
+                else:
+                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
+
+            # Now we can request the actual connection
+            writer.write(b"\x05" + cmd + b"\x00")
+            resolved = self._write_SOCKS5_address(dst, writer)
+            writer.flush()
+
+            # Get the response
+            resp = self._readall(reader, 3)
+            if resp[0:1] != b"\x05":
+                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
+
+            status = ord(resp[1:2])
+            if status != 0x00:
+                # Connection failed: server returned an error
+                error = SOCKS5_ERRORS.get(status, "Unknown error")
+                raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
+
+            # Get the bound address/port
+            bnd = self._read_SOCKS5_address(reader)
+
+            super(socksocket, self).settimeout(self._timeout)
+            return (resolved, bnd)
+        finally:
+            reader.close()
+            writer.close()
+
+    def _write_SOCKS5_address(self, addr, file):
+        """
+        Write the given (host, port) address to the file object in the packed
+        format required by the SOCKS5 protocol, and return the resolved
+        address as a tuple object.
+        """
+        host, port = addr
+        proxy_type, _, _, rdns, username, password = self.proxy
+        family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
+
+        # If the given destination address is an IP address, we'll
+        # use the IP address request even if remote resolving was specified.
+        # Detect whether the address is IPv4/6 directly.
+        for family in (socket.AF_INET, socket.AF_INET6):
+            try:
+                addr_bytes = socket.inet_pton(family, host)
+                file.write(family_to_byte[family] + addr_bytes)
+                host = socket.inet_ntop(family, addr_bytes)
+                file.write(struct.pack(">H", port))
+                return host, port
+            except socket.error:
+                continue
+
+        # Well it's not an IP number, so it's probably a DNS name.
+        if rdns:
+            # Resolve remotely
+            host_bytes = host.encode('idna')
+            file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
+        else:
+            # Resolve locally
+            addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
+            # We can't really work out what IP is reachable, so just pick the
+            # first.
+            target_addr = addresses[0]
+            family = target_addr[0]
+            host = target_addr[4][0]
+
+            addr_bytes = socket.inet_pton(family, host)
+            file.write(family_to_byte[family] + addr_bytes)
+            host = socket.inet_ntop(family, addr_bytes)
+        file.write(struct.pack(">H", port))
+        return host, port
+
+    def _read_SOCKS5_address(self, file):
+        atyp = self._readall(file, 1)
+        if atyp == b"\x01":
+            addr = socket.inet_ntoa(self._readall(file, 4))
+        elif atyp == b"\x03":
+            length = self._readall(file, 1)
+            addr = self._readall(file, ord(length))
+        elif atyp == b"\x04":
+            addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
+        else:
+            raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
+
+        port = struct.unpack(">H", self._readall(file, 2))[0]
+        return addr, port
+
+    def _negotiate_SOCKS4(self, dest_addr, dest_port):
+        """
+        Negotiates a connection through a SOCKS4 server.
+        """
+        proxy_type, addr, port, rdns, username, password = self.proxy
+
+        writer = self.makefile("wb")
+        reader = self.makefile("rb", 0)  # buffering=0 renamed in Python 3
+        try:
+            # Check if the destination address provided is an IP address
+            remote_resolve = False
+            try:
+                addr_bytes = socket.inet_aton(dest_addr)
+            except socket.error:
+                # It's a DNS name. Check where it should be resolved.
+                if rdns:
+                    addr_bytes = b"\x00\x00\x00\x01"
+                    remote_resolve = True
+                else:
+                    addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
+
+            # Construct the request packet
+            writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
+            writer.write(addr_bytes)
+
+            # The username parameter is considered userid for SOCKS4
+            if username:
+                writer.write(username)
+            writer.write(b"\x00")
+
+            # DNS name if remote resolving is required
+            # NOTE: This is actually an extension to the SOCKS4 protocol
+            # called SOCKS4A and may not be supported in all cases.
+            if remote_resolve:
+                writer.write(dest_addr.encode('idna') + b"\x00")
+            writer.flush()
+
+            # Get the response from the server
+            resp = self._readall(reader, 8)
+            if resp[0:1] != b"\x00":
+                # Bad data
+                raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
+
+            status = ord(resp[1:2])
+            if status != 0x5A:
+                # Connection failed: server returned an error
+                error = SOCKS4_ERRORS.get(status, "Unknown error")
+                raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
+
+            # Get the bound address/port
+            self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
+            if remote_resolve:
+                self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
+            else:
+                self.proxy_peername = dest_addr, dest_port
+        finally:
+            reader.close()
+            writer.close()
+
+    def _negotiate_HTTP(self, dest_addr, dest_port):
+        """
+        Negotiates a connection through an HTTP server.
+        NOTE: This currently only supports HTTP CONNECT-style proxies.
+        """
+        proxy_type, addr, port, rdns, username, password = self.proxy
+
+        # If we need to resolve locally, we do this now
+        addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
+
+        http_headers = [
+            b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
+            b"Host: " + dest_addr.encode('idna')
+        ]
+
+        if username and password:
+            http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
+
+        http_headers.append(b"\r\n")
+
+        self.sendall(b"\r\n".join(http_headers))
+
+        # We just need the first line to check if the connection was successful
+        fobj = self.makefile()
+        status_line = fobj.readline()
+        fobj.close()
+
+        if not status_line:
+            raise GeneralProxyError("Connection closed unexpectedly")
+
+        try:
+            proto, status_code, status_msg = status_line.split(" ", 2)
+        except ValueError:
+            raise GeneralProxyError("HTTP proxy server sent invalid response")
+
+        if not proto.startswith("HTTP/"):
+            raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
+
+        try:
+            status_code = int(status_code)
+        except ValueError:
+            raise HTTPError("HTTP proxy server did not return a valid HTTP status")
+
+        if status_code != 200:
+            error = "{0}: {1}".format(status_code, status_msg)
+            if status_code in (400, 403, 405):
+                # It's likely that the HTTP proxy server does not support the CONNECT tunneling method
+                error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
+                          " (must be a CONNECT tunnel proxy)")
+            raise HTTPError(error)
+
+        self.proxy_sockname = (b"0.0.0.0", 0)
+        self.proxy_peername = addr, dest_port
+
+    _proxy_negotiators = {
+                           SOCKS4: _negotiate_SOCKS4,
+                           SOCKS5: _negotiate_SOCKS5,
+                           HTTP: _negotiate_HTTP
+                         }
+
+    @set_self_blocking
+    def connect(self, dest_pair):
+        """
+        Connects to the specified destination through a proxy.
+        Uses the same API as socket's connect().
+        To select the proxy server, use set_proxy().
+
+        dest_pair - 2-tuple of (IP/hostname, port).
+        """
+        if len(dest_pair) != 2 or dest_pair[0].startswith("["):
+            # Probably IPv6, not supported -- raise an error, and hope
+            # Happy Eyeballs (RFC6555) makes sure at least the IPv4
+            # connection works...
+            raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
+
+        dest_addr, dest_port = dest_pair
+
+        if self.type == socket.SOCK_DGRAM:
+            if not self._proxyconn:
+                self.bind(("", 0))
+            dest_addr = socket.gethostbyname(dest_addr)
+
+            # If the host address is INADDR_ANY or similar, reset the peer
+            # address so that packets are received from any peer
+            if dest_addr == "0.0.0.0" and not dest_port:
+                self.proxy_peername = None
+            else:
+                self.proxy_peername = (dest_addr, dest_port)
+            return
+
+        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
+
+        # Do a minimal input check first
+        if (not isinstance(dest_pair, (list, tuple))
+                or len(dest_pair) != 2
+                or not dest_addr
+                or not isinstance(dest_port, int)):
+            raise GeneralProxyError("Invalid destination-connection (host, port) pair")
+
+
+        # We set the timeout here so that we don't hang in connection or during
+        # negotiation.
+        super(socksocket, self).settimeout(self._timeout)
+
+        if proxy_type is None:
+            # Treat like regular socket object
+            self.proxy_peername = dest_pair
+            super(socksocket, self).settimeout(self._timeout)
+            super(socksocket, self).connect((dest_addr, dest_port))
+            return
+
+        proxy_addr = self._proxy_addr()
+
+        try:
+            # Initial connection to proxy server.
+            super(socksocket, self).connect(proxy_addr)
+
+        except socket.error as error:
+            # Error while connecting to proxy
+            self.close()
+            proxy_addr, proxy_port = proxy_addr
+            proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
+            printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
+
+            msg = "Error connecting to {0} proxy {1}".format(printable_type,
+                                                           proxy_server)
+            log.debug("%s due to: %s", msg, error)
+            raise ProxyConnectionError(msg, error)
+
+        else:
+            # Connected to proxy server, now negotiate
+            try:
+                # Calls _negotiate_SOCKS4, _negotiate_SOCKS5 or _negotiate_HTTP
+                negotiate = self._proxy_negotiators[proxy_type]
+                negotiate(self, dest_addr, dest_port)
+            except socket.error as error:
+                # Wrap socket errors
+                self.close()
+                raise GeneralProxyError("Socket error", error)
+            except ProxyError:
+                # Protocol error while negotiating with proxy
+                self.close()
+                raise
+
+    def _proxy_addr(self):
+        """
+        Return proxy address to connect to as tuple object
+        """
+        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
+        proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
+        if not proxy_port:
+            raise GeneralProxyError("Invalid proxy type")
+        return proxy_addr, proxy_port
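For reference, a minimal sketch of how the PySocks module added above is used; the proxy address, port and target host below are placeholders, not values taken from this patch:

```
import socks  # the lib/socks.py module added above

# Route one TCP connection through a SOCKS5 proxy (addresses are illustrative).
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, "127.0.0.1", 1080, rdns=True)
s.settimeout(10)
s.connect(("example.com", 80))
s.sendall(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
print(s.recv(4096))
s.close()

# Alternatively, set a process-wide default proxy and monkey-patch a module
# that does `import socket` at its top level (see wrap_module above):
# socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", 1080)
# socks.wrap_module(some_module)  # some_module is hypothetical
```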
diff --git a/lib/socks/__init__.py b/lib/socks/__init__.py
deleted file mode 100644
index 628d37c50b7b8f056679de258d98ae537241761d..0000000000000000000000000000000000000000
--- a/lib/socks/__init__.py
+++ /dev/null
@@ -1,393 +0,0 @@
-"""SocksiPy - Python SOCKS module.
-Version 1.00
-
-Copyright 2006 Dan-Haim. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-3. Neither the name of Dan Haim nor the names of his contributors may be used
-   to endorse or promote products derived from this software without specific
-   prior written permission.
-   
-THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
-OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
-
-
-This module provides a standard socket-like interface for Python
-for tunneling connections through SOCKS proxies.
-
-"""
-
-"""
-
-Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
-for use in PyLoris (http://pyloris.sourceforge.net/)
-
-Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
-mainly to merge bug fixes found in Sourceforge
-
-"""
-
-import re
-import socket
-import struct
-import sys
-
-PROXY_TYPE_SOCKS4 = 1
-PROXY_TYPE_SOCKS5 = 2
-PROXY_TYPE_HTTP = 3
-
-PROXY_REGEX = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*):([^/?#]*))?")
-
-_defaultproxy = None
-_orgsocket = socket.socket
-
-class ProxyError(Exception): pass
-class GeneralProxyError(ProxyError): pass
-class Socks5AuthError(ProxyError): pass
-class Socks5Error(ProxyError): pass
-class Socks4Error(ProxyError): pass
-class HTTPError(ProxyError): pass
-
-_generalerrors = ("success",
-    "invalid data",
-    "not connected",
-    "not available",
-    "bad proxy type",
-    "bad input")
-
-_socks5errors = ("succeeded",
-    "general SOCKS server failure",
-    "connection not allowed by ruleset",
-    "Network unreachable",
-    "Host unreachable",
-    "Connection refused",
-    "TTL expired",
-    "Command not supported",
-    "Address type not supported",
-    "Unknown error")
-
-_socks5autherrors = ("succeeded",
-    "authentication is required",
-    "all offered authentication methods were rejected",
-    "unknown username or invalid password",
-    "unknown error")
-
-_socks4errors = ("request granted",
-    "request rejected or failed",
-    "request rejected because SOCKS server cannot connect to identd on the client",
-    "request rejected because the client program and identd report different user-ids",
-    "unknown error")
-
-def parseproxyuri(proxyurl):
-    """Parses a http proxy uri in the format x://a.b.c.d:port
-
-        (protocol, addr, port) = parseproxyuri(uri)
-    """
-    groups = PROXY_REGEX.match(proxyurl).groups()
-    return (groups[1], groups[3], groups[4])
-
-def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
-    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
-    Sets a default proxy which all further socksocket objects will use,
-    unless explicitly changed.
-    """
-    global _defaultproxy
-    _defaultproxy = (proxytype, addr, port, rdns, username, password)
-
-def wrapmodule(module):
-    """wrapmodule(module)
-    Attempts to replace a module's socket library with a SOCKS socket. Must set
-    a default proxy using setdefaultproxy(...) first.
-    This will only work on modules that import socket directly into the namespace;
-    most of the Python Standard Library falls into this category.
-    """
-    if _defaultproxy != None:
-        module.socket.socket = socksocket
-    else:
-        raise GeneralProxyError((4, "no proxy specified"))
-
-class socksocket(socket.socket):
-    """socksocket([family[, type[, proto]]]) -> socket object
-    Open a SOCKS enabled socket. The parameters are the same as
-    those of the standard socket init. In order for SOCKS to work,
-    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
-    """
-
-    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
-        _orgsocket.__init__(self, family, type, proto, _sock)
-        if _defaultproxy != None:
-            self.__proxy = _defaultproxy
-        else:
-            self.__proxy = (None, None, None, None, None, None)
-        self.__proxysockname = None
-        self.__proxypeername = None
-
-    def __recvall(self, count):
-        """__recvall(count) -> data
-        Receive EXACTLY the number of bytes requested from the socket.
-        Blocks until the required number of bytes have been received.
-        """
-        data = self.recv(count)
-        while len(data) < count:
-            d = self.recv(count-len(data))
-            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
-            data = data + d
-        return data
-
-    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
-        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
-        Sets the proxy to be used.
-        proxytype -    The type of the proxy to be used. Three types
-                are supported: PROXY_TYPE_SOCKS4 (including socks4a),
-                PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
-        addr -        The address of the server (IP or DNS).
-        port -        The port of the server. Defaults to 1080 for SOCKS
-                servers and 8080 for HTTP proxy servers.
-        rdns -        Should DNS queries be preformed on the remote side
-                (rather than the local side). The default is True.
-                Note: This has no effect with SOCKS4 servers.
-        username -    Username to authenticate with to the server.
-                The default is no authentication.
-        password -    Password to authenticate with to the server.
-                Only relevant when username is also provided.
-        """
-        self.__proxy = (proxytype, addr, port, rdns, username, password)
-
-    def __negotiatesocks5(self, destaddr, destport):
-        """__negotiatesocks5(self,destaddr,destport)
-        Negotiates a connection through a SOCKS5 server.
-        """
-        # First we'll send the authentication packages we support.
-        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
-            # The username/password details were supplied to the
-            # setproxy method so we support the USERNAME/PASSWORD
-            # authentication (in addition to the standard none).
-            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
-        else:
-            # No username/password were entered, therefore we
-            # only support connections with no authentication.
-            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
-        # We'll receive the server's response to determine which
-        # method was selected
-        chosenauth = self.__recvall(2)
-        if chosenauth[0:1] != chr(0x05).encode():
-            self.close()
-            raise GeneralProxyError((1, _generalerrors[1]))
-        # Check the chosen authentication method
-        if chosenauth[1:2] == chr(0x00).encode():
-            # No authentication is required
-            pass
-        elif chosenauth[1:2] == chr(0x02).encode():
-            # Okay, we need to perform a basic username/password
-            # authentication.
-            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
-            authstat = self.__recvall(2)
-            if authstat[0:1] != chr(0x01).encode():
-                # Bad response
-                self.close()
-                raise GeneralProxyError((1, _generalerrors[1]))
-            if authstat[1:2] != chr(0x00).encode():
-                # Authentication failed
-                self.close()
-                raise Socks5AuthError((3, _socks5autherrors[3]))
-            # Authentication succeeded
-        else:
-            # Reaching here is always bad
-            self.close()
-            if chosenauth[1] == chr(0xFF).encode():
-                raise Socks5AuthError((2, _socks5autherrors[2]))
-            else:
-                raise GeneralProxyError((1, _generalerrors[1]))
-        # Now we can request the actual connection
-        req = struct.pack('BBB', 0x05, 0x01, 0x00)
-        # If the given destination address is an IP address, we'll
-        # use the IPv4 address request even if remote resolving was specified.
-        try:
-            ipaddr = socket.inet_aton(destaddr)
-            req = req + chr(0x01).encode() + ipaddr
-        except socket.error:
-            # Well it's not an IP number,  so it's probably a DNS name.
-            if self.__proxy[3]:
-                # Resolve remotely
-                ipaddr = None
-                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
-            else:
-                # Resolve locally
-                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
-                req = req + chr(0x01).encode() + ipaddr
-        req = req + struct.pack(">H", destport)
-        self.sendall(req)
-        # Get the response
-        resp = self.__recvall(4)
-        if resp[0:1] != chr(0x05).encode():
-            self.close()
-            raise GeneralProxyError((1, _generalerrors[1]))
-        elif resp[1:2] != chr(0x00).encode():
-            # Connection failed
-            self.close()
-            if ord(resp[1:2])<=8:
-                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
-            else:
-                raise Socks5Error((9, _socks5errors[9]))
-        # Get the bound address/port
-        elif resp[3:4] == chr(0x01).encode():
-            boundaddr = self.__recvall(4)
-        elif resp[3:4] == chr(0x03).encode():
-            resp = resp + self.recv(1)
-            boundaddr = self.__recvall(ord(resp[4:5]))
-        else:
-            self.close()
-            raise GeneralProxyError((1,_generalerrors[1]))
-        boundport = struct.unpack(">H", self.__recvall(2))[0]
-        self.__proxysockname = (boundaddr, boundport)
-        if ipaddr != None:
-            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
-        else:
-            self.__proxypeername = (destaddr, destport)
-
-    def getproxysockname(self):
-        """getsockname() -> address info
-        Returns the bound IP address and port number at the proxy.
-        """
-        return self.__proxysockname
-
-    def getproxypeername(self):
-        """getproxypeername() -> address info
-        Returns the IP and port number of the proxy.
-        """
-        return _orgsocket.getpeername(self)
-
-    def getpeername(self):
-        """getpeername() -> address info
-        Returns the IP address and port number of the destination
-        machine (note: getproxypeername returns the proxy)
-        """
-        return self.__proxypeername
-
-    def __negotiatesocks4(self,destaddr,destport):
-        """__negotiatesocks4(self,destaddr,destport)
-        Negotiates a connection through a SOCKS4 server.
-        """
-        # Check if the destination address provided is an IP address
-        rmtrslv = False
-        try:
-            ipaddr = socket.inet_aton(destaddr)
-        except socket.error:
-            # It's a DNS name. Check where it should be resolved.
-            if self.__proxy[3]:
-                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
-                rmtrslv = True
-            else:
-                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
-        # Construct the request packet
-        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
-        # The username parameter is considered userid for SOCKS4
-        if self.__proxy[4] != None:
-            req = req + self.__proxy[4]
-        req = req + chr(0x00).encode()
-        # DNS name if remote resolving is required
-        # NOTE: This is actually an extension to the SOCKS4 protocol
-        # called SOCKS4A and may not be supported in all cases.
-        if rmtrslv:
-            req = req + destaddr + chr(0x00).encode()
-        self.sendall(req)
-        # Get the response from the server
-        resp = self.__recvall(8)
-        if resp[0:1] != chr(0x00).encode():
-            # Bad data
-            self.close()
-            raise GeneralProxyError((1,_generalerrors[1]))
-        if resp[1:2] != chr(0x5A).encode():
-            # Server returned an error
-            self.close()
-            if ord(resp[1:2]) in (91, 92, 93):
-                self.close()
-                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
-            else:
-                raise Socks4Error((94, _socks4errors[4]))
-        # Get the bound address/port
-        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
-        if rmtrslv != None:
-            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
-        else:
-            self.__proxypeername = (destaddr, destport)
-
-    def __negotiatehttp(self, destaddr, destport):
-        """__negotiatehttp(self,destaddr,destport)
-        Negotiates a connection through an HTTP server.
-        """
-        # If we need to resolve locally, we do this now
-        if not self.__proxy[3]:
-            addr = socket.gethostbyname(destaddr)
-        else:
-            addr = destaddr
-        self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
-        # We read the response until we get the string "\r\n\r\n"
-        resp = self.recv(1)
-        while resp.find("\r\n\r\n".encode()) == -1:
-            resp = resp + self.recv(1)
-        # We just need the first line to check if the connection
-        # was successful
-        statusline = resp.splitlines()[0].split(" ".encode(), 2)
-        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
-            self.close()
-            raise GeneralProxyError((1, _generalerrors[1]))
-        try:
-            statuscode = int(statusline[1])
-        except ValueError:
-            self.close()
-            raise GeneralProxyError((1, _generalerrors[1]))
-        if statuscode != 200:
-            self.close()
-            raise HTTPError((statuscode, statusline[2]))
-        self.__proxysockname = ("0.0.0.0", 0)
-        self.__proxypeername = (addr, destport)
-
-    def connect(self, destpair):
-        """connect(self, despair)
-        Connects to the specified destination through a proxy.
-        destpar - A tuple of the IP/DNS address and the port number.
-        (identical to socket's connect).
-        To select the proxy server use setproxy().
-        """
-        # Do a minimal input check first
-        if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
-            raise GeneralProxyError((5, _generalerrors[5]))
-        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
-            if self.__proxy[2] != None:
-                portnum = self.__proxy[2]
-            else:
-                portnum = 1080
-            _orgsocket.connect(self, (self.__proxy[1], portnum))
-            self.__negotiatesocks5(destpair[0], destpair[1])
-        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
-            if self.__proxy[2] != None:
-                portnum = self.__proxy[2]
-            else:
-                portnum = 1080
-            _orgsocket.connect(self,(self.__proxy[1], portnum))
-            self.__negotiatesocks4(destpair[0], destpair[1])
-        elif self.__proxy[0] == PROXY_TYPE_HTTP:
-            if self.__proxy[2] != None:
-                portnum = self.__proxy[2]
-            else:
-                portnum = 8080
-            _orgsocket.connect(self,(self.__proxy[1], portnum))
-            self.__negotiatehttp(destpair[0], destpair[1])
-        elif self.__proxy[0] == None:
-            _orgsocket.connect(self, (destpair[0], destpair[1]))
-        else:
-            raise GeneralProxyError((4, _generalerrors[4]))
diff --git a/lib/sockshandler.py b/lib/sockshandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..26c83439cc50ea607cd1853da16caf0f9ed06bcb
--- /dev/null
+++ b/lib/sockshandler.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+"""
+SocksiPy + urllib2 handler
+
+version: 0.3
+author: e<e@tr0ll.in>
+
+This module provides a Handler which you can use with urllib2 to tunnel your connection through a socks.socksocket socket, without monkey patching the original socket module.
+"""
+import ssl
+
+try:
+    import urllib2
+    import httplib
+except ImportError: # Python 3
+    import urllib.request as urllib2
+    import http.client as httplib
+
+import socks # $ pip install PySocks
+
+def merge_dict(a, b):
+    d = a.copy()
+    d.update(b)
+    return d
+
+class SocksiPyConnection(httplib.HTTPConnection):
+    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
+        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
+        httplib.HTTPConnection.__init__(self, *args, **kwargs)
+
+    def connect(self):
+        self.sock = socks.socksocket()
+        self.sock.setproxy(*self.proxyargs)
+        if type(self.timeout) in (int, float):
+            self.sock.settimeout(self.timeout)
+        self.sock.connect((self.host, self.port))
+
+class SocksiPyConnectionS(httplib.HTTPSConnection):
+    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
+        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
+        httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+
+    def connect(self):
+        sock = socks.socksocket()
+        sock.setproxy(*self.proxyargs)
+        if type(self.timeout) in (int, float):
+            sock.settimeout(self.timeout)
+        sock.connect((self.host, self.port))
+        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
+
+class SocksiPyHandler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
+    def __init__(self, *args, **kwargs):
+        self.args = args
+        self.kw = kwargs
+        urllib2.HTTPHandler.__init__(self)
+
+    def http_open(self, req):
+        def build(host, port=None, timeout=0, **kwargs):
+            kw = merge_dict(self.kw, kwargs)
+            conn = SocksiPyConnection(*self.args, host=host, port=port, timeout=timeout, **kw)
+            return conn
+        return self.do_open(build, req)
+
+    def https_open(self, req):
+        def build(host, port=None, timeout=0, **kwargs):
+            kw = merge_dict(self.kw, kwargs)
+            conn = SocksiPyConnectionS(*self.args, host=host, port=port, timeout=timeout, **kw)
+            return conn
+        return self.do_open(build, req)
+
+if __name__ == "__main__":
+    import sys
+    try:
+        port = int(sys.argv[1])
+    except (ValueError, IndexError):
+        port = 9050
+    opener = urllib2.build_opener(SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "localhost", port))
+    print("HTTP: " + opener.open("http://httpbin.org/ip").read().decode())
+    print("HTTPS: " + opener.open("https://httpbin.org/ip").read().decode())
diff --git a/lib/tzlocal/darwin.py b/lib/tzlocal/darwin.py
index 0485fb72098349d490165b10807ebefb3dac394e..4e8540bc088b4ca8702b2a4c61c54f56175d0565 100644
--- a/lib/tzlocal/darwin.py
+++ b/lib/tzlocal/darwin.py
@@ -2,25 +2,45 @@ from __future__ import with_statement
 import os
 import pytz
 import subprocess
+import sys
 
 _cache_tz = None
 
+if sys.version_info[0] == 2:
 
-def _get_localzone():
-    pipe = subprocess.Popen(
+    class Popen(subprocess.Popen):
+
+        def __enter__(self):
+            return self
+
+        def __exit__(self, type, value, traceback):
+            if self.stdout:
+                self.stdout.close()
+            if self.stderr:
+                self.stderr.close()
+            if self.stdin:
+                self.stdin.close()
+            # Wait for the process to terminate, to avoid zombies.
+            self.wait()
+
+else:
+    from subprocess import Popen
+
+
+def _get_localzone(_root='/'):
+    with Popen(
         "systemsetup -gettimezone",
         shell=True,
         stderr=subprocess.PIPE,
         stdout=subprocess.PIPE
-    )
-    tzname = pipe.stdout.read().replace(b'Time Zone: ', b'').strip()
+    ) as pipe:
+        tzname = pipe.stdout.read().replace(b'Time Zone: ', b'').strip()
 
     if not tzname or tzname not in pytz.all_timezones_set:
         # link will be something like /usr/share/zoneinfo/America/Los_Angeles.
-        link = os.readlink("/etc/localtime")
+        link = os.readlink(os.path.join(_root, "etc/localtime"))
         tzname = link[link.rfind("zoneinfo/") + 9:]
-    pipe.stdout.close()
-    pipe.stderr.close()
+
     return pytz.timezone(tzname)
 
 
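A quick sanity check for the patched tzlocal code: get_localzone() is the package's public entry point and returns a pytz timezone, so the change above can be exercised with something like the following (output depends on the host's configured zone):

```
from datetime import datetime

import pytz
from tzlocal import get_localzone

tz = get_localzone()                      # a pytz zone, e.g. 'Africa/Harare'
now = tz.localize(datetime.now())
print(tz.zone, now.isoformat())
print(now.astimezone(pytz.utc).isoformat())
```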
diff --git a/lib/tzlocal/test_data/Harare b/lib/tzlocal/test_data/Harare
deleted file mode 100644
index 258b393637294912a6d6c78973c09424136ed50e..0000000000000000000000000000000000000000
Binary files a/lib/tzlocal/test_data/Harare and /dev/null differ
diff --git a/lib/tzlocal/test_data/localtime/etc/localtime b/lib/tzlocal/test_data/localtime/etc/localtime
deleted file mode 100644
index 258b393637294912a6d6c78973c09424136ed50e..0000000000000000000000000000000000000000
Binary files a/lib/tzlocal/test_data/localtime/etc/localtime and /dev/null differ
diff --git a/lib/tzlocal/test_data/symlink_localtime/etc/localtime b/lib/tzlocal/test_data/symlink_localtime/etc/localtime
deleted file mode 120000
index 2f01cab109846006945ee1ce8a517253144bd4bc..0000000000000000000000000000000000000000
--- a/lib/tzlocal/test_data/symlink_localtime/etc/localtime
+++ /dev/null
@@ -1 +0,0 @@
-../usr/share/zoneinfo/Africa/Harare
\ No newline at end of file
diff --git a/lib/tzlocal/test_data/symlink_localtime/usr/share/zoneinfo/Africa/Harare b/lib/tzlocal/test_data/symlink_localtime/usr/share/zoneinfo/Africa/Harare
deleted file mode 100644
index 258b393637294912a6d6c78973c09424136ed50e..0000000000000000000000000000000000000000
Binary files a/lib/tzlocal/test_data/symlink_localtime/usr/share/zoneinfo/Africa/Harare and /dev/null differ
diff --git a/lib/tzlocal/test_data/timezone/etc/timezone b/lib/tzlocal/test_data/timezone/etc/timezone
deleted file mode 100644
index 28b3372d20178c226897295e83a52cddc082989b..0000000000000000000000000000000000000000
--- a/lib/tzlocal/test_data/timezone/etc/timezone
+++ /dev/null
@@ -1 +0,0 @@
-Africa/Harare
diff --git a/lib/tzlocal/test_data/timezone_setting/etc/conf.d/clock b/lib/tzlocal/test_data/timezone_setting/etc/conf.d/clock
deleted file mode 100644
index 95032934d63e86ca9e240657e9a0a403aa152b16..0000000000000000000000000000000000000000
--- a/lib/tzlocal/test_data/timezone_setting/etc/conf.d/clock
+++ /dev/null
@@ -1 +0,0 @@
-TIMEZONE = "Africa/Harare"
diff --git a/lib/tzlocal/test_data/vardbzoneinfo/var/db/zoneinfo b/lib/tzlocal/test_data/vardbzoneinfo/var/db/zoneinfo
deleted file mode 100644
index 28b3372d20178c226897295e83a52cddc082989b..0000000000000000000000000000000000000000
--- a/lib/tzlocal/test_data/vardbzoneinfo/var/db/zoneinfo
+++ /dev/null
@@ -1 +0,0 @@
-Africa/Harare
diff --git a/lib/tzlocal/test_data/zone_setting/etc/sysconfig/clock b/lib/tzlocal/test_data/zone_setting/etc/sysconfig/clock
deleted file mode 100644
index e1ddbfd6ec7f39e307b25fcd6f2e4e07dc5e4a27..0000000000000000000000000000000000000000
--- a/lib/tzlocal/test_data/zone_setting/etc/sysconfig/clock
+++ /dev/null
@@ -1 +0,0 @@
-ZONE="Africa/Harare"
diff --git a/lib/tzlocal/tests.py b/lib/tzlocal/tests.py
deleted file mode 100644
index e736eb4084bfa09c5834f8134ca26220b7af5f6e..0000000000000000000000000000000000000000
--- a/lib/tzlocal/tests.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import sys
-import os
-from datetime import datetime
-import unittest
-import pytz
-import tzlocal.unix
-
-class TzLocalTests(unittest.TestCase):
-    def setUp(self):
-        if 'TZ' in os.environ:
-            del os.environ['TZ']
-
-    def test_env(self):
-        tz_harare = tzlocal.unix._tz_from_env(':Africa/Harare')
-        self.assertEqual(tz_harare.zone, 'Africa/Harare')
-
-        # Some Unices allow this as well, so we must allow it:
-        tz_harare = tzlocal.unix._tz_from_env('Africa/Harare')
-        self.assertEqual(tz_harare.zone, 'Africa/Harare')
-
-        local_path = os.path.split(__file__)[0]
-        tz_local = tzlocal.unix._tz_from_env(':' + os.path.join(local_path, 'test_data', 'Harare'))
-        self.assertEqual(tz_local.zone, 'local')
-        # Make sure the local timezone is the same as the Harare one above.
-        # We test this with a past date, so that we don't run into future changes
-        # of the Harare timezone.
-        dt = datetime(2012, 1, 1, 5)
-        self.assertEqual(tz_harare.localize(dt), tz_local.localize(dt))
-
-        # Non-zoneinfo timezones are not supported in the TZ environment.
-        self.assertRaises(pytz.UnknownTimeZoneError, tzlocal.unix._tz_from_env, 'GMT+03:00')
-
-    def test_timezone(self):
-        # Most versions of Ubuntu
-        local_path = os.path.split(__file__)[0]
-        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'timezone'))
-        self.assertEqual(tz.zone, 'Africa/Harare')
-
-    def test_zone_setting(self):
-        # A ZONE setting in /etc/sysconfig/clock, f ex CentOS
-        local_path = os.path.split(__file__)[0]
-        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'zone_setting'))
-        self.assertEqual(tz.zone, 'Africa/Harare')
-
-    def test_timezone_setting(self):
-        # A ZONE setting in /etc/conf.d/clock, f ex Gentoo
-        local_path = os.path.split(__file__)[0]
-        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'timezone_setting'))
-        self.assertEqual(tz.zone, 'Africa/Harare')
-
-    def test_symlink_localtime(self):
-        # A ZONE setting in the target path of a symbolic linked localtime, f ex systemd distributions
-        local_path = os.path.split(__file__)[0]
-        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'symlink_localtime'))
-        self.assertEqual(tz.zone, 'Africa/Harare')
-
-    def test_vardbzoneinfo_setting(self):
-        # A ZONE setting in /etc/conf.d/clock, f ex Gentoo
-        local_path = os.path.split(__file__)[0]
-        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'vardbzoneinfo'))
-        self.assertEqual(tz.zone, 'Africa/Harare')
-
-    def test_only_localtime(self):
-        local_path = os.path.split(__file__)[0]
-        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'localtime'))
-        self.assertEqual(tz.zone, 'local')
-        dt = datetime(2012, 1, 1, 5)
-        self.assertEqual(pytz.timezone('Africa/Harare').localize(dt), tz.localize(dt))
-
-if sys.platform == 'win32':
-
-    import tzlocal.win32
-    class TzWin32Tests(unittest.TestCase):
-
-        def test_win32(self):
-            tzlocal.win32.get_localzone()
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/lib/tzlocal/windows_tz.py b/lib/tzlocal/windows_tz.py
index ee4be8a6827d13ccaea5732fc47fed1ac3e7a527..de89c85676b86da90ab702f250d00ea1f0fc1f04 100644
--- a/lib/tzlocal/windows_tz.py
+++ b/lib/tzlocal/windows_tz.py
@@ -303,6 +303,7 @@ tz_win = {'Africa/Abidjan': 'Greenwich Standard Time',
  'America/Port_of_Spain': 'SA Western Standard Time',
  'America/Porto_Velho': 'SA Western Standard Time',
  'America/Puerto_Rico': 'SA Western Standard Time',
+ 'America/Punta_Arenas': 'SA Eastern Standard Time',
  'America/Rainy_River': 'Central Standard Time',
  'America/Rankin_Inlet': 'Central Standard Time',
  'America/Recife': 'SA Eastern Standard Time',
@@ -340,7 +341,7 @@ tz_win = {'Africa/Abidjan': 'Greenwich Standard Time',
  'Antarctica/Macquarie': 'Central Pacific Standard Time',
  'Antarctica/Mawson': 'West Asia Standard Time',
  'Antarctica/McMurdo': 'New Zealand Standard Time',
- 'Antarctica/Palmer': 'Pacific SA Standard Time',
+ 'Antarctica/Palmer': 'SA Eastern Standard Time',
  'Antarctica/Rothera': 'SA Eastern Standard Time',
  'Antarctica/Syowa': 'E. Africa Standard Time',
  'Antarctica/Vostok': 'Central Asia Standard Time',
@@ -370,6 +371,7 @@ tz_win = {'Africa/Abidjan': 'Greenwich Standard Time',
  'Asia/Dili': 'Tokyo Standard Time',
  'Asia/Dubai': 'Arabian Standard Time',
  'Asia/Dushanbe': 'West Asia Standard Time',
+ 'Asia/Famagusta': 'Turkey Standard Time',
  'Asia/Gaza': 'West Bank Standard Time',
  'Asia/Hebron': 'West Bank Standard Time',
  'Asia/Hong_Kong': 'China Standard Time',
diff --git a/lib/unidecode/__init__.py b/lib/unidecode/__init__.py
index 82eb5a3fc45b462fe06d2f5d9a43b98f554a7b42..3b68de4c904a5527309ff9c330a6f16aadc5043d 100644
--- a/lib/unidecode/__init__.py
+++ b/lib/unidecode/__init__.py
@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
+# vi:tabstop=4:expandtab:sw=4
 """Transliterate Unicode text into plain 7-bit ASCII.
 
 Example usage:
@@ -18,19 +19,53 @@ from sys import version_info
 
 Cache = {}
 
-def unidecode(string):
+
+def _warn_if_not_unicode(string):
+    if version_info[0] < 3 and not isinstance(string, unicode):
+        warnings.warn(  "Argument %r is not an unicode object. "
+                        "Passing an encoded string will likely have "
+                        "unexpected results." % (type(string),),
+                        RuntimeWarning, 2)
+
+
+def unidecode_expect_ascii(string):
     """Transliterate an Unicode object into an ASCII string
 
     >>> unidecode(u"\u5317\u4EB0")
     "Bei Jing "
+
+    This function first tries to convert the string using ASCII codec.
+    If it fails (because of non-ASCII characters), it falls back to
+    transliteration using the character tables.
+
+    This is approx. five times faster if the string only contains ASCII
+    characters, but slightly slower than using unidecode directly if non-ASCII
+    chars are present.
     """
 
-    if version_info[0] < 3 and not isinstance(string, unicode):
-        warnings.warn(  "Argument %r is not an unicode object. "
-                        "Passing an encoded string will likely have "
-                        "unexpected results." % (type(string),),
-			RuntimeWarning, 2)
+    _warn_if_not_unicode(string)
+    try:
+        bytestring = string.encode('ASCII')
+    except UnicodeEncodeError:
+        return _unidecode(string)
+    if version_info[0] >= 3:
+        return string
+    else:
+        return bytestring
+
+def unidecode_expect_nonascii(string):
+    """Transliterate an Unicode object into an ASCII string
 
+    >>> unidecode(u"\u5317\u4EB0")
+    "Bei Jing "
+    """
+
+    _warn_if_not_unicode(string)
+    return _unidecode(string)
+
+unidecode = unidecode_expect_ascii
+
+def _unidecode(string):
     retval = []
 
     for char in string:
@@ -43,6 +78,11 @@ def unidecode(string):
         if codepoint > 0xeffff:
             continue # Characters in Private Use Area and above are ignored
 
+        if 0xd800 <= codepoint <= 0xdfff:
+            warnings.warn(  "Surrogate character %r will be ignored. "
+                            "You might be using a narrow Python build." % (char,),
+                            RuntimeWarning, 2)
+
         section = codepoint >> 8   # Chop off the last two hex digits
         position = codepoint % 256 # Last two hex digits
 
@@ -50,7 +90,7 @@ def unidecode(string):
             table = Cache[section]
         except KeyError:
             try:
-                mod = __import__('unidecode.x%03x'%(section), [], [], ['data'])
+                mod = __import__('unidecode.x%03x'%(section), globals(), locals(), ['data'])
             except ImportError:
                 Cache[section] = None
                 continue   # No match: ignore this character and carry on.
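
Editor's note: the refactor above splits the public API into `unidecode_expect_ascii` (with an ASCII fast path) and `unidecode_expect_nonascii`, keeping `unidecode` as an alias for the former so existing imports continue to work. A minimal usage sketch, assuming only the functions defined in this hunk:

```python
# Both entry points transliterate to 7-bit ASCII; the default one first tries
# a plain ASCII encode and only falls back to the character tables on failure.
from unidecode import unidecode, unidecode_expect_nonascii

print(unidecode(u'\u5317\u4EB0'))                # 'Bei Jing '
print(unidecode(u'plain ascii'))                 # returned via the fast path
print(unidecode_expect_nonascii(u'\u00e9cole'))  # 'ecole', skips the ASCII attempt
```
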
diff --git a/lib/unidecode/util.py b/lib/unidecode/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..477280d188c9f6a0a7b7a5de742db1e9bc199a4f
--- /dev/null
+++ b/lib/unidecode/util.py
@@ -0,0 +1,58 @@
+# vim:ts=4 sw=4 expandtab softtabstop=4
+from __future__ import print_function
+import optparse
+import locale
+import os
+import sys
+import warnings
+
+from unidecode import unidecode
+
+PY3 = sys.version_info[0] >= 3
+
+def fatal(msg):
+    sys.stderr.write(msg + "\n")
+    sys.exit(1)
+
+def main():
+    default_encoding = locale.getpreferredencoding()
+
+    parser = optparse.OptionParser('%prog [options] [FILE]',
+            description="Transliterate Unicode text into ASCII. FILE is path to file to transliterate. "
+            "Standard input is used if FILE is omitted and -c is not specified.")
+    parser.add_option('-e', '--encoding', metavar='ENCODING', default=default_encoding,
+            help='Specify an encoding (default is %s)' % (default_encoding,))
+    parser.add_option('-c', metavar='TEXT', dest='text',
+            help='Transliterate TEXT instead of FILE')
+
+    options, args = parser.parse_args()
+
+    encoding = options.encoding
+
+    if args:
+        if options.text:
+            fatal("Can't use both FILE and -c option")
+        else:
+            with open(args[0], 'rb') as f:
+                stream = f.read()
+    elif options.text:
+        if PY3:
+            stream = os.fsencode(options.text)
+        else:
+            stream = options.text
+        # add a newline to the string if it comes from the
+        # command line so that the result is printed nicely
+        # on the console.
+        stream += '\n'.encode('ascii')
+    else:
+        if PY3:
+            stream = sys.stdin.buffer.read()
+        else:
+            stream = sys.stdin.read()
+
+    try:
+        stream = stream.decode(encoding)
+    except UnicodeDecodeError as e:
+        fatal('Unable to decode input: %s, start: %d, end: %d' % (e.reason, e.start, e.end))
+
+    sys.stdout.write(unidecode(stream))
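
Editor's note: the new util.py only wires an optparse front end around unidecode(); no console-script registration is part of this patch. A hypothetical way to drive it directly with the `-c` option shown in the hunk (argv values and a UTF-8 locale are illustrative assumptions):

```python
# Hypothetical driver for the new CLI helper; equivalent to a console-script
# invocation `unidecode -c <text>` if one were installed.
import sys
from unidecode.util import main

sys.argv = ['unidecode', '-c', u'Ko\u017eu\u0161\u010dek']
main()   # writes "Kozuscek" (plus the newline added for -c input) to stdout
```
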
diff --git a/lib/unidecode/x000.py b/lib/unidecode/x000.py
index 6821df47db0020bccd86045d6cbad398d0a31d05..c3f8f5157228aae9b721a423f3c1003d7968377d 100644
--- a/lib/unidecode/x000.py
+++ b/lib/unidecode/x000.py
@@ -1,132 +1,15 @@
 data = (
-'\x00',    # 0x00
-'\x01',    # 0x01
-'\x02',    # 0x02
-'\x03',    # 0x03
-'\x04',    # 0x04
-'\x05',    # 0x05
-'\x06',    # 0x06
-'\x07',    # 0x07
-'\x08',    # 0x08
-'\x09',    # 0x09
-'\x0a',    # 0x0a
-'\x0b',    # 0x0b
-'\x0c',    # 0x0c
-'\x0d',    # 0x0d
-'\x0e',    # 0x0e
-'\x0f',    # 0x0f
-'\x10',    # 0x10
-'\x11',    # 0x11
-'\x12',    # 0x12
-'\x13',    # 0x13
-'\x14',    # 0x14
-'\x15',    # 0x15
-'\x16',    # 0x16
-'\x17',    # 0x17
-'\x18',    # 0x18
-'\x19',    # 0x19
-'\x1a',    # 0x1a
-'\x1b',    # 0x1b
-'\x1c',    # 0x1c
-'\x1d',    # 0x1d
-'\x1e',    # 0x1e
-'\x1f',    # 0x1f
-' ',    # 0x20
-'!',    # 0x21
-'"',    # 0x22
-'#',    # 0x23
-'$',    # 0x24
-'%',    # 0x25
-'&',    # 0x26
-'\'',    # 0x27
-'(',    # 0x28
-')',    # 0x29
-'*',    # 0x2a
-'+',    # 0x2b
-',',    # 0x2c
-'-',    # 0x2d
-'.',    # 0x2e
-'/',    # 0x2f
-'0',    # 0x30
-'1',    # 0x31
-'2',    # 0x32
-'3',    # 0x33
-'4',    # 0x34
-'5',    # 0x35
-'6',    # 0x36
-'7',    # 0x37
-'8',    # 0x38
-'9',    # 0x39
-':',    # 0x3a
-';',    # 0x3b
-'<',    # 0x3c
-'=',    # 0x3d
-'>',    # 0x3e
-'?',    # 0x3f
-'@',    # 0x40
-'A',    # 0x41
-'B',    # 0x42
-'C',    # 0x43
-'D',    # 0x44
-'E',    # 0x45
-'F',    # 0x46
-'G',    # 0x47
-'H',    # 0x48
-'I',    # 0x49
-'J',    # 0x4a
-'K',    # 0x4b
-'L',    # 0x4c
-'M',    # 0x4d
-'N',    # 0x4e
-'O',    # 0x4f
-'P',    # 0x50
-'Q',    # 0x51
-'R',    # 0x52
-'S',    # 0x53
-'T',    # 0x54
-'U',    # 0x55
-'V',    # 0x56
-'W',    # 0x57
-'X',    # 0x58
-'Y',    # 0x59
-'Z',    # 0x5a
-']',    # 0x5b
-'\\',    # 0x5c
-']',    # 0x5d
-'^',    # 0x5e
-'_',    # 0x5f
-'`',    # 0x60
-'a',    # 0x61
-'b',    # 0x62
-'c',    # 0x63
-'d',    # 0x64
-'e',    # 0x65
-'f',    # 0x66
-'g',    # 0x67
-'h',    # 0x68
-'i',    # 0x69
-'j',    # 0x6a
-'k',    # 0x6b
-'l',    # 0x6c
-'m',    # 0x6d
-'n',    # 0x6e
-'o',    # 0x6f
-'p',    # 0x70
-'q',    # 0x71
-'r',    # 0x72
-'s',    # 0x73
-'t',    # 0x74
-'u',    # 0x75
-'v',    # 0x76
-'w',    # 0x77
-'x',    # 0x78
-'y',    # 0x79
-'z',    # 0x7a
-'{',    # 0x7b
-'|',    # 0x7c
-'}',    # 0x7d
-'~',    # 0x7e
-'',    # 0x7f
+# Code points u+007f and below are equivalent to ASCII and are handled by a
+# special case in the code. Hence they are not present in this table.
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
+
 '',    # 0x80
 '',    # 0x81
 '',    # 0x82
@@ -162,7 +45,10 @@ data = (
 ' ',    # 0xa0
 '!',    # 0xa1
 'C/',    # 0xa2
+
+# Not "GBP" - Pound Sign is used for more than just British Pounds.
 'PS',    # 0xa3
+
 '$?',    # 0xa4
 'Y=',    # 0xa5
 '|',    # 0xa6
@@ -177,8 +63,11 @@ data = (
 '-',    # 0xaf
 'deg',    # 0xb0
 '+-',    # 0xb1
+
+# These might be combined with other superscript digits (u+2070 - u+2079)
 '2',    # 0xb2
 '3',    # 0xb3
+
 '\'',    # 0xb4
 'u',    # 0xb5
 'P',    # 0xb6
@@ -195,7 +84,10 @@ data = (
 'A',    # 0xc1
 'A',    # 0xc2
 'A',    # 0xc3
+
+# Not "AE" - used in languages other than German
 'A',    # 0xc4
+
 'A',    # 0xc5
 'AE',    # 0xc6
 'C',    # 0xc7
@@ -213,13 +105,19 @@ data = (
 'O',    # 0xd3
 'O',    # 0xd4
 'O',    # 0xd5
+
+# Not "OE" - used in languages other than German
 'O',    # 0xd6
+
 'x',    # 0xd7
 'O',    # 0xd8
 'U',    # 0xd9
 'U',    # 0xda
 'U',    # 0xdb
+
+# Not "UE" - used in languages other than German
 'U',    # 0xdc
+
 'Y',    # 0xdd
 'Th',    # 0xde
 'ss',    # 0xdf
@@ -227,7 +125,10 @@ data = (
 'a',    # 0xe1
 'a',    # 0xe2
 'a',    # 0xe3
+
+# Not "ae" - used in languages other than German
 'a',    # 0xe4
+
 'a',    # 0xe5
 'ae',    # 0xe6
 'c',    # 0xe7
@@ -245,13 +146,19 @@ data = (
 'o',    # 0xf3
 'o',    # 0xf4
 'o',    # 0xf5
+
+# Not "oe" - used in languages other than German
 'o',    # 0xf6
+
 '/',    # 0xf7
 'o',    # 0xf8
 'u',    # 0xf9
 'u',    # 0xfa
 'u',    # 0xfb
+
+# Not "ue" - used in languages other than German
 'u',    # 0xfc
+
 'y',    # 0xfd
 'th',    # 0xfe
 'y',    # 0xff
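
Editor's note: the new comments in x000.py document a deliberate policy: Latin-1 umlauts map to bare vowels rather than the German-only ae/oe/ue digraphs, and the pound sign stays 'PS' rather than 'GBP'. A quick check of that behaviour, assuming the table above is the one loaded by unidecode():

```python
# Umlauts and sharp s from the revised Latin-1 table.
from unidecode import unidecode

print(unidecode(u'M\xfcller'))   # 'Muller', not 'Mueller'
print(unidecode(u'\xc4rger'))    # 'Arger'
print(unidecode(u'stra\xdfe'))   # 'strasse'
```
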
diff --git a/lib/unidecode/x002.py b/lib/unidecode/x002.py
index ea45441e1c601ef71bcaabe79900d365f9ab2118..d7028cdf3517030862f529b457ac189bfe04035c 100644
--- a/lib/unidecode/x002.py
+++ b/lib/unidecode/x002.py
@@ -175,7 +175,7 @@ data = (
 ']]',    # 0xad
 'h',    # 0xae
 'h',    # 0xaf
-'k',    # 0xb0
+'h',    # 0xb0
 'h',    # 0xb1
 'j',    # 0xb2
 'r',    # 0xb3
diff --git a/lib/unidecode/x005.py b/lib/unidecode/x005.py
index 738a99623d7c4e29232708844762e5631660ad6c..2913ffff0bca1f2d153433ded888b1e5e24cec44 100644
--- a/lib/unidecode/x005.py
+++ b/lib/unidecode/x005.py
@@ -136,7 +136,7 @@ data = (
 'f',    # 0x86
 'ew',    # 0x87
 '[?]',    # 0x88
-'.',    # 0x89
+':',    # 0x89
 '-',    # 0x8a
 '[?]',    # 0x8b
 '[?]',    # 0x8c
@@ -191,7 +191,7 @@ data = (
 '',    # 0xbd
 '',    # 0xbe
 '',    # 0xbf
-'',    # 0xc0
+'|',    # 0xc0
 '',    # 0xc1
 '',    # 0xc2
 ':',    # 0xc3
diff --git a/lib/unidecode/x020.py b/lib/unidecode/x020.py
index cb6963e2d3762244ec4d2fe87ad596f1d02e47c3..bee561b03dac9caec75a37e7548163fb44eb5549 100644
--- a/lib/unidecode/x020.py
+++ b/lib/unidecode/x020.py
@@ -70,23 +70,23 @@ data = (
 '/',    # 0x44
 '-[',    # 0x45
 ']-',    # 0x46
-'[?]',    # 0x47
+'??',    # 0x47
 '?!',    # 0x48
 '!?',    # 0x49
 '7',    # 0x4a
 'PP',    # 0x4b
 '(]',    # 0x4c
 '[)',    # 0x4d
-'[?]',    # 0x4e
+'*',    # 0x4e
 '[?]',    # 0x4f
 '[?]',    # 0x50
 '[?]',    # 0x51
-'[?]',    # 0x52
-'[?]',    # 0x53
+'%',    # 0x52
+'~',    # 0x53
 '[?]',    # 0x54
 '[?]',    # 0x55
 '[?]',    # 0x56
-'[?]',    # 0x57
+"''''",    # 0x57
 '[?]',    # 0x58
 '[?]',    # 0x59
 '[?]',    # 0x5a
@@ -94,8 +94,8 @@ data = (
 '[?]',    # 0x5c
 '[?]',    # 0x5d
 '[?]',    # 0x5e
-'[?]',    # 0x5f
-'[?]',    # 0x60
+' ',    # 0x5f
+'',    # 0x60
 '[?]',    # 0x61
 '[?]',    # 0x62
 '[?]',    # 0x63
@@ -112,7 +112,7 @@ data = (
 '',    # 0x6e
 '',    # 0x6f
 '0',    # 0x70
-'',    # 0x71
+'i',    # 0x71
 '',    # 0x72
 '',    # 0x73
 '4',    # 0x74
@@ -143,19 +143,19 @@ data = (
 '(',    # 0x8d
 ')',    # 0x8e
 '[?]',    # 0x8f
-'[?]',    # 0x90
-'[?]',    # 0x91
-'[?]',    # 0x92
-'[?]',    # 0x93
+'a',    # 0x90
+'e',    # 0x91
+'o',    # 0x92
+'x',    # 0x93
 '[?]',    # 0x94
-'[?]',    # 0x95
-'[?]',    # 0x96
-'[?]',    # 0x97
-'[?]',    # 0x98
-'[?]',    # 0x99
-'[?]',    # 0x9a
-'[?]',    # 0x9b
-'[?]',    # 0x9c
+'h',    # 0x95
+'k',    # 0x96
+'l',    # 0x97
+'m',    # 0x98
+'n',    # 0x99
+'p',    # 0x9a
+'s',    # 0x9b
+'t',    # 0x9c
 '[?]',    # 0x9d
 '[?]',    # 0x9e
 '[?]',    # 0x9f
@@ -171,7 +171,7 @@ data = (
 'W',    # 0xa9
 'NS',    # 0xaa
 'D',    # 0xab
-'EU',    # 0xac
+'EUR',    # 0xac
 'K',    # 0xad
 'T',    # 0xae
 'Dr',    # 0xaf
@@ -228,7 +228,7 @@ data = (
 '',    # 0xe2
 '',    # 0xe3
 '[?]',    # 0xe4
-'[?]',    # 0xe5
+'',    # 0xe5
 '[?]',    # 0xe6
 '[?]',    # 0xe7
 '[?]',    # 0xe8
diff --git a/lib/unidecode/x021.py b/lib/unidecode/x021.py
index 0164cdb5baae7eb5dc2ddd536017a0f7e7e28a2d..cc74bc65fae7b6fc77a1d8868cf4b8adb9ececa0 100644
--- a/lib/unidecode/x021.py
+++ b/lib/unidecode/x021.py
@@ -1,87 +1,87 @@
 data = (
-'',    # 0x00
-'',    # 0x01
-'',    # 0x02
+' a/c ',    # 0x00
+' a/s ',    # 0x01
+'C',    # 0x02
 '',    # 0x03
 '',    # 0x04
-'',    # 0x05
-'',    # 0x06
+' c/o ',    # 0x05
+' c/u ',    # 0x06
 '',    # 0x07
 '',    # 0x08
 '',    # 0x09
-'',    # 0x0a
-'',    # 0x0b
-'',    # 0x0c
-'',    # 0x0d
-'',    # 0x0e
+'g',    # 0x0a
+'H',    # 0x0b
+'H',    # 0x0c
+'H',    # 0x0d
+'h',    # 0x0e
 '',    # 0x0f
-'',    # 0x10
-'',    # 0x11
-'',    # 0x12
-'',    # 0x13
+'I',    # 0x10
+'I',    # 0x11
+'L',    # 0x12
+'l',    # 0x13
 '',    # 0x14
-'',    # 0x15
+'N',    # 0x15
 '',    # 0x16
 '',    # 0x17
 '',    # 0x18
-'',    # 0x19
-'',    # 0x1a
-'',    # 0x1b
-'',    # 0x1c
-'',    # 0x1d
+'P',    # 0x19
+'Q',    # 0x1a
+'R',    # 0x1b
+'R',    # 0x1c
+'R',    # 0x1d
 '',    # 0x1e
 '',    # 0x1f
-'',    # 0x20
-'',    # 0x21
-'',    # 0x22
+'(sm)',    # 0x20
+'TEL',    # 0x21
+'(tm)',    # 0x22
 '',    # 0x23
-'',    # 0x24
+'Z',    # 0x24
 '',    # 0x25
 '',    # 0x26
 '',    # 0x27
-'',    # 0x28
+'Z',    # 0x28
 '',    # 0x29
 'K',    # 0x2a
 'A',    # 0x2b
-'',    # 0x2c
-'',    # 0x2d
-'',    # 0x2e
-'',    # 0x2f
-'',    # 0x30
-'',    # 0x31
+'B',    # 0x2c
+'C',    # 0x2d
+'e',    # 0x2e
+'e',    # 0x2f
+'E',    # 0x30
+'F',    # 0x31
 'F',    # 0x32
-'',    # 0x33
-'',    # 0x34
+'M',    # 0x33
+'o',    # 0x34
 '',    # 0x35
 '',    # 0x36
 '',    # 0x37
 '',    # 0x38
-'',    # 0x39
+'i',    # 0x39
 '',    # 0x3a
-'[?]',    # 0x3b
-'[?]',    # 0x3c
-'[?]',    # 0x3d
-'[?]',    # 0x3e
-'[?]',    # 0x3f
+'FAX',    # 0x3b
+'',    # 0x3c
+'',    # 0x3d
+'',    # 0x3e
+'',    # 0x3f
 '[?]',    # 0x40
 '[?]',    # 0x41
 '[?]',    # 0x42
 '[?]',    # 0x43
 '[?]',    # 0x44
-'[?]',    # 0x45
-'[?]',    # 0x46
-'[?]',    # 0x47
-'[?]',    # 0x48
-'[?]',    # 0x49
+'D',    # 0x45
+'d',    # 0x46
+'e',    # 0x47
+'i',    # 0x48
+'j',    # 0x49
 '[?]',    # 0x4a
 '[?]',    # 0x4b
 '[?]',    # 0x4c
 '[?]',    # 0x4d
 'F',    # 0x4e
 '[?]',    # 0x4f
-'[?]',    # 0x50
-'[?]',    # 0x51
-'[?]',    # 0x52
+' 1/7 ',    # 0x50
+' 1/9 ',    # 0x51
+' 1/10 ',    # 0x52
 ' 1/3 ',    # 0x53
 ' 2/3 ',    # 0x54
 ' 1/5 ',    # 0x55
@@ -136,7 +136,7 @@ data = (
 '[?]',    # 0x86
 '[?]',    # 0x87
 '[?]',    # 0x88
-'[?]',    # 0x89
+' 0/3 ',    # 0x89
 '[?]',    # 0x8a
 '[?]',    # 0x8b
 '[?]',    # 0x8c
diff --git a/lib/unidecode/x022.py b/lib/unidecode/x022.py
index 2046a9b4e5de625e7d1055213869a0112439451a..e38fb5cca1d8ed41c6c022b412ffec5f0ed6ee07 100644
--- a/lib/unidecode/x022.py
+++ b/lib/unidecode/x022.py
@@ -17,12 +17,12 @@ data = (
 '[?]',    # 0x0f
 '[?]',    # 0x10
 '[?]',    # 0x11
-'[?]',    # 0x12
+'-',    # 0x12
 '[?]',    # 0x13
 '[?]',    # 0x14
-'[?]',    # 0x15
-'[?]',    # 0x16
-'[?]',    # 0x17
+'/',    # 0x15
+'\\',    # 0x16
+'*',    # 0x17
 '[?]',    # 0x18
 '[?]',    # 0x19
 '[?]',    # 0x1a
@@ -34,7 +34,7 @@ data = (
 '[?]',    # 0x20
 '[?]',    # 0x21
 '[?]',    # 0x22
-'[?]',    # 0x23
+'|',    # 0x23
 '[?]',    # 0x24
 '[?]',    # 0x25
 '[?]',    # 0x26
@@ -53,13 +53,13 @@ data = (
 '[?]',    # 0x33
 '[?]',    # 0x34
 '[?]',    # 0x35
-'[?]',    # 0x36
+':',    # 0x36
 '[?]',    # 0x37
 '[?]',    # 0x38
 '[?]',    # 0x39
 '[?]',    # 0x3a
 '[?]',    # 0x3b
-'[?]',    # 0x3c
+'~',    # 0x3c
 '[?]',    # 0x3d
 '[?]',    # 0x3e
 '[?]',    # 0x3f
@@ -99,10 +99,10 @@ data = (
 '[?]',    # 0x61
 '[?]',    # 0x62
 '[?]',    # 0x63
-'[?]',    # 0x64
-'[?]',    # 0x65
-'[?]',    # 0x66
-'[?]',    # 0x67
+'<=',    # 0x64
+'>=',    # 0x65
+'<=',    # 0x66
+'>=',    # 0x67
 '[?]',    # 0x68
 '[?]',    # 0x69
 '[?]',    # 0x6a
diff --git a/lib/unidecode/x023.py b/lib/unidecode/x023.py
index 2046a9b4e5de625e7d1055213869a0112439451a..3c4462e2966377f513742b82389a5c006ef5aa1a 100644
--- a/lib/unidecode/x023.py
+++ b/lib/unidecode/x023.py
@@ -2,7 +2,7 @@ data = (
 '[?]',    # 0x00
 '[?]',    # 0x01
 '[?]',    # 0x02
-'[?]',    # 0x03
+'^',    # 0x03
 '[?]',    # 0x04
 '[?]',    # 0x05
 '[?]',    # 0x06
@@ -40,8 +40,8 @@ data = (
 '[?]',    # 0x26
 '[?]',    # 0x27
 '[?]',    # 0x28
-'[?]',    # 0x29
-'[?]',    # 0x2a
+'<',    # 0x29
+'> ',    # 0x2a
 '[?]',    # 0x2b
 '[?]',    # 0x2c
 '[?]',    # 0x2d
diff --git a/lib/unidecode/x024.py b/lib/unidecode/x024.py
index 76d8c8cd832be2f47d73363bc4abcef0eaab6094..231b0ca14e2eced9cb3299c79cf9a7ccb5e591a6 100644
--- a/lib/unidecode/x024.py
+++ b/lib/unidecode/x024.py
@@ -95,118 +95,118 @@ data = (
 '[?]',    # 0x5d
 '[?]',    # 0x5e
 '[?]',    # 0x5f
-'',    # 0x60
-'',    # 0x61
-'',    # 0x62
-'',    # 0x63
-'',    # 0x64
-'',    # 0x65
-'',    # 0x66
-'',    # 0x67
-'',    # 0x68
-'',    # 0x69
-'',    # 0x6a
-'',    # 0x6b
-'',    # 0x6c
-'',    # 0x6d
-'',    # 0x6e
-'',    # 0x6f
-'',    # 0x70
-'',    # 0x71
-'',    # 0x72
-'',    # 0x73
-'',    # 0x74
-'',    # 0x75
-'',    # 0x76
-'',    # 0x77
-'',    # 0x78
-'',    # 0x79
-'',    # 0x7a
-'',    # 0x7b
-'',    # 0x7c
-'',    # 0x7d
-'',    # 0x7e
-'',    # 0x7f
-'',    # 0x80
-'',    # 0x81
-'',    # 0x82
-'',    # 0x83
-'',    # 0x84
-'',    # 0x85
-'',    # 0x86
-'',    # 0x87
-'',    # 0x88
-'',    # 0x89
-'',    # 0x8a
-'',    # 0x8b
-'',    # 0x8c
-'',    # 0x8d
-'',    # 0x8e
-'',    # 0x8f
-'',    # 0x90
-'',    # 0x91
-'',    # 0x92
-'',    # 0x93
-'',    # 0x94
-'',    # 0x95
-'',    # 0x96
-'',    # 0x97
-'',    # 0x98
-'',    # 0x99
-'',    # 0x9a
-'',    # 0x9b
-'',    # 0x9c
-'',    # 0x9d
-'',    # 0x9e
-'',    # 0x9f
-'',    # 0xa0
-'',    # 0xa1
-'',    # 0xa2
-'',    # 0xa3
-'',    # 0xa4
-'',    # 0xa5
-'',    # 0xa6
-'',    # 0xa7
-'',    # 0xa8
-'',    # 0xa9
-'',    # 0xaa
-'',    # 0xab
-'',    # 0xac
-'',    # 0xad
-'',    # 0xae
-'',    # 0xaf
-'',    # 0xb0
-'',    # 0xb1
-'',    # 0xb2
-'',    # 0xb3
-'',    # 0xb4
-'',    # 0xb5
-'',    # 0xb6
-'',    # 0xb7
-'',    # 0xb8
-'',    # 0xb9
-'',    # 0xba
-'',    # 0xbb
-'',    # 0xbc
-'',    # 0xbd
-'',    # 0xbe
-'',    # 0xbf
-'',    # 0xc0
-'',    # 0xc1
-'',    # 0xc2
-'',    # 0xc3
-'',    # 0xc4
-'',    # 0xc5
-'',    # 0xc6
-'',    # 0xc7
-'',    # 0xc8
-'',    # 0xc9
-'',    # 0xca
-'',    # 0xcb
-'',    # 0xcc
-'',    # 0xcd
-'',    # 0xce
-'',    # 0xcf
+'1',    # 0x60
+'2',    # 0x61
+'3',    # 0x62
+'4',    # 0x63
+'5',    # 0x64
+'6',    # 0x65
+'7',    # 0x66
+'8',    # 0x67
+'9',    # 0x68
+'10',    # 0x69
+'11',    # 0x6a
+'12',    # 0x6b
+'13',    # 0x6c
+'14',    # 0x6d
+'15',    # 0x6e
+'16',    # 0x6f
+'17',    # 0x70
+'18',    # 0x71
+'19',    # 0x72
+'20',    # 0x73
+'(1)',    # 0x74
+'(2)',    # 0x75
+'(3)',    # 0x76
+'(4)',    # 0x77
+'(5)',    # 0x78
+'(6)',    # 0x79
+'(7)',    # 0x7a
+'(8)',    # 0x7b
+'(9)',    # 0x7c
+'(10)',    # 0x7d
+'(11)',    # 0x7e
+'(12)',    # 0x7f
+'(13)',    # 0x80
+'(14)',    # 0x81
+'(15)',    # 0x82
+'(16)',    # 0x83
+'(17)',    # 0x84
+'(18)',    # 0x85
+'(19)',    # 0x86
+'(20)',    # 0x87
+'1.',    # 0x88
+'2.',    # 0x89
+'3.',    # 0x8a
+'4.',    # 0x8b
+'5.',    # 0x8c
+'6.',    # 0x8d
+'7.',    # 0x8e
+'8.',    # 0x8f
+'9.',    # 0x90
+'10.',    # 0x91
+'11.',    # 0x92
+'12.',    # 0x93
+'13.',    # 0x94
+'14.',    # 0x95
+'15.',    # 0x96
+'16.',    # 0x97
+'17.',    # 0x98
+'18.',    # 0x99
+'19.',    # 0x9a
+'20.',    # 0x9b
+'(a)',    # 0x9c
+'(b)',    # 0x9d
+'(c)',    # 0x9e
+'(d)',    # 0x9f
+'(e)',    # 0xa0
+'(f)',    # 0xa1
+'(g)',    # 0xa2
+'(h)',    # 0xa3
+'(i)',    # 0xa4
+'(j)',    # 0xa5
+'(k)',    # 0xa6
+'(l)',    # 0xa7
+'(m)',    # 0xa8
+'(n)',    # 0xa9
+'(o)',    # 0xaa
+'(p)',    # 0xab
+'(q)',    # 0xac
+'(r)',    # 0xad
+'(s)',    # 0xae
+'(t)',    # 0xaf
+'(u)',    # 0xb0
+'(v)',    # 0xb1
+'(w)',    # 0xb2
+'(x)',    # 0xb3
+'(y)',    # 0xb4
+'(z)',    # 0xb5
+'A',    # 0xb6
+'B',    # 0xb7
+'C',    # 0xb8
+'D',    # 0xb9
+'E',    # 0xba
+'F',    # 0xbb
+'G',    # 0xbc
+'H',    # 0xbd
+'I',    # 0xbe
+'J',    # 0xbf
+'K',    # 0xc0
+'L',    # 0xc1
+'M',    # 0xc2
+'N',    # 0xc3
+'O',    # 0xc4
+'P',    # 0xc5
+'Q',    # 0xc6
+'R',    # 0xc7
+'S',    # 0xc8
+'T',    # 0xc9
+'U',    # 0xca
+'V',    # 0xcb
+'W',    # 0xcc
+'X',    # 0xcd
+'Y',    # 0xce
+'Z',    # 0xcf
 'a',    # 0xd0
 'b',    # 0xd1
 'c',    # 0xd2
@@ -234,24 +234,25 @@ data = (
 'y',    # 0xe8
 'z',    # 0xe9
 '0',    # 0xea
-'[?]',    # 0xeb
-'[?]',    # 0xec
-'[?]',    # 0xed
-'[?]',    # 0xee
-'[?]',    # 0xef
-'[?]',    # 0xf0
-'[?]',    # 0xf1
-'[?]',    # 0xf2
-'[?]',    # 0xf3
-'[?]',    # 0xf4
-'[?]',    # 0xf5
-'[?]',    # 0xf6
-'[?]',    # 0xf7
-'[?]',    # 0xf8
-'[?]',    # 0xf9
-'[?]',    # 0xfa
-'[?]',    # 0xfb
-'[?]',    # 0xfc
-'[?]',    # 0xfd
-'[?]',    # 0xfe
+'11',    # 0xeb
+'12',    # 0xec
+'13',    # 0xed
+'14',    # 0xee
+'15',    # 0xef
+'16',    # 0xf0
+'17',    # 0xf1
+'18',    # 0xf2
+'19',    # 0xf3
+'20',    # 0xf4
+'1',    # 0xf5
+'2',    # 0xf6
+'3',    # 0xf7
+'4',    # 0xf8
+'5',    # 0xf9
+'6',    # 0xfa
+'7',    # 0xfb
+'8',    # 0xfc
+'9',    # 0xfd
+'10',    # 0xfe
+'0',    # 0xff
 )
diff --git a/lib/unidecode/x026.py b/lib/unidecode/x026.py
index bfb03a9af0e35ebbf414ac7aa432346c1d894d5f..c575472c742bee3b44e267d1612e97ba954313fc 100644
--- a/lib/unidecode/x026.py
+++ b/lib/unidecode/x026.py
@@ -110,7 +110,7 @@ data = (
 '',    # 0x6c
 '',    # 0x6d
 '',    # 0x6e
-'',    # 0x6f
+'#',    # 0x6f
 '',    # 0x70
 '',    # 0x71
 '[?]',    # 0x72
diff --git a/lib/unidecode/x027.py b/lib/unidecode/x027.py
index 473cfc772b2a71886ced2c65ed571f1886e5768e..3c74c073892e138ee0b091ae0ca96c72e5674940 100644
--- a/lib/unidecode/x027.py
+++ b/lib/unidecode/x027.py
@@ -48,7 +48,7 @@ data = (
 '',    # 0x2e
 '',    # 0x2f
 '',    # 0x30
-'',    # 0x31
+'*',    # 0x31
 '',    # 0x32
 '',    # 0x33
 '',    # 0x34
@@ -87,7 +87,7 @@ data = (
 '',    # 0x55
 '',    # 0x56
 '',    # 0x57
-'',    # 0x58
+'|',    # 0x58
 '',    # 0x59
 '',    # 0x5a
 '',    # 0x5b
@@ -97,7 +97,7 @@ data = (
 '[?]',    # 0x5f
 '[?]',    # 0x60
 '',    # 0x61
-'',    # 0x62
+'!',    # 0x62
 '',    # 0x63
 '',    # 0x64
 '',    # 0x65
@@ -229,10 +229,10 @@ data = (
 '[?]',    # 0xe3
 '[?]',    # 0xe4
 '[?]',    # 0xe5
-'[?]',    # 0xe6
+'[',    # 0xe6
 '[?]',    # 0xe7
-'[?]',    # 0xe8
-'[?]',    # 0xe9
+'<',    # 0xe8
+'> ',    # 0xe9
 '[?]',    # 0xea
 '[?]',    # 0xeb
 '[?]',    # 0xec
diff --git a/lib/unidecode/x029.py b/lib/unidecode/x029.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2df25484963e7dcef2c978c65e188a2f7c1c99d
--- /dev/null
+++ b/lib/unidecode/x029.py
@@ -0,0 +1,257 @@
+data = (
+'',    # 0x00
+'',    # 0x01
+'',    # 0x02
+'',    # 0x03
+'',    # 0x04
+'',    # 0x05
+'',    # 0x06
+'',    # 0x07
+'',    # 0x08
+'',    # 0x09
+'',    # 0x0a
+'',    # 0x0b
+'',    # 0x0c
+'',    # 0x0d
+'',    # 0x0e
+'',    # 0x0f
+'',    # 0x10
+'',    # 0x11
+'',    # 0x12
+'',    # 0x13
+'',    # 0x14
+'',    # 0x15
+'',    # 0x16
+'',    # 0x17
+'',    # 0x18
+'',    # 0x19
+'',    # 0x1a
+'',    # 0x1b
+'',    # 0x1c
+'',    # 0x1d
+'',    # 0x1e
+'',    # 0x1f
+'',    # 0x20
+'',    # 0x21
+'',    # 0x22
+'',    # 0x23
+'',    # 0x24
+'',    # 0x25
+'',    # 0x26
+'',    # 0x27
+'',    # 0x28
+'',    # 0x29
+'',    # 0x2a
+'',    # 0x2b
+'',    # 0x2c
+'',    # 0x2d
+'',    # 0x2e
+'',    # 0x2f
+'',    # 0x30
+'',    # 0x31
+'',    # 0x32
+'',    # 0x33
+'',    # 0x34
+'',    # 0x35
+'',    # 0x36
+'',    # 0x37
+'',    # 0x38
+'',    # 0x39
+'',    # 0x3a
+'',    # 0x3b
+'',    # 0x3c
+'',    # 0x3d
+'',    # 0x3e
+'',    # 0x3f
+'',    # 0x40
+'',    # 0x41
+'',    # 0x42
+'',    # 0x43
+'',    # 0x44
+'',    # 0x45
+'',    # 0x46
+'',    # 0x47
+'',    # 0x48
+'',    # 0x49
+'',    # 0x4a
+'',    # 0x4b
+'',    # 0x4c
+'',    # 0x4d
+'',    # 0x4e
+'',    # 0x4f
+'',    # 0x50
+'',    # 0x51
+'',    # 0x52
+'',    # 0x53
+'',    # 0x54
+'',    # 0x55
+'',    # 0x56
+'',    # 0x57
+'',    # 0x58
+'',    # 0x59
+'',    # 0x5a
+'',    # 0x5b
+'',    # 0x5c
+'',    # 0x5d
+'',    # 0x5e
+'',    # 0x5f
+'',    # 0x60
+'',    # 0x61
+'',    # 0x62
+'',    # 0x63
+'',    # 0x64
+'',    # 0x65
+'',    # 0x66
+'',    # 0x67
+'',    # 0x68
+'',    # 0x69
+'',    # 0x6a
+'',    # 0x6b
+'',    # 0x6c
+'',    # 0x6d
+'',    # 0x6e
+'',    # 0x6f
+'',    # 0x70
+'',    # 0x71
+'',    # 0x72
+'',    # 0x73
+'',    # 0x74
+'',    # 0x75
+'',    # 0x76
+'',    # 0x77
+'',    # 0x78
+'',    # 0x79
+'',    # 0x7a
+'',    # 0x7b
+'',    # 0x7c
+'',    # 0x7d
+'',    # 0x7e
+'',    # 0x7f
+'',    # 0x80
+'',    # 0x81
+'',    # 0x82
+'{',    # 0x83
+'} ',    # 0x84
+'',    # 0x85
+'',    # 0x86
+'',    # 0x87
+'',    # 0x88
+'',    # 0x89
+'',    # 0x8a
+'',    # 0x8b
+'',    # 0x8c
+'',    # 0x8d
+'',    # 0x8e
+'',    # 0x8f
+'',    # 0x90
+'',    # 0x91
+'',    # 0x92
+'',    # 0x93
+'',    # 0x94
+'',    # 0x95
+'',    # 0x96
+'',    # 0x97
+'',    # 0x98
+'',    # 0x99
+'',    # 0x9a
+'',    # 0x9b
+'',    # 0x9c
+'',    # 0x9d
+'',    # 0x9e
+'',    # 0x9f
+'',    # 0xa0
+'',    # 0xa1
+'',    # 0xa2
+'',    # 0xa3
+'',    # 0xa4
+'',    # 0xa5
+'',    # 0xa6
+'',    # 0xa7
+'',    # 0xa8
+'',    # 0xa9
+'',    # 0xaa
+'',    # 0xab
+'',    # 0xac
+'',    # 0xad
+'',    # 0xae
+'',    # 0xaf
+'',    # 0xb0
+'',    # 0xb1
+'',    # 0xb2
+'',    # 0xb3
+'',    # 0xb4
+'',    # 0xb5
+'',    # 0xb6
+'',    # 0xb7
+'',    # 0xb8
+'',    # 0xb9
+'',    # 0xba
+'',    # 0xbb
+'',    # 0xbc
+'',    # 0xbd
+'',    # 0xbe
+'',    # 0xbf
+'',    # 0xc0
+'',    # 0xc1
+'',    # 0xc2
+'',    # 0xc3
+'',    # 0xc4
+'',    # 0xc5
+'',    # 0xc6
+'',    # 0xc7
+'',    # 0xc8
+'',    # 0xc9
+'',    # 0xca
+'',    # 0xcb
+'',    # 0xcc
+'',    # 0xcd
+'',    # 0xce
+'',    # 0xcf
+'',    # 0xd0
+'',    # 0xd1
+'',    # 0xd2
+'',    # 0xd3
+'',    # 0xd4
+'',    # 0xd5
+'',    # 0xd6
+'',    # 0xd7
+'',    # 0xd8
+'',    # 0xd9
+'',    # 0xda
+'',    # 0xdb
+'',    # 0xdc
+'',    # 0xdd
+'',    # 0xde
+'',    # 0xdf
+'',    # 0xe0
+'',    # 0xe1
+'',    # 0xe2
+'',    # 0xe3
+'',    # 0xe4
+'',    # 0xe5
+'',    # 0xe6
+'',    # 0xe7
+'',    # 0xe8
+'',    # 0xe9
+'',    # 0xea
+'',    # 0xeb
+'',    # 0xec
+'',    # 0xed
+'',    # 0xee
+'',    # 0xef
+'',    # 0xf0
+'',    # 0xf1
+'',    # 0xf2
+'',    # 0xf3
+'',    # 0xf4
+'',    # 0xf5
+'',    # 0xf6
+'',    # 0xf7
+'',    # 0xf8
+'',    # 0xf9
+'',    # 0xfa
+'',    # 0xfb
+'',    # 0xfc
+'',    # 0xfd
+'',    # 0xfe
+)
diff --git a/lib/unidecode/x02a.py b/lib/unidecode/x02a.py
new file mode 100644
index 0000000000000000000000000000000000000000..b832ef3528a360244187fe32f08e2c48820ca3fd
--- /dev/null
+++ b/lib/unidecode/x02a.py
@@ -0,0 +1,257 @@
+data = (
+'',    # 0x00
+'',    # 0x01
+'',    # 0x02
+'',    # 0x03
+'',    # 0x04
+'',    # 0x05
+'',    # 0x06
+'',    # 0x07
+'',    # 0x08
+'',    # 0x09
+'',    # 0x0a
+'',    # 0x0b
+'',    # 0x0c
+'',    # 0x0d
+'',    # 0x0e
+'',    # 0x0f
+'',    # 0x10
+'',    # 0x11
+'',    # 0x12
+'',    # 0x13
+'',    # 0x14
+'',    # 0x15
+'',    # 0x16
+'',    # 0x17
+'',    # 0x18
+'',    # 0x19
+'',    # 0x1a
+'',    # 0x1b
+'',    # 0x1c
+'',    # 0x1d
+'',    # 0x1e
+'',    # 0x1f
+'',    # 0x20
+'',    # 0x21
+'',    # 0x22
+'',    # 0x23
+'',    # 0x24
+'',    # 0x25
+'',    # 0x26
+'',    # 0x27
+'',    # 0x28
+'',    # 0x29
+'',    # 0x2a
+'',    # 0x2b
+'',    # 0x2c
+'',    # 0x2d
+'',    # 0x2e
+'',    # 0x2f
+'',    # 0x30
+'',    # 0x31
+'',    # 0x32
+'',    # 0x33
+'',    # 0x34
+'',    # 0x35
+'',    # 0x36
+'',    # 0x37
+'',    # 0x38
+'',    # 0x39
+'',    # 0x3a
+'',    # 0x3b
+'',    # 0x3c
+'',    # 0x3d
+'',    # 0x3e
+'',    # 0x3f
+'',    # 0x40
+'',    # 0x41
+'',    # 0x42
+'',    # 0x43
+'',    # 0x44
+'',    # 0x45
+'',    # 0x46
+'',    # 0x47
+'',    # 0x48
+'',    # 0x49
+'',    # 0x4a
+'',    # 0x4b
+'',    # 0x4c
+'',    # 0x4d
+'',    # 0x4e
+'',    # 0x4f
+'',    # 0x50
+'',    # 0x51
+'',    # 0x52
+'',    # 0x53
+'',    # 0x54
+'',    # 0x55
+'',    # 0x56
+'',    # 0x57
+'',    # 0x58
+'',    # 0x59
+'',    # 0x5a
+'',    # 0x5b
+'',    # 0x5c
+'',    # 0x5d
+'',    # 0x5e
+'',    # 0x5f
+'',    # 0x60
+'',    # 0x61
+'',    # 0x62
+'',    # 0x63
+'',    # 0x64
+'',    # 0x65
+'',    # 0x66
+'',    # 0x67
+'',    # 0x68
+'',    # 0x69
+'',    # 0x6a
+'',    # 0x6b
+'',    # 0x6c
+'',    # 0x6d
+'',    # 0x6e
+'',    # 0x6f
+'',    # 0x70
+'',    # 0x71
+'',    # 0x72
+'',    # 0x73
+'::=',    # 0x74
+'==',    # 0x75
+'===',    # 0x76
+'',    # 0x77
+'',    # 0x78
+'',    # 0x79
+'',    # 0x7a
+'',    # 0x7b
+'',    # 0x7c
+'',    # 0x7d
+'',    # 0x7e
+'',    # 0x7f
+'',    # 0x80
+'',    # 0x81
+'',    # 0x82
+'',    # 0x83
+'',    # 0x84
+'',    # 0x85
+'',    # 0x86
+'',    # 0x87
+'',    # 0x88
+'',    # 0x89
+'',    # 0x8a
+'',    # 0x8b
+'',    # 0x8c
+'',    # 0x8d
+'',    # 0x8e
+'',    # 0x8f
+'',    # 0x90
+'',    # 0x91
+'',    # 0x92
+'',    # 0x93
+'',    # 0x94
+'',    # 0x95
+'',    # 0x96
+'',    # 0x97
+'',    # 0x98
+'',    # 0x99
+'',    # 0x9a
+'',    # 0x9b
+'',    # 0x9c
+'',    # 0x9d
+'',    # 0x9e
+'',    # 0x9f
+'',    # 0xa0
+'',    # 0xa1
+'',    # 0xa2
+'',    # 0xa3
+'',    # 0xa4
+'',    # 0xa5
+'',    # 0xa6
+'',    # 0xa7
+'',    # 0xa8
+'',    # 0xa9
+'',    # 0xaa
+'',    # 0xab
+'',    # 0xac
+'',    # 0xad
+'',    # 0xae
+'',    # 0xaf
+'',    # 0xb0
+'',    # 0xb1
+'',    # 0xb2
+'',    # 0xb3
+'',    # 0xb4
+'',    # 0xb5
+'',    # 0xb6
+'',    # 0xb7
+'',    # 0xb8
+'',    # 0xb9
+'',    # 0xba
+'',    # 0xbb
+'',    # 0xbc
+'',    # 0xbd
+'',    # 0xbe
+'',    # 0xbf
+'',    # 0xc0
+'',    # 0xc1
+'',    # 0xc2
+'',    # 0xc3
+'',    # 0xc4
+'',    # 0xc5
+'',    # 0xc6
+'',    # 0xc7
+'',    # 0xc8
+'',    # 0xc9
+'',    # 0xca
+'',    # 0xcb
+'',    # 0xcc
+'',    # 0xcd
+'',    # 0xce
+'',    # 0xcf
+'',    # 0xd0
+'',    # 0xd1
+'',    # 0xd2
+'',    # 0xd3
+'',    # 0xd4
+'',    # 0xd5
+'',    # 0xd6
+'',    # 0xd7
+'',    # 0xd8
+'',    # 0xd9
+'',    # 0xda
+'',    # 0xdb
+'',    # 0xdc
+'',    # 0xdd
+'',    # 0xde
+'',    # 0xdf
+'',    # 0xe0
+'',    # 0xe1
+'',    # 0xe2
+'',    # 0xe3
+'',    # 0xe4
+'',    # 0xe5
+'',    # 0xe6
+'',    # 0xe7
+'',    # 0xe8
+'',    # 0xe9
+'',    # 0xea
+'',    # 0xeb
+'',    # 0xec
+'',    # 0xed
+'',    # 0xee
+'',    # 0xef
+'',    # 0xf0
+'',    # 0xf1
+'',    # 0xf2
+'',    # 0xf3
+'',    # 0xf4
+'',    # 0xf5
+'',    # 0xf6
+'',    # 0xf7
+'',    # 0xf8
+'',    # 0xf9
+'',    # 0xfa
+'',    # 0xfb
+'',    # 0xfc
+'',    # 0xfd
+'',    # 0xfe
+)
diff --git a/lib/unidecode/x032.py b/lib/unidecode/x032.py
index 3295a25cab4bf40d358e82ceadb6aa3d07b25a20..a0c21d11e88d02b66941d1c094829fad19aaf908 100644
--- a/lib/unidecode/x032.py
+++ b/lib/unidecode/x032.py
@@ -80,21 +80,21 @@ data = (
 '[?]',    # 0x4e
 '[?]',    # 0x4f
 '[?]',    # 0x50
-'[?]',    # 0x51
-'[?]',    # 0x52
-'[?]',    # 0x53
-'[?]',    # 0x54
-'[?]',    # 0x55
-'[?]',    # 0x56
-'[?]',    # 0x57
-'[?]',    # 0x58
-'[?]',    # 0x59
-'[?]',    # 0x5a
-'[?]',    # 0x5b
-'[?]',    # 0x5c
-'[?]',    # 0x5d
-'[?]',    # 0x5e
-'[?]',    # 0x5f
+'21',    # 0x51
+'22',    # 0x52
+'23',    # 0x53
+'24',    # 0x54
+'25',    # 0x55
+'26',    # 0x56
+'27',    # 0x57
+'28',    # 0x58
+'29',    # 0x59
+'30',    # 0x5a
+'31',    # 0x5b
+'32',    # 0x5c
+'33',    # 0x5d
+'34',    # 0x5e
+'35',    # 0x5f
 '(g)',    # 0x60
 '(n)',    # 0x61
 '(d)',    # 0x62
@@ -176,21 +176,21 @@ data = (
 '(Zi) ',    # 0xae
 '(Xie) ',    # 0xaf
 '(Ye) ',    # 0xb0
-'[?]',    # 0xb1
-'[?]',    # 0xb2
-'[?]',    # 0xb3
-'[?]',    # 0xb4
-'[?]',    # 0xb5
-'[?]',    # 0xb6
-'[?]',    # 0xb7
-'[?]',    # 0xb8
-'[?]',    # 0xb9
-'[?]',    # 0xba
-'[?]',    # 0xbb
-'[?]',    # 0xbc
-'[?]',    # 0xbd
-'[?]',    # 0xbe
-'[?]',    # 0xbf
+'36',    # 0xb1
+'37',    # 0xb2
+'38',    # 0xb3
+'39',    # 0xb4
+'40',    # 0xb5
+'41',    # 0xb6
+'42',    # 0xb7
+'43',    # 0xb8
+'44',    # 0xb9
+'45',    # 0xba
+'46',    # 0xbb
+'47',    # 0xbc
+'48',    # 0xbd
+'49',    # 0xbe
+'50',    # 0xbf
 '1M',    # 0xc0
 '2M',    # 0xc1
 '3M',    # 0xc2
@@ -203,10 +203,10 @@ data = (
 '10M',    # 0xc9
 '11M',    # 0xca
 '12M',    # 0xcb
-'[?]',    # 0xcc
-'[?]',    # 0xcd
-'[?]',    # 0xce
-'[?]',    # 0xcf
+'Hg',    # 0xcc
+'erg',    # 0xcd
+'eV',    # 0xce
+'LTD',    # 0xcf
 'a',    # 0xd0
 'i',    # 0xd1
 'u',    # 0xd2
diff --git a/lib/unidecode/x033.py b/lib/unidecode/x033.py
index 64eb651a7d63b41f28eba5d89547cb244ee21079..b9536832d44349bac23d1978e9d6d2e71d02a3fe 100644
--- a/lib/unidecode/x033.py
+++ b/lib/unidecode/x033.py
@@ -112,16 +112,16 @@ data = (
 '22h',    # 0x6e
 '23h',    # 0x6f
 '24h',    # 0x70
-'HPA',    # 0x71
+'hPa',    # 0x71
 'da',    # 0x72
 'AU',    # 0x73
 'bar',    # 0x74
 'oV',    # 0x75
 'pc',    # 0x76
-'[?]',    # 0x77
-'[?]',    # 0x78
-'[?]',    # 0x79
-'[?]',    # 0x7a
+'dm',    # 0x77
+'dm^2',    # 0x78
+'dm^3',    # 0x79
+'IU',    # 0x7a
 'Heisei',    # 0x7b
 'Syouwa',    # 0x7c
 'Taisyou',    # 0x7d
@@ -162,7 +162,7 @@ data = (
 'cm^2',    # 0xa0
 'm^2',    # 0xa1
 'km^2',    # 0xa2
-'mm^4',    # 0xa3
+'mm^3',    # 0xa3
 'cm^3',    # 0xa4
 'm^3',    # 0xa5
 'km^3',    # 0xa6
@@ -254,4 +254,5 @@ data = (
 '29d',    # 0xfc
 '30d',    # 0xfd
 '31d',    # 0xfe
+'gal',    # 0xff
 )
diff --git a/lib/unidecode/x04e.py b/lib/unidecode/x04e.py
index e346f67bfbc7db8233b50ca389fc9a3e40437468..b472b85596c5683f997206cdbbff6560cbf296a3 100644
--- a/lib/unidecode/x04e.py
+++ b/lib/unidecode/x04e.py
@@ -1,5 +1,5 @@
 data = (
-'[?] ',    # 0x00
+'Yi ',    # 0x00
 'Ding ',    # 0x01
 'Kao ',    # 0x02
 'Qi ',    # 0x03
diff --git a/lib/unidecode/x1f1.py b/lib/unidecode/x1f1.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba0481fcf7bc738eadc21b0811a6220f7e3550c1
--- /dev/null
+++ b/lib/unidecode/x1f1.py
@@ -0,0 +1,258 @@
+data = (
+'0.',    # 0x00
+'0,',    # 0x01
+'1,',    # 0x02
+'2,',    # 0x03
+'3,',    # 0x04
+'4,',    # 0x05
+'5,',    # 0x06
+'6,',    # 0x07
+'7,',    # 0x08
+'8,',    # 0x09
+'9,',    # 0x0a
+'',    # 0x0b
+'',    # 0x0c
+'',    # 0x0d
+'',    # 0x0e
+'',    # 0x0f
+'(A)',    # 0x10
+'(B)',    # 0x11
+'(C)',    # 0x12
+'(D)',    # 0x13
+'(E)',    # 0x14
+'(F)',    # 0x15
+'(G)',    # 0x16
+'(H)',    # 0x17
+'(I)',    # 0x18
+'(J)',    # 0x19
+'(K)',   # 0x1a
+'(L)',    # 0x1b
+'(M)',    # 0x1c
+'(N)',    # 0x1d
+'(O)',    # 0x1e
+'(P)',    # 0x1f
+'(Q)',    # 0x20
+'(R)',    # 0x21
+'(S)',    # 0x22
+'(T)',    # 0x23
+'(U)',    # 0x24
+'(V)',    # 0x25
+'(W)',    # 0x26
+'(X)',    # 0x27
+'(Y)',    # 0x28
+'(Z)',    # 0x29
+'',    # 0x2a
+'',    # 0x2b
+'',    # 0x2c
+'',    # 0x2d
+'',    # 0x2e
+'',    # 0x2f
+'',    # 0x30
+'',    # 0x31
+'',    # 0x32
+'',    # 0x33
+'',    # 0x34
+'',    # 0x35
+'',    # 0x36
+'',    # 0x37
+'',    # 0x38
+'',    # 0x39
+'',    # 0x3a
+'',    # 0x3b
+'',    # 0x3c
+'',    # 0x3d
+'',    # 0x3e
+'',    # 0x3f
+'',    # 0x40
+'',    # 0x41
+'',    # 0x42
+'',    # 0x43
+'',    # 0x44
+'',    # 0x45
+'',    # 0x46
+'',    # 0x47
+'',    # 0x48
+'',    # 0x49
+'',    # 0x4a
+'',    # 0x4b
+'',    # 0x4c
+'',    # 0x4d
+'',    # 0x4e
+'',    # 0x4f
+'',    # 0x50
+'',    # 0x51
+'',    # 0x52
+'',    # 0x53
+'',    # 0x54
+'',    # 0x55
+'',    # 0x56
+'',    # 0x57
+'',    # 0x58
+'',    # 0x59
+'',    # 0x5a
+'',    # 0x5b
+'',    # 0x5c
+'',    # 0x5d
+'',    # 0x5e
+'',    # 0x5f
+'',    # 0x60
+'',    # 0x61
+'',    # 0x62
+'',    # 0x63
+'',    # 0x64
+'',    # 0x65
+'',    # 0x66
+'',    # 0x67
+'',    # 0x68
+'',    # 0x69
+'',    # 0x6a
+'',    # 0x6b
+'',    # 0x6c
+'',    # 0x6d
+'',    # 0x6e
+'',    # 0x6f
+'',    # 0x70
+'',    # 0x71
+'',    # 0x72
+'',    # 0x73
+'',    # 0x74
+'',    # 0x75
+'',    # 0x76
+'',    # 0x77
+'',    # 0x78
+'',    # 0x79
+'',    # 0x7a
+'',    # 0x7b
+'',    # 0x7c
+'',    # 0x7d
+'',    # 0x7e
+'',    # 0x7f
+'',    # 0x80
+'',    # 0x81
+'',    # 0x82
+'',    # 0x83
+'',    # 0x84
+'',    # 0x85
+'',    # 0x86
+'',    # 0x87
+'',    # 0x88
+'',    # 0x89
+'',    # 0x8a
+'',    # 0x8b
+'',    # 0x8c
+'',    # 0x8d
+'',    # 0x8e
+'',    # 0x8f
+'',    # 0x90
+'',    # 0x91
+'',    # 0x92
+'',    # 0x93
+'',    # 0x94
+'',    # 0x95
+'',    # 0x96
+'',    # 0x97
+'',    # 0x98
+'',    # 0x99
+'',    # 0x9a
+'',    # 0x9b
+'',    # 0x9c
+'',    # 0x9d
+'',    # 0x9e
+'',    # 0x9f
+'',    # 0xa0
+'',    # 0xa1
+'',    # 0xa2
+'',    # 0xa3
+'',    # 0xa4
+'',    # 0xa5
+'',    # 0xa6
+'',    # 0xa7
+'',    # 0xa8
+'',    # 0xa9
+'',    # 0xaa
+'',    # 0xab
+'',    # 0xac
+'',    # 0xad
+'',    # 0xae
+'',    # 0xaf
+'',    # 0xb0
+'',    # 0xb1
+'',    # 0xb2
+'',    # 0xb3
+'',    # 0xb4
+'',    # 0xb5
+'',    # 0xb6
+'',    # 0xb7
+'',    # 0xb8
+'',    # 0xb9
+'',    # 0xba
+'',    # 0xbb
+'',    # 0xbc
+'',    # 0xbd
+'',    # 0xbe
+'',    # 0xbf
+'',    # 0xc0
+'',    # 0xc1
+'',    # 0xc2
+'',    # 0xc3
+'',    # 0xc4
+'',    # 0xc5
+'',    # 0xc6
+'',    # 0xc7
+'',    # 0xc8
+'',    # 0xc9
+'',    # 0xca
+'',    # 0xcb
+'',    # 0xcc
+'',    # 0xcd
+'',    # 0xce
+'',    # 0xcf
+'',    # 0xd0
+'',    # 0xd1
+'',    # 0xd2
+'',    # 0xd3
+'',    # 0xd4
+'',    # 0xd5
+'',    # 0xd6
+'',    # 0xd7
+'',    # 0xd8
+'',    # 0xd9
+'',    # 0xda
+'',    # 0xdb
+'',    # 0xdc
+'',    # 0xdd
+'',    # 0xde
+'',    # 0xdf
+'',    # 0xe0
+'',    # 0xe1
+'',    # 0xe2
+'',    # 0xe3
+'',    # 0xe4
+'',    # 0xe5
+'',    # 0xe6
+'',    # 0xe7
+'',    # 0xe8
+'',    # 0xe9
+'',    # 0xea
+'',    # 0xeb
+'',    # 0xec
+'',    # 0xed
+'',    # 0xee
+'',    # 0xef
+'',    # 0xf0
+'',    # 0xf1
+'',    # 0xf2
+'',    # 0xf3
+'',    # 0xf4
+'',    # 0xf5
+'',    # 0xf6
+'',    # 0xf7
+'',    # 0xf8
+'',    # 0xf9
+'',    # 0xfa
+'',    # 0xfb
+'',    # 0xfc
+'',    # 0xfd
+'',    # 0xfe
+'',    # 0xff
+)
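
Editor's note: x029, x02a and x1f1 are new tables, mostly empty but covering a few mathematical brackets, relational operators and the enclosed-alphanumeric block. A small spot-check, assuming Python 3 or a wide build so the U+1F1xx code points are single characters:

```python
# Entries taken from the new tables above: x029 0x83/0x84, x02a 0x75/0x76,
# and x1f1 0x10.
from unidecode import unidecode

print(unidecode(u'\u2983\u2984'))   # '{} '
print(unidecode(u'\u2a75\u2a76'))   # '====='  ('==' followed by '===')
print(unidecode(u'\U0001F110'))     # '(A)'
```
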
diff --git a/lib/win_inet_pton.py b/lib/win_inet_pton.py
new file mode 100644
index 0000000000000000000000000000000000000000..12aaf46c4aba0909794f9eb8e5114a49acf14f90
--- /dev/null
+++ b/lib/win_inet_pton.py
@@ -0,0 +1,84 @@
+# This software released into the public domain. Anyone is free to copy,
+# modify, publish, use, compile, sell, or distribute this software,
+# either in source code form or as a compiled binary, for any purpose,
+# commercial or non-commercial, and by any means.
+
+import socket
+import ctypes
+import os
+
+
+class sockaddr(ctypes.Structure):
+    _fields_ = [("sa_family", ctypes.c_short),
+                ("__pad1", ctypes.c_ushort),
+                ("ipv4_addr", ctypes.c_byte * 4),
+                ("ipv6_addr", ctypes.c_byte * 16),
+                ("__pad2", ctypes.c_ulong)]
+
+if hasattr(ctypes, 'windll'):
+    WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA
+    WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA
+else:
+    def not_windows():
+        raise SystemError(
+            "Invalid platform. ctypes.windll must be available."
+        )
+    WSAStringToAddressA = not_windows
+    WSAAddressToStringA = not_windows
+
+
+def inet_pton(address_family, ip_string):
+    addr = sockaddr()
+    addr.sa_family = address_family
+    addr_size = ctypes.c_int(ctypes.sizeof(addr))
+
+    if WSAStringToAddressA(
+            ip_string,
+            address_family,
+            None,
+            ctypes.byref(addr),
+            ctypes.byref(addr_size)
+    ) != 0:
+        raise socket.error(ctypes.FormatError())
+
+    if address_family == socket.AF_INET:
+        return ctypes.string_at(addr.ipv4_addr, 4)
+    if address_family == socket.AF_INET6:
+        return ctypes.string_at(addr.ipv6_addr, 16)
+
+    raise socket.error('unknown address family')
+
+
+def inet_ntop(address_family, packed_ip):
+    addr = sockaddr()
+    addr.sa_family = address_family
+    addr_size = ctypes.c_int(ctypes.sizeof(addr))
+    ip_string = ctypes.create_string_buffer(128)
+    ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string))
+
+    if address_family == socket.AF_INET:
+        if len(packed_ip) != ctypes.sizeof(addr.ipv4_addr):
+            raise socket.error('packed IP wrong length for inet_ntoa')
+        ctypes.memmove(addr.ipv4_addr, packed_ip, 4)
+    elif address_family == socket.AF_INET6:
+        if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr):
+            raise socket.error('packed IP wrong length for inet_ntoa')
+        ctypes.memmove(addr.ipv6_addr, packed_ip, 16)
+    else:
+        raise socket.error('unknown address family')
+
+    if WSAAddressToStringA(
+            ctypes.byref(addr),
+            addr_size,
+            None,
+            ip_string,
+            ctypes.byref(ip_string_size)
+    ) != 0:
+        raise socket.error(ctypes.FormatError())
+
+    return ip_string[:ip_string_size.value - 1]
+
+# Adding our two functions to the socket library
+if os.name == 'nt':
+    socket.inet_pton = inet_pton
+    socket.inet_ntop = inet_ntop
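
Editor's note: win_inet_pton.py is new and simply monkey-patches socket.inet_pton/inet_ntop on Windows (os.name == 'nt'), where Python 2 lacks them, by delegating to WSAStringToAddressA/WSAAddressToStringA. A minimal usage sketch; importing the module is all that is required:

```python
# Importing the shim patches the socket module on Windows; on other platforms
# the import is harmless and the native functions are used.
import socket
import win_inet_pton  # noqa: F401

packed = socket.inet_pton(socket.AF_INET6, '::1')
print(socket.inet_ntop(socket.AF_INET6, packed))   # '::1'
```
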
diff --git a/lib/xmltodict.py b/lib/xmltodict.py
index 746a4bcd7cf5abdc80b19ffdeac3645b463cc83f..ce532d91ddfb315647dd17bf274712e2d273f6a8 100644
--- a/lib/xmltodict.py
+++ b/lib/xmltodict.py
@@ -1,7 +1,10 @@
 #!/usr/bin/env python
 "Makes working with XML feel like you are working with JSON"
 
-from xml.parsers import expat
+try:
+    from defusedexpat import pyexpat as expat
+except ImportError:
+    from xml.parsers import expat
 from xml.sax.saxutils import XMLGenerator
 from xml.sax.xmlreader import AttributesImpl
 try:  # pragma no cover
@@ -29,7 +32,7 @@ except NameError:  # pragma no cover
     _unicode = str
 
 __author__ = 'Martin Blech'
-__version__ = '0.9.2'
+__version__ = '0.11.0'
 __license__ = 'MIT'
 
 
@@ -50,10 +53,11 @@ class _DictSAXHandler(object):
                  dict_constructor=OrderedDict,
                  strip_whitespace=True,
                  namespace_separator=':',
-                 namespaces=None):
+                 namespaces=None,
+                 force_list=None):
         self.path = []
         self.stack = []
-        self.data = None
+        self.data = []
         self.item = None
         self.item_depth = item_depth
         self.xml_attribs = xml_attribs
@@ -67,6 +71,8 @@ class _DictSAXHandler(object):
         self.strip_whitespace = strip_whitespace
         self.namespace_separator = namespace_separator
         self.namespaces = namespaces
+        self.namespace_declarations = OrderedDict()
+        self.force_list = force_list
 
     def _build_name(self, full_name):
         if not self.namespaces:
@@ -86,34 +92,51 @@ class _DictSAXHandler(object):
             return attrs
         return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
 
+    def startNamespaceDecl(self, prefix, uri):
+        self.namespace_declarations[prefix or ''] = uri
+
     def startElement(self, full_name, attrs):
         name = self._build_name(full_name)
         attrs = self._attrs_to_dict(attrs)
+        if attrs and self.namespace_declarations:
+            attrs['xmlns'] = self.namespace_declarations
+            self.namespace_declarations = OrderedDict()
         self.path.append((name, attrs or None))
         if len(self.path) > self.item_depth:
             self.stack.append((self.item, self.data))
             if self.xml_attribs:
-                attrs = self.dict_constructor(
-                    (self.attr_prefix+self._build_name(key), value)
-                    for (key, value) in attrs.items())
+                attr_entries = []
+                for key, value in attrs.items():
+                    key = self.attr_prefix+self._build_name(key)
+                    if self.postprocessor:
+                        entry = self.postprocessor(self.path, key, value)
+                    else:
+                        entry = (key, value)
+                    if entry:
+                        attr_entries.append(entry)
+                attrs = self.dict_constructor(attr_entries)
             else:
                 attrs = None
             self.item = attrs or None
-            self.data = None
+            self.data = []
 
     def endElement(self, full_name):
         name = self._build_name(full_name)
         if len(self.path) == self.item_depth:
             item = self.item
             if item is None:
-                item = self.data
+                item = (None if not self.data
+                        else self.cdata_separator.join(self.data))
+
             should_continue = self.item_callback(self.path, item)
             if not should_continue:
                 raise ParsingInterrupted()
         if len(self.stack):
-            item, data = self.item, self.data
+            data = (None if not self.data
+                    else self.cdata_separator.join(self.data))
+            item = self.item
             self.item, self.data = self.stack.pop()
-            if self.strip_whitespace and data is not None:
+            if self.strip_whitespace and data:
                 data = data.strip() or None
             if data and self.force_cdata and item is None:
                 item = self.dict_constructor()
@@ -124,14 +147,15 @@ class _DictSAXHandler(object):
             else:
                 self.item = self.push_data(self.item, name, data)
         else:
-            self.item = self.data = None
+            self.item = None
+            self.data = []
         self.path.pop()
 
     def characters(self, data):
         if not self.data:
-            self.data = data
+            self.data = [data]
         else:
-            self.data += self.cdata_separator + data
+            self.data.append(data)
 
     def push_data(self, item, key, data):
         if self.postprocessor is not None:
@@ -148,12 +172,23 @@ class _DictSAXHandler(object):
             else:
                 item[key] = [value, data]
         except KeyError:
-            item[key] = data
+            if self._should_force_list(key, data):
+                item[key] = [data]
+            else:
+                item[key] = data
         return item
 
+    def _should_force_list(self, key, value):
+        if not self.force_list:
+            return False
+        try:
+            return key in self.force_list
+        except TypeError:
+            return self.force_list(self.path[:-1], key, value)
+
 
 def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
-          namespace_separator=':', **kwargs):
+          namespace_separator=':', disable_entities=True, **kwargs):
     """Parse the given XML input and convert it into a dictionary.
 
     `xml_input` can either be a `string` or a file-like object.
@@ -189,7 +224,7 @@ def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
     Streaming example::
 
         >>> def handle(path, item):
-        ...     print 'path:%s item:%s' % (path, item)
+        ...     print('path:%s item:%s' % (path, item))
         ...     return True
         ...
         >>> xmltodict.parse(\"\"\"
@@ -220,6 +255,41 @@ def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
         >>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
         OrderedDict([(u'a', u'hello')])
 
+    You can use the force_list argument to force lists to be created even
+    when there is only a single child of a given level of hierarchy. The
+    force_list argument is a tuple of keys. If the key for a given level
+    of hierarchy is in the force_list argument, that level of hierarchy
+    will have a list as a child (even if there is only one sub-element).
+    The index_keys operation takes precendence over this. This is applied
+    after any user-supplied postprocessor has already run.
+
+        For example, given this input:
+        <servers>
+          <server>
+            <name>host1</name>
+            <os>Linux</os>
+            <interfaces>
+              <interface>
+                <name>em0</name>
+                <ip_address>10.0.0.1</ip_address>
+              </interface>
+            </interfaces>
+          </server>
+        </servers>
+
+        If called with force_list=('interface',), it will produce
+        this dictionary:
+        {'servers':
+          {'server':
+            {'name': 'host1',
+             'os': 'Linux',
+             'interfaces':
+              {'interface':
+                [ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } } }
+
+        `force_list` can also be a callable that receives `path`, `key` and
+        `value`. This is helpful in cases where the logic that decides whether
+        a list should be forced is more complex.
     """
     handler = _DictSAXHandler(namespace_separator=namespace_separator,
                               **kwargs)
@@ -238,17 +308,44 @@ def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
     except AttributeError:
         # Jython's expat does not support ordered_attributes
         pass
+    parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
     parser.StartElementHandler = handler.startElement
     parser.EndElementHandler = handler.endElement
     parser.CharacterDataHandler = handler.characters
     parser.buffer_text = True
-    try:
+    if disable_entities:
+        try:
+            # Attempt to disable DTD in Jython's expat parser (Xerces-J).
+            feature = "http://apache.org/xml/features/disallow-doctype-decl"
+            parser._reader.setFeature(feature, True)
+        except AttributeError:
+            # For CPython / expat parser.
+            # Anything not handled ends up here and entities aren't expanded.
+            parser.DefaultHandler = lambda x: None
+            # Expects an integer return; zero means failure -> expat.ExpatError.
+            parser.ExternalEntityRefHandler = lambda *x: 1
+    if hasattr(xml_input, 'read'):
         parser.ParseFile(xml_input)
-    except (TypeError, AttributeError):
+    else:
         parser.Parse(xml_input, True)
     return handler.item
 
 
+def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
+    if not namespaces:
+        return name
+    try:
+        ns, name = name.rsplit(ns_sep, 1)
+    except ValueError:
+        pass
+    else:
+        ns_res = namespaces.get(ns.strip(attr_prefix))
+        name = '{0}{1}{2}{3}'.format(
+            attr_prefix if ns.startswith(attr_prefix) else '',
+            ns_res, ns_sep, name) if ns_res else name
+    return name
+
+
 def _emit(key, value, content_handler,
           attr_prefix='@',
           cdata_key='#text',
@@ -257,7 +354,10 @@ def _emit(key, value, content_handler,
           pretty=False,
           newl='\n',
           indent='\t',
+          namespace_separator=':',
+          namespaces=None,
           full_document=True):
+    key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
     if preprocessor is not None:
         result = preprocessor(key, value)
         if result is None:
@@ -284,6 +384,15 @@ def _emit(key, value, content_handler,
                 cdata = iv
                 continue
             if ik.startswith(attr_prefix):
+                ik = _process_namespace(ik, namespaces, namespace_separator,
+                                        attr_prefix)
+                if ik == '@xmlns' and isinstance(iv, dict):
+                    for k, v in iv.items():
+                        attr = 'xmlns{0}'.format(':{0}'.format(k) if k else '')
+                        attrs[attr] = _unicode(v)
+                    continue
+                if not isinstance(iv, _unicode):
+                    iv = _unicode(iv)
                 attrs[ik[len(attr_prefix):]] = iv
                 continue
             children.append((ik, iv))
@@ -295,7 +404,8 @@ def _emit(key, value, content_handler,
         for child_key, child_value in children:
             _emit(child_key, child_value, content_handler,
                   attr_prefix, cdata_key, depth+1, preprocessor,
-                  pretty, newl, indent)
+                  pretty, newl, indent, namespaces=namespaces,
+                  namespace_separator=namespace_separator)
         if cdata is not None:
             content_handler.characters(cdata)
         if pretty and children:
@@ -306,6 +416,7 @@ def _emit(key, value, content_handler,
 
 
 def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
+            short_empty_elements=False,
             **kwargs):
     """Emit an XML document for the given `input_dict` (reverse of `parse`).
 
@@ -327,7 +438,10 @@ def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
     if output is None:
         output = StringIO()
         must_return = True
-    content_handler = XMLGenerator(output, encoding)
+    if short_empty_elements:
+        content_handler = XMLGenerator(output, encoding, True)
+    else:
+        content_handler = XMLGenerator(output, encoding)
     if full_document:
         content_handler.startDocument()
     for key, value in input_dict.items():
@@ -346,16 +460,23 @@ def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
 if __name__ == '__main__':  # pragma: no cover
     import sys
     import marshal
+    try:
+        stdin = sys.stdin.buffer
+        stdout = sys.stdout.buffer
+    except AttributeError:
+        stdin = sys.stdin
+        stdout = sys.stdout
 
     (item_depth,) = sys.argv[1:]
     item_depth = int(item_depth)
 
+
     def handle_item(path, item):
-        marshal.dump((path, item), sys.stdout)
+        marshal.dump((path, item), stdout)
         return True
 
     try:
-        root = parse(sys.stdin,
+        root = parse(stdin,
                      item_depth=item_depth,
                      item_callback=handle_item,
                      dict_constructor=dict)
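
Editor's note: the xmltodict bump to 0.11.0 adds, among other things, the force_list argument and entity expansion disabled by default (disable_entities=True). A hedged example of the new behaviour, using only keyword arguments visible in the diff:

```python
# force_list guarantees a list even when an element occurs once; unparse with
# full_document=False emits a fragment without the XML declaration.
import xmltodict

doc = xmltodict.parse(
    '<servers><server><name>host1</name></server></servers>',
    force_list=('server',))
print(doc['servers']['server'][0]['name'])   # 'host1'

print(xmltodict.unparse({'root': {'@attr': 'x', '#text': 'hi'}},
                        full_document=False))   # <root attr="x">hi</root>
```
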
diff --git a/patches/feedparser.diff b/patches/feedparser.diff
deleted file mode 100644
index 1fd3637bbf1a94591744f26c9d32b6fea26653a9..0000000000000000000000000000000000000000
--- a/patches/feedparser.diff
+++ /dev/null
@@ -1,43 +0,0 @@
-diff --git a/lib/feedparser/api.py b/lib/feedparser/api.py
-index 614bd2d..12eafd2 100644
---- a/lib/feedparser/api.py
-+++ b/lib/feedparser/api.py
-@@ -60,6 +60,7 @@ from .sanitizer import replace_doctype
- from .sgml import *
- from .urls import _convert_to_idn, _makeSafeAbsoluteURI
- from .util import FeedParserDict
-+from . import USER_AGENT
- 
- bytes_ = type(b'')
- unicode_ = type('')
-diff --git a/lib/feedparser/util.py b/lib/feedparser/util.py
-index f7c02c0..df36b3e 100644
---- a/lib/feedparser/util.py
-+++ b/lib/feedparser/util.py
-@@ -122,9 +122,23 @@ class FeedParserDict(dict):
- 
-     def __setitem__(self, key, value):
-         key = self.keymap.get(key, key)
--        if isinstance(key, list):
--            key = key[0]
--        return dict.__setitem__(self, key, value)
-+        if key == 'newznab_attr':
-+            if isinstance(value, dict) and value.keys() == ['name', 'value']:
-+                key = value['name']
-+                value = value['value']
-+
-+            if not dict.__contains__(self, 'categories'):
-+                dict.__setitem__(self, 'categories', [])
-+
-+            if key == 'category':
-+                self['categories'].append(value)
-+            else:
-+                dict.__setitem__(self, key, value)
-+        else:
-+            if isinstance(key, list):
-+                key = key[0]
-+
-+            return dict.__setitem__(self, key, value)
- 
-     def setdefault(self, key, value):
-         if key not in self:
diff --git a/sickbeard/logger.py b/sickbeard/logger.py
index 38151be233a97f0a33bf0548202ffdd234a46251..f3ac40cbc69408463c5ffb76bc5cd5a05d74e018 100644
--- a/sickbeard/logger.py
+++ b/sickbeard/logger.py
@@ -38,7 +38,8 @@ from logging import NullHandler
 
 import sickbeard
 import six
-from github import InputFileContent, RateLimitExceededException, TwoFactorException
+from github import InputFileContent
+from github.GithubException import RateLimitExceededException, TwoFactorException
 from sickbeard import classes
 from sickrage.helper.common import dateTimeFormat
 from sickrage.helper.encoding import ek, ss
diff --git a/sickrage/helper/common.py b/sickrage/helper/common.py
index 9d61f69e01c8eacbf643442bbabf35b1985df160..b53478c66b6dbdd76916388224bcfdac934e1c3a 100644
--- a/sickrage/helper/common.py
+++ b/sickrage/helper/common.py
@@ -30,7 +30,8 @@ from fnmatch import fnmatch
 
 import sickbeard
 import six
-from github import BadCredentialsException, Github, TwoFactorException
+from github import Github
+from github.GithubException import BadCredentialsException, TwoFactorException
 
 dateFormat = '%Y-%m-%d'
 dateTimeFormat = '%Y-%m-%d %H:%M:%S'
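
Editor's note: the last two hunks only move the GitHub exception imports, pulling them from github.GithubException rather than the package root as the removed lines did. A small sketch of the resulting pattern (the token and the API call are illustrative only):

```python
# Exception classes now come from github.GithubException; Github itself is
# still a top-level import. The token below is a placeholder.
from github import Github
from github.GithubException import BadCredentialsException, TwoFactorException

try:
    print(Github('hypothetical-token').get_user().login)
except BadCredentialsException:
    print('GitHub rejected the credentials')
except TwoFactorException:
    print('two-factor authentication required')
```
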