diff --git a/gui/slick/views/config_postProcessing.mako b/gui/slick/views/config_postProcessing.mako
index 94a071e06f97608826b0a243f1751776b4afc779..4855c86502c2c74bd3833ecf6319177323371525 100644
--- a/gui/slick/views/config_postProcessing.mako
+++ b/gui/slick/views/config_postProcessing.mako
@@ -384,6 +384,21 @@
                                           <td>%Q_N</td>
                                           <td>720p_BluRay</td>
                                         </tr>
+                                        <tr>
+                                          <td class="align-right"><b>Scene Quality:</b></td>
+                                          <td>%SQN</td>
+                                          <td>720p HDTV x264</td>
+                                        </tr>
+                                        <tr class="even">
+                                          <td>&nbsp;</td>
+                                          <td>%SQ.N</td>
+                                          <td>720p.HDTV.x264</td>
+                                        </tr>
+                                        <tr>
+                                          <td>&nbsp;</td>
+                                          <td>%SQ_N</td>
+                                          <td>720p_HDTV_x264</td>
+                                        </tr>
                                         <tr class="even">
                                           <td class="align-right"><i class="glyphicon glyphicon-info-sign" title="Multi-EP style is ignored"></i> <b>Release Name:</b></td>
                                           <td>%RN</td>
diff --git a/lib/BeautifulSoup.py b/lib/BeautifulSoup.py
deleted file mode 100644
index 7278215ca2a899d72a7eb122e5e52cfeda56b24b..0000000000000000000000000000000000000000
--- a/lib/BeautifulSoup.py
+++ /dev/null
@@ -1,2017 +0,0 @@
-"""Beautiful Soup
-Elixir and Tonic
-"The Screen-Scraper's Friend"
-http://www.crummy.com/software/BeautifulSoup/
-
-Beautiful Soup parses a (possibly invalid) XML or HTML document into a
-tree representation. It provides methods and Pythonic idioms that make
-it easy to navigate, search, and modify the tree.
-
-A well-formed XML/HTML document yields a well-formed data
-structure. An ill-formed XML/HTML document yields a correspondingly
-ill-formed data structure. If your document is only locally
-well-formed, you can use this library to find and process the
-well-formed part of it.
-
-Beautiful Soup works with Python 2.2 and up. It has no external
-dependencies, but you'll have more success at converting data to UTF-8
-if you also install these three packages:
-
-* chardet, for auto-detecting character encodings
-  http://chardet.feedparser.org/
-* cjkcodecs and iconv_codec, which add more encodings to the ones supported
-  by stock Python.
-  http://cjkpython.i18n.org/
-
-Beautiful Soup defines classes for two main parsing strategies:
-
- * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
-   language that kind of looks like XML.
-
- * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
-   or invalid. This class has web browser-like heuristics for
-   obtaining a sensible parse tree in the face of common HTML errors.
-
-Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
-the encoding of an HTML or XML document, and converting it to
-Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
-
-For more than you ever wanted to know about Beautiful Soup, see the
-documentation:
-http://www.crummy.com/software/BeautifulSoup/documentation.html
-
-Here, have some legalese:
-
-Copyright (c) 2004-2010, Leonard Richardson
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-  * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-  * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following
-    disclaimer in the documentation and/or other materials provided
-    with the distribution.
-
-  * Neither the name of the the Beautiful Soup Consortium and All
-    Night Kosher Bakery nor the names of its contributors may be
-    used to endorse or promote products derived from this software
-    without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
-
-"""
-from __future__ import generators
-
-__author__ = "Leonard Richardson (leonardr@segfault.org)"
-__version__ = "3.2.1"
-__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
-__license__ = "New-style BSD"
-
-from sgmllib import SGMLParser, SGMLParseError
-import codecs
-import markupbase
-import types
-import re
-import sgmllib
-try:
-  from htmlentitydefs import name2codepoint
-except ImportError:
-  name2codepoint = {}
-try:
-    set
-except NameError:
-    from sets import Set as set
-
-#These hacks make Beautiful Soup able to parse XML with namespaces
-sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
-markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
-
-DEFAULT_OUTPUT_ENCODING = "utf-8"
-
-def _match_css_class(str):
-    """Build a RE to match the given CSS class."""
-    return re.compile(r"(^|.*\s)%s($|\s)" % str)
-
-# First, the classes that represent markup elements.
-
-class PageElement(object):
-    """Contains the navigational information for some part of the page
-    (either a tag or a piece of text)"""
-
-    def _invert(h):
-        "Cheap function to invert a hash."
-        i = {}
-        for k,v in h.items():
-            i[v] = k
-        return i
-
-    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
-                                      "quot" : '"',
-                                      "amp" : "&",
-                                      "lt" : "<",
-                                      "gt" : ">" }
-
-    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
-
-    def setup(self, parent=None, previous=None):
-        """Sets up the initial relations between this element and
-        other elements."""
-        self.parent = parent
-        self.previous = previous
-        self.next = None
-        self.previousSibling = None
-        self.nextSibling = None
-        if self.parent and self.parent.contents:
-            self.previousSibling = self.parent.contents[-1]
-            self.previousSibling.nextSibling = self
-
-    def replaceWith(self, replaceWith):
-        oldParent = self.parent
-        myIndex = self.parent.index(self)
-        if hasattr(replaceWith, "parent")\
-                  and replaceWith.parent is self.parent:
-            # We're replacing this element with one of its siblings.
-            index = replaceWith.parent.index(replaceWith)
-            if index and index < myIndex:
-                # Furthermore, it comes before this element. That
-                # means that when we extract it, the index of this
-                # element will change.
-                myIndex = myIndex - 1
-        self.extract()
-        oldParent.insert(myIndex, replaceWith)
-
-    def replaceWithChildren(self):
-        myParent = self.parent
-        myIndex = self.parent.index(self)
-        self.extract()
-        reversedChildren = list(self.contents)
-        reversedChildren.reverse()
-        for child in reversedChildren:
-            myParent.insert(myIndex, child)
-
-    def extract(self):
-        """Destructively rips this element out of the tree."""
-        if self.parent:
-            try:
-                del self.parent.contents[self.parent.index(self)]
-            except ValueError:
-                pass
-
-        #Find the two elements that would be next to each other if
-        #this element (and any children) hadn't been parsed. Connect
-        #the two.
-        lastChild = self._lastRecursiveChild()
-        nextElement = lastChild.next
-
-        if self.previous:
-            self.previous.next = nextElement
-        if nextElement:
-            nextElement.previous = self.previous
-        self.previous = None
-        lastChild.next = None
-
-        self.parent = None
-        if self.previousSibling:
-            self.previousSibling.nextSibling = self.nextSibling
-        if self.nextSibling:
-            self.nextSibling.previousSibling = self.previousSibling
-        self.previousSibling = self.nextSibling = None
-        return self
-
-    def _lastRecursiveChild(self):
-        "Finds the last element beneath this object to be parsed."
-        lastChild = self
-        while hasattr(lastChild, 'contents') and lastChild.contents:
-            lastChild = lastChild.contents[-1]
-        return lastChild
-
-    def insert(self, position, newChild):
-        if isinstance(newChild, basestring) \
-            and not isinstance(newChild, NavigableString):
-            newChild = NavigableString(newChild)
-
-        position =  min(position, len(self.contents))
-        if hasattr(newChild, 'parent') and newChild.parent is not None:
-            # We're 'inserting' an element that's already one
-            # of this object's children.
-            if newChild.parent is self:
-                index = self.index(newChild)
-                if index > position:
-                    # Furthermore we're moving it further down the
-                    # list of this object's children. That means that
-                    # when we extract this element, our target index
-                    # will jump down one.
-                    position = position - 1
-            newChild.extract()
-
-        newChild.parent = self
-        previousChild = None
-        if position == 0:
-            newChild.previousSibling = None
-            newChild.previous = self
-        else:
-            previousChild = self.contents[position-1]
-            newChild.previousSibling = previousChild
-            newChild.previousSibling.nextSibling = newChild
-            newChild.previous = previousChild._lastRecursiveChild()
-        if newChild.previous:
-            newChild.previous.next = newChild
-
-        newChildsLastElement = newChild._lastRecursiveChild()
-
-        if position >= len(self.contents):
-            newChild.nextSibling = None
-
-            parent = self
-            parentsNextSibling = None
-            while not parentsNextSibling:
-                parentsNextSibling = parent.nextSibling
-                parent = parent.parent
-                if not parent: # This is the last element in the document.
-                    break
-            if parentsNextSibling:
-                newChildsLastElement.next = parentsNextSibling
-            else:
-                newChildsLastElement.next = None
-        else:
-            nextChild = self.contents[position]
-            newChild.nextSibling = nextChild
-            if newChild.nextSibling:
-                newChild.nextSibling.previousSibling = newChild
-            newChildsLastElement.next = nextChild
-
-        if newChildsLastElement.next:
-            newChildsLastElement.next.previous = newChildsLastElement
-        self.contents.insert(position, newChild)
-
-    def append(self, tag):
-        """Appends the given tag to the contents of this tag."""
-        self.insert(len(self.contents), tag)
-
-    def findNext(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the first item that matches the given criteria and
-        appears after this Tag in the document."""
-        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
-
-    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
-                    **kwargs):
-        """Returns all items that match the given criteria and appear
-        after this Tag in the document."""
-        return self._findAll(name, attrs, text, limit, self.nextGenerator,
-                             **kwargs)
-
-    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the closest sibling to this Tag that matches the
-        given criteria and appears after this Tag in the document."""
-        return self._findOne(self.findNextSiblings, name, attrs, text,
-                             **kwargs)
-
-    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
-                         **kwargs):
-        """Returns the siblings of this Tag that match the given
-        criteria and appear after this Tag in the document."""
-        return self._findAll(name, attrs, text, limit,
-                             self.nextSiblingGenerator, **kwargs)
-    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
-
-    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the first item that matches the given criteria and
-        appears before this Tag in the document."""
-        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
-
-    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
-                        **kwargs):
-        """Returns all items that match the given criteria and appear
-        before this Tag in the document."""
-        return self._findAll(name, attrs, text, limit, self.previousGenerator,
-                           **kwargs)
-    fetchPrevious = findAllPrevious # Compatibility with pre-3.x
-
-    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the closest sibling to this Tag that matches the
-        given criteria and appears before this Tag in the document."""
-        return self._findOne(self.findPreviousSiblings, name, attrs, text,
-                             **kwargs)
-
-    def findPreviousSiblings(self, name=None, attrs={}, text=None,
-                             limit=None, **kwargs):
-        """Returns the siblings of this Tag that match the given
-        criteria and appear before this Tag in the document."""
-        return self._findAll(name, attrs, text, limit,
-                             self.previousSiblingGenerator, **kwargs)
-    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
-
-    def findParent(self, name=None, attrs={}, **kwargs):
-        """Returns the closest parent of this Tag that matches the given
-        criteria."""
-        # NOTE: We can't use _findOne because findParents takes a different
-        # set of arguments.
-        r = None
-        l = self.findParents(name, attrs, 1)
-        if l:
-            r = l[0]
-        return r
-
-    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
-        """Returns the parents of this Tag that match the given
-        criteria."""
-
-        return self._findAll(name, attrs, None, limit, self.parentGenerator,
-                             **kwargs)
-    fetchParents = findParents # Compatibility with pre-3.x
-
-    #These methods do the real heavy lifting.
-
-    def _findOne(self, method, name, attrs, text, **kwargs):
-        r = None
-        l = method(name, attrs, text, 1, **kwargs)
-        if l:
-            r = l[0]
-        return r
-
-    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
-        "Iterates over a generator looking for things that match."
-
-        if isinstance(name, SoupStrainer):
-            strainer = name
-        # (Possibly) special case some findAll*(...) searches
-        elif text is None and not limit and not attrs and not kwargs:
-            # findAll*(True)
-            if name is True:
-                return [element for element in generator()
-                        if isinstance(element, Tag)]
-            # findAll*('tag-name')
-            elif isinstance(name, basestring):
-                return [element for element in generator()
-                        if isinstance(element, Tag) and
-                        element.name == name]
-            else:
-                strainer = SoupStrainer(name, attrs, text, **kwargs)
-        # Build a SoupStrainer
-        else:
-            strainer = SoupStrainer(name, attrs, text, **kwargs)
-        results = ResultSet(strainer)
-        g = generator()
-        while True:
-            try:
-                i = g.next()
-            except StopIteration:
-                break
-            if i:
-                found = strainer.search(i)
-                if found:
-                    results.append(found)
-                    if limit and len(results) >= limit:
-                        break
-        return results
-
-    #These Generators can be used to navigate starting from both
-    #NavigableStrings and Tags.
-    def nextGenerator(self):
-        i = self
-        while i is not None:
-            i = i.next
-            yield i
-
-    def nextSiblingGenerator(self):
-        i = self
-        while i is not None:
-            i = i.nextSibling
-            yield i
-
-    def previousGenerator(self):
-        i = self
-        while i is not None:
-            i = i.previous
-            yield i
-
-    def previousSiblingGenerator(self):
-        i = self
-        while i is not None:
-            i = i.previousSibling
-            yield i
-
-    def parentGenerator(self):
-        i = self
-        while i is not None:
-            i = i.parent
-            yield i
-
-    # Utility methods
-    def substituteEncoding(self, str, encoding=None):
-        encoding = encoding or "utf-8"
-        return str.replace("%SOUP-ENCODING%", encoding)
-
-    def toEncoding(self, s, encoding=None):
-        """Encodes an object to a string in some encoding, or to Unicode.
-        ."""
-        if isinstance(s, unicode):
-            if encoding:
-                s = s.encode(encoding)
-        elif isinstance(s, str):
-            if encoding:
-                s = s.encode(encoding)
-            else:
-                s = unicode(s)
-        else:
-            if encoding:
-                s  = self.toEncoding(str(s), encoding)
-            else:
-                s = unicode(s)
-        return s
-
-    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
-                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
-                                           + ")")
-
-    def _sub_entity(self, x):
-        """Used with a regular expression to substitute the
-        appropriate XML entity for an XML special character."""
-        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
-
-
-class NavigableString(unicode, PageElement):
-
-    def __new__(cls, value):
-        """Create a new NavigableString.
-
-        When unpickling a NavigableString, this method is called with
-        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
-        passed in to the superclass's __new__ or the superclass won't know
-        how to handle non-ASCII characters.
-        """
-        if isinstance(value, unicode):
-            return unicode.__new__(cls, value)
-        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
-
-    def __getnewargs__(self):
-        return (NavigableString.__str__(self),)
-
-    def __getattr__(self, attr):
-        """text.string gives you text. This is for backwards
-        compatibility for Navigable*String, but for CData* it lets you
-        get the string without the CData wrapper."""
-        if attr == 'string':
-            return self
-        else:
-            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
-
-    def __unicode__(self):
-        return str(self).decode(DEFAULT_OUTPUT_ENCODING)
-
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        # Substitute outgoing XML entities.
-        data = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, self)
-        if encoding:
-            return data.encode(encoding)
-        else:
-            return data
-
-class CData(NavigableString):
-
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
-
-class ProcessingInstruction(NavigableString):
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        output = self
-        if "%SOUP-ENCODING%" in output:
-            output = self.substituteEncoding(output, encoding)
-        return "<?%s?>" % self.toEncoding(output, encoding)
-
-class Comment(NavigableString):
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        return "<!--%s-->" % NavigableString.__str__(self, encoding)
-
-class Declaration(NavigableString):
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        return "<!%s>" % NavigableString.__str__(self, encoding)
-
-class Tag(PageElement):
-
-    """Represents a found HTML tag with its attributes and contents."""
-
-    def _convertEntities(self, match):
-        """Used in a call to re.sub to replace HTML, XML, and numeric
-        entities with the appropriate Unicode characters. If HTML
-        entities are being converted, any unrecognized entities are
-        escaped."""
-        x = match.group(1)
-        if self.convertHTMLEntities and x in name2codepoint:
-            return unichr(name2codepoint[x])
-        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
-            if self.convertXMLEntities:
-                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
-            else:
-                return u'&%s;' % x
-        elif len(x) > 0 and x[0] == '#':
-            # Handle numeric entities
-            if len(x) > 1 and x[1] == 'x':
-                return unichr(int(x[2:], 16))
-            else:
-                return unichr(int(x[1:]))
-
-        elif self.escapeUnrecognizedEntities:
-            return u'&amp;%s;' % x
-        else:
-            return u'&%s;' % x
-
-    def __init__(self, parser, name, attrs=None, parent=None,
-                 previous=None):
-        "Basic constructor."
-
-        # We don't actually store the parser object: that lets extracted
-        # chunks be garbage-collected
-        self.parserClass = parser.__class__
-        self.isSelfClosing = parser.isSelfClosingTag(name)
-        self.name = name
-        if attrs is None:
-            attrs = []
-        elif isinstance(attrs, dict):
-            attrs = attrs.items()
-        self.attrs = attrs
-        self.contents = []
-        self.setup(parent, previous)
-        self.hidden = False
-        self.containsSubstitutions = False
-        self.convertHTMLEntities = parser.convertHTMLEntities
-        self.convertXMLEntities = parser.convertXMLEntities
-        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
-
-        # Convert any HTML, XML, or numeric entities in the attribute values.
-        convert = lambda(k, val): (k,
-                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
-                                          self._convertEntities,
-                                          val))
-        self.attrs = map(convert, self.attrs)
-
-    def getString(self):
-        if (len(self.contents) == 1
-            and isinstance(self.contents[0], NavigableString)):
-            return self.contents[0]
-
-    def setString(self, string):
-        """Replace the contents of the tag with a string"""
-        self.clear()
-        self.append(string)
-
-    string = property(getString, setString)
-
-    def getText(self, separator=u""):
-        if not len(self.contents):
-            return u""
-        stopNode = self._lastRecursiveChild().next
-        strings = []
-        current = self.contents[0]
-        while current is not stopNode:
-            if isinstance(current, NavigableString):
-                strings.append(current.strip())
-            current = current.next
-        return separator.join(strings)
-
-    text = property(getText)
-
-    def get(self, key, default=None):
-        """Returns the value of the 'key' attribute for the tag, or
-        the value given for 'default' if it doesn't have that
-        attribute."""
-        return self._getAttrMap().get(key, default)
-
-    def clear(self):
-        """Extract all children."""
-        for child in self.contents[:]:
-            child.extract()
-
-    def index(self, element):
-        for i, child in enumerate(self.contents):
-            if child is element:
-                return i
-        raise ValueError("Tag.index: element not in tag")
-
-    def has_key(self, key):
-        return self._getAttrMap().has_key(key)
-
-    def __getitem__(self, key):
-        """tag[key] returns the value of the 'key' attribute for the tag,
-        and throws an exception if it's not there."""
-        return self._getAttrMap()[key]
-
-    def __iter__(self):
-        "Iterating over a tag iterates over its contents."
-        return iter(self.contents)
-
-    def __len__(self):
-        "The length of a tag is the length of its list of contents."
-        return len(self.contents)
-
-    def __contains__(self, x):
-        return x in self.contents
-
-    def __nonzero__(self):
-        "A tag is non-None even if it has no contents."
-        return True
-
-    def __setitem__(self, key, value):
-        """Setting tag[key] sets the value of the 'key' attribute for the
-        tag."""
-        self._getAttrMap()
-        self.attrMap[key] = value
-        found = False
-        for i in range(0, len(self.attrs)):
-            if self.attrs[i][0] == key:
-                self.attrs[i] = (key, value)
-                found = True
-        if not found:
-            self.attrs.append((key, value))
-        self._getAttrMap()[key] = value
-
-    def __delitem__(self, key):
-        "Deleting tag[key] deletes all 'key' attributes for the tag."
-        for item in self.attrs:
-            if item[0] == key:
-                self.attrs.remove(item)
-                #We don't break because bad HTML can define the same
-                #attribute multiple times.
-            self._getAttrMap()
-            if self.attrMap.has_key(key):
-                del self.attrMap[key]
-
-    def __call__(self, *args, **kwargs):
-        """Calling a tag like a function is the same as calling its
-        findAll() method. Eg. tag('a') returns a list of all the A tags
-        found within this tag."""
-        return apply(self.findAll, args, kwargs)
-
-    def __getattr__(self, tag):
-        #print "Getattr %s.%s" % (self.__class__, tag)
-        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
-            return self.find(tag[:-3])
-        elif tag.find('__') != 0:
-            return self.find(tag)
-        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
-
-    def __eq__(self, other):
-        """Returns true iff this tag has the same name, the same attributes,
-        and the same contents (recursively) as the given tag.
-
-        NOTE: right now this will return false if two tags have the
-        same attributes in a different order. Should this be fixed?"""
-        if other is self:
-            return True
-        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
-            return False
-        for i in range(0, len(self.contents)):
-            if self.contents[i] != other.contents[i]:
-                return False
-        return True
-
-    def __ne__(self, other):
-        """Returns true iff this tag is not identical to the other tag,
-        as defined in __eq__."""
-        return not self == other
-
-    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        """Renders this tag as a string."""
-        return self.__str__(encoding)
-
-    def __unicode__(self):
-        return self.__str__(None)
-
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
-                prettyPrint=False, indentLevel=0):
-        """Returns a string or Unicode representation of this tag and
-        its contents. To get Unicode, pass None for encoding.
-
-        NOTE: since Python's HTML parser consumes whitespace, this
-        method is not certain to reproduce the whitespace present in
-        the original string."""
-
-        encodedName = self.toEncoding(self.name, encoding)
-
-        attrs = []
-        if self.attrs:
-            for key, val in self.attrs:
-                fmt = '%s="%s"'
-                if isinstance(val, basestring):
-                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
-                        val = self.substituteEncoding(val, encoding)
-
-                    # The attribute value either:
-                    #
-                    # * Contains no embedded double quotes or single quotes.
-                    #   No problem: we enclose it in double quotes.
-                    # * Contains embedded single quotes. No problem:
-                    #   double quotes work here too.
-                    # * Contains embedded double quotes. No problem:
-                    #   we enclose it in single quotes.
-                    # * Embeds both single _and_ double quotes. This
-                    #   can't happen naturally, but it can happen if
-                    #   you modify an attribute value after parsing
-                    #   the document. Now we have a bit of a
-                    #   problem. We solve it by enclosing the
-                    #   attribute in single quotes, and escaping any
-                    #   embedded single quotes to XML entities.
-                    if '"' in val:
-                        fmt = "%s='%s'"
-                        if "'" in val:
-                            # TODO: replace with apos when
-                            # appropriate.
-                            val = val.replace("'", "&squot;")
-
-                    # Now we're okay w/r/t quotes. But the attribute
-                    # value might also contain angle brackets, or
-                    # ampersands that aren't part of entities. We need
-                    # to escape those to XML entities too.
-                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
-
-                attrs.append(fmt % (self.toEncoding(key, encoding),
-                                    self.toEncoding(val, encoding)))
-        close = ''
-        closeTag = ''
-        if self.isSelfClosing:
-            close = ' /'
-        else:
-            closeTag = '</%s>' % encodedName
-
-        indentTag, indentContents = 0, 0
-        if prettyPrint:
-            indentTag = indentLevel
-            space = (' ' * (indentTag-1))
-            indentContents = indentTag + 1
-        contents = self.renderContents(encoding, prettyPrint, indentContents)
-        if self.hidden:
-            s = contents
-        else:
-            s = []
-            attributeString = ''
-            if attrs:
-                attributeString = ' ' + ' '.join(attrs)
-            if prettyPrint:
-                s.append(space)
-            s.append('<%s%s%s>' % (encodedName, attributeString, close))
-            if prettyPrint:
-                s.append("\n")
-            s.append(contents)
-            if prettyPrint and contents and contents[-1] != "\n":
-                s.append("\n")
-            if prettyPrint and closeTag:
-                s.append(space)
-            s.append(closeTag)
-            if prettyPrint and closeTag and self.nextSibling:
-                s.append("\n")
-            s = ''.join(s)
-        return s
-
-    def decompose(self):
-        """Recursively destroys the contents of this tree."""
-        self.extract()
-        if len(self.contents) == 0:
-            return
-        current = self.contents[0]
-        while current is not None:
-            next = current.next
-            if isinstance(current, Tag):
-                del current.contents[:]
-            current.parent = None
-            current.previous = None
-            current.previousSibling = None
-            current.next = None
-            current.nextSibling = None
-            current = next
-
-    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        return self.__str__(encoding, True)
-
-    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
-                       prettyPrint=False, indentLevel=0):
-        """Renders the contents of this tag as a string in the given
-        encoding. If encoding is None, returns a Unicode string.."""
-        s=[]
-        for c in self:
-            text = None
-            if isinstance(c, NavigableString):
-                text = c.__str__(encoding)
-            elif isinstance(c, Tag):
-                s.append(c.__str__(encoding, prettyPrint, indentLevel))
-            if text and prettyPrint:
-                text = text.strip()
-            if text:
-                if prettyPrint:
-                    s.append(" " * (indentLevel-1))
-                s.append(text)
-                if prettyPrint:
-                    s.append("\n")
-        return ''.join(s)
-
-    #Soup methods
-
-    def find(self, name=None, attrs={}, recursive=True, text=None,
-             **kwargs):
-        """Return only the first child of this Tag matching the given
-        criteria."""
-        r = None
-        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
-        if l:
-            r = l[0]
-        return r
-    findChild = find
-
-    def findAll(self, name=None, attrs={}, recursive=True, text=None,
-                limit=None, **kwargs):
-        """Extracts a list of Tag objects that match the given
-        criteria.  You can specify the name of the Tag and any
-        attributes you want the Tag to have.
-
-        The value of a key-value pair in the 'attrs' map can be a
-        string, a list of strings, a regular expression object, or a
-        callable that takes a string and returns whether or not the
-        string matches for some custom definition of 'matches'. The
-        same is true of the tag name."""
-        generator = self.recursiveChildGenerator
-        if not recursive:
-            generator = self.childGenerator
-        return self._findAll(name, attrs, text, limit, generator, **kwargs)
-    findChildren = findAll
-
-    # Pre-3.x compatibility methods
-    first = find
-    fetch = findAll
-
-    def fetchText(self, text=None, recursive=True, limit=None):
-        return self.findAll(text=text, recursive=recursive, limit=limit)
-
-    def firstText(self, text=None, recursive=True):
-        return self.find(text=text, recursive=recursive)
-
-    #Private methods
-
-    def _getAttrMap(self):
-        """Initializes a map representation of this tag's attributes,
-        if not already initialized."""
-        if not getattr(self, 'attrMap'):
-            self.attrMap = {}
-            for (key, value) in self.attrs:
-                self.attrMap[key] = value
-        return self.attrMap
-
-    #Generator methods
-    def childGenerator(self):
-        # Just use the iterator from the contents
-        return iter(self.contents)
-
-    def recursiveChildGenerator(self):
-        if not len(self.contents):
-            raise StopIteration
-        stopNode = self._lastRecursiveChild().next
-        current = self.contents[0]
-        while current is not stopNode:
-            yield current
-            current = current.next
-
-
-# Next, a couple classes to represent queries and their results.
-class SoupStrainer:
-    """Encapsulates a number of ways of matching a markup element (tag or
-    text)."""
-
-    def __init__(self, name=None, attrs={}, text=None, **kwargs):
-        self.name = name
-        if isinstance(attrs, basestring):
-            kwargs['class'] = _match_css_class(attrs)
-            attrs = None
-        if kwargs:
-            if attrs:
-                attrs = attrs.copy()
-                attrs.update(kwargs)
-            else:
-                attrs = kwargs
-        self.attrs = attrs
-        self.text = text
-
-    def __str__(self):
-        if self.text:
-            return self.text
-        else:
-            return "%s|%s" % (self.name, self.attrs)
-
-    def searchTag(self, markupName=None, markupAttrs={}):
-        found = None
-        markup = None
-        if isinstance(markupName, Tag):
-            markup = markupName
-            markupAttrs = markup
-        callFunctionWithTagData = callable(self.name) \
-                                and not isinstance(markupName, Tag)
-
-        if (not self.name) \
-               or callFunctionWithTagData \
-               or (markup and self._matches(markup, self.name)) \
-               or (not markup and self._matches(markupName, self.name)):
-            if callFunctionWithTagData:
-                match = self.name(markupName, markupAttrs)
-            else:
-                match = True
-                markupAttrMap = None
-                for attr, matchAgainst in self.attrs.items():
-                    if not markupAttrMap:
-                         if hasattr(markupAttrs, 'get'):
-                            markupAttrMap = markupAttrs
-                         else:
-                            markupAttrMap = {}
-                            for k,v in markupAttrs:
-                                markupAttrMap[k] = v
-                    attrValue = markupAttrMap.get(attr)
-                    if not self._matches(attrValue, matchAgainst):
-                        match = False
-                        break
-            if match:
-                if markup:
-                    found = markup
-                else:
-                    found = markupName
-        return found
-
-    def search(self, markup):
-        #print 'looking for %s in %s' % (self, markup)
-        found = None
-        # If given a list of items, scan it for a text element that
-        # matches.
-        if hasattr(markup, "__iter__") \
-                and not isinstance(markup, Tag):
-            for element in markup:
-                if isinstance(element, NavigableString) \
-                       and self.search(element):
-                    found = element
-                    break
-        # If it's a Tag, make sure its name or attributes match.
-        # Don't bother with Tags if we're searching for text.
-        elif isinstance(markup, Tag):
-            if not self.text:
-                found = self.searchTag(markup)
-        # If it's text, make sure the text matches.
-        elif isinstance(markup, NavigableString) or \
-                 isinstance(markup, basestring):
-            if self._matches(markup, self.text):
-                found = markup
-        else:
-            raise Exception, "I don't know how to match against a %s" \
-                  % markup.__class__
-        return found
-
-    def _matches(self, markup, matchAgainst):
-        #print "Matching %s against %s" % (markup, matchAgainst)
-        result = False
-        if matchAgainst is True:
-            result = markup is not None
-        elif callable(matchAgainst):
-            result = matchAgainst(markup)
-        else:
-            #Custom match methods take the tag as an argument, but all
-            #other ways of matching match the tag name as a string.
-            if isinstance(markup, Tag):
-                markup = markup.name
-            if markup and not isinstance(markup, basestring):
-                markup = unicode(markup)
-            #Now we know that chunk is either a string, or None.
-            if hasattr(matchAgainst, 'match'):
-                # It's a regexp object.
-                result = markup and matchAgainst.search(markup)
-            elif hasattr(matchAgainst, '__iter__'): # list-like
-                result = markup in matchAgainst
-            elif hasattr(matchAgainst, 'items'):
-                result = markup.has_key(matchAgainst)
-            elif matchAgainst and isinstance(markup, basestring):
-                if isinstance(markup, unicode):
-                    matchAgainst = unicode(matchAgainst)
-                else:
-                    matchAgainst = str(matchAgainst)
-
-            if not result:
-                result = matchAgainst == markup
-        return result
-
-class ResultSet(list):
-    """A ResultSet is just a list that keeps track of the SoupStrainer
-    that created it."""
-    def __init__(self, source):
-        list.__init__([])
-        self.source = source
-
-# Now, some helper functions.
-
-def buildTagMap(default, *args):
-    """Turns a list of maps, lists, or scalars into a single map.
-    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
-    NESTING_RESET_TAGS maps out of lists and partial maps."""
-    built = {}
-    for portion in args:
-        if hasattr(portion, 'items'):
-            #It's a map. Merge it.
-            for k,v in portion.items():
-                built[k] = v
-        elif hasattr(portion, '__iter__'): # is a list
-            #It's a list. Map each item to the default.
-            for k in portion:
-                built[k] = default
-        else:
-            #It's a scalar. Map it to the default.
-            built[portion] = default
-    return built
-
-# Now, the parser classes.
-
-class BeautifulStoneSoup(Tag, SGMLParser):
-
-    """This class contains the basic parser and search code. It defines
-    a parser that knows nothing about tag behavior except for the
-    following:
-
-      You can't close a tag without closing all the tags it encloses.
-      That is, "<foo><bar></foo>" actually means
-      "<foo><bar></bar></foo>".
-
-    [Another possible explanation is "<foo><bar /></foo>", but since
-    this class defines no SELF_CLOSING_TAGS, it will never use that
-    explanation.]
-
-    This class is useful for parsing XML or made-up markup languages,
-    or when BeautifulSoup makes an assumption counter to what you were
-    expecting."""
-
-    SELF_CLOSING_TAGS = {}
-    NESTABLE_TAGS = {}
-    RESET_NESTING_TAGS = {}
-    QUOTE_TAGS = {}
-    PRESERVE_WHITESPACE_TAGS = []
-
-    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
-                       lambda x: x.group(1) + ' />'),
-                      (re.compile('<!\s+([^<>]*)>'),
-                       lambda x: '<!' + x.group(1) + '>')
-                      ]
-
-    ROOT_TAG_NAME = u'[document]'
-
-    HTML_ENTITIES = "html"
-    XML_ENTITIES = "xml"
-    XHTML_ENTITIES = "xhtml"
-    # TODO: This only exists for backwards-compatibility
-    ALL_ENTITIES = XHTML_ENTITIES
-
-    # Used when determining whether a text node is all whitespace and
-    # can be replaced with a single space. A text node that contains
-    # fancy Unicode spaces (usually non-breaking) should be left
-    # alone.
-    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
-
-    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
-                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
-                 convertEntities=None, selfClosingTags=None, isHTML=False):
-        """The Soup object is initialized as the 'root tag', and the
-        provided markup (which can be a string or a file-like object)
-        is fed into the underlying parser.
-
-        sgmllib will process most bad HTML, and the BeautifulSoup
-        class has some tricks for dealing with some HTML that kills
-        sgmllib, but Beautiful Soup can nonetheless choke or lose data
-        if your data uses self-closing tags or declarations
-        incorrectly.
-
-        By default, Beautiful Soup uses regexes to sanitize input,
-        avoiding the vast majority of these problems. If the problems
-        don't apply to you, pass in False for markupMassage, and
-        you'll get better performance.
-
-        The default parser massage techniques fix the two most common
-        instances of invalid HTML that choke sgmllib:
-
-         <br/> (No space between name of closing tag and tag close)
-         <! --Comment--> (Extraneous whitespace in declaration)
-
-        You can pass in a custom list of (RE object, replace method)
-        tuples to get Beautiful Soup to scrub your input the way you
-        want."""
-
-        self.parseOnlyThese = parseOnlyThese
-        self.fromEncoding = fromEncoding
-        self.smartQuotesTo = smartQuotesTo
-        self.convertEntities = convertEntities
-        # Set the rules for how we'll deal with the entities we
-        # encounter
-        if self.convertEntities:
-            # It doesn't make sense to convert encoded characters to
-            # entities even while you're converting entities to Unicode.
-            # Just convert it all to Unicode.
-            self.smartQuotesTo = None
-            if convertEntities == self.HTML_ENTITIES:
-                self.convertXMLEntities = False
-                self.convertHTMLEntities = True
-                self.escapeUnrecognizedEntities = True
-            elif convertEntities == self.XHTML_ENTITIES:
-                self.convertXMLEntities = True
-                self.convertHTMLEntities = True
-                self.escapeUnrecognizedEntities = False
-            elif convertEntities == self.XML_ENTITIES:
-                self.convertXMLEntities = True
-                self.convertHTMLEntities = False
-                self.escapeUnrecognizedEntities = False
-        else:
-            self.convertXMLEntities = False
-            self.convertHTMLEntities = False
-            self.escapeUnrecognizedEntities = False
-
-        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
-        SGMLParser.__init__(self)
-
-        if hasattr(markup, 'read'):        # It's a file-type object.
-            markup = markup.read()
-        self.markup = markup
-        self.markupMassage = markupMassage
-        try:
-            self._feed(isHTML=isHTML)
-        except StopParsing:
-            pass
-        self.markup = None                 # The markup can now be GCed
-
-    def convert_charref(self, name):
-        """This method fixes a bug in Python's SGMLParser."""
-        try:
-            n = int(name)
-        except ValueError:
-            return
-        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
-            return
-        return self.convert_codepoint(n)
-
-    def _feed(self, inDocumentEncoding=None, isHTML=False):
-        # Convert the document to Unicode.
-        markup = self.markup
-        if isinstance(markup, unicode):
-            if not hasattr(self, 'originalEncoding'):
-                self.originalEncoding = None
-        else:
-            dammit = UnicodeDammit\
-                     (markup, [self.fromEncoding, inDocumentEncoding],
-                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
-            markup = dammit.unicode
-            self.originalEncoding = dammit.originalEncoding
-            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
-        if markup:
-            if self.markupMassage:
-                if not hasattr(self.markupMassage, "__iter__"):
-                    self.markupMassage = self.MARKUP_MASSAGE
-                for fix, m in self.markupMassage:
-                    markup = fix.sub(m, markup)
-                # TODO: We get rid of markupMassage so that the
-                # soup object can be deepcopied later on. Some
-                # Python installations can't copy regexes. If anyone
-                # was relying on the existence of markupMassage, this
-                # might cause problems.
-                del(self.markupMassage)
-        self.reset()
-
-        SGMLParser.feed(self, markup)
-        # Close out any unfinished strings and close all the open tags.
-        self.endData()
-        while self.currentTag.name != self.ROOT_TAG_NAME:
-            self.popTag()
-
-    def __getattr__(self, methodName):
-        """This method routes method call requests to either the SGMLParser
-        superclass or the Tag superclass, depending on the method name."""
-        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
-
-        if methodName.startswith('start_') or methodName.startswith('end_') \
-               or methodName.startswith('do_'):
-            return SGMLParser.__getattr__(self, methodName)
-        elif not methodName.startswith('__'):
-            return Tag.__getattr__(self, methodName)
-        else:
-            raise AttributeError
-
-    def isSelfClosingTag(self, name):
-        """Returns true iff the given string is the name of a
-        self-closing tag according to this parser."""
-        return self.SELF_CLOSING_TAGS.has_key(name) \
-               or self.instanceSelfClosingTags.has_key(name)
-
-    def reset(self):
-        Tag.__init__(self, self, self.ROOT_TAG_NAME)
-        self.hidden = 1
-        SGMLParser.reset(self)
-        self.currentData = []
-        self.currentTag = None
-        self.tagStack = []
-        self.quoteStack = []
-        self.pushTag(self)
-
-    def popTag(self):
-        tag = self.tagStack.pop()
-
-        #print "Pop", tag.name
-        if self.tagStack:
-            self.currentTag = self.tagStack[-1]
-        return self.currentTag
-
-    def pushTag(self, tag):
-        #print "Push", tag.name
-        if self.currentTag:
-            self.currentTag.contents.append(tag)
-        self.tagStack.append(tag)
-        self.currentTag = self.tagStack[-1]
-
-    def endData(self, containerClass=NavigableString):
-        if self.currentData:
-            currentData = u''.join(self.currentData)
-            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
-                not set([tag.name for tag in self.tagStack]).intersection(
-                    self.PRESERVE_WHITESPACE_TAGS)):
-                if '\n' in currentData:
-                    currentData = '\n'
-                else:
-                    currentData = ' '
-            self.currentData = []
-            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
-                   (not self.parseOnlyThese.text or \
-                    not self.parseOnlyThese.search(currentData)):
-                return
-            o = containerClass(currentData)
-            o.setup(self.currentTag, self.previous)
-            if self.previous:
-                self.previous.next = o
-            self.previous = o
-            self.currentTag.contents.append(o)
-
-
-    def _popToTag(self, name, inclusivePop=True):
-        """Pops the tag stack up to and including the most recent
-        instance of the given tag. If inclusivePop is false, pops the tag
-        stack up to but *not* including the most recent instqance of
-        the given tag."""
-        #print "Popping to %s" % name
-        if name == self.ROOT_TAG_NAME:
-            return
-
-        numPops = 0
-        mostRecentTag = None
-        for i in range(len(self.tagStack)-1, 0, -1):
-            if name == self.tagStack[i].name:
-                numPops = len(self.tagStack)-i
-                break
-        if not inclusivePop:
-            numPops = numPops - 1
-
-        for i in range(0, numPops):
-            mostRecentTag = self.popTag()
-        return mostRecentTag
-
-    def _smartPop(self, name):
-
-        """We need to pop up to the previous tag of this type, unless
-        one of this tag's nesting reset triggers comes between this
-        tag and the previous tag of this type, OR unless this tag is a
-        generic nesting trigger and another generic nesting trigger
-        comes between this tag and the previous tag of this type.
-
-        Examples:
-         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
-         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
-         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
-
-         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
-         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
-         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
-        """
-
-        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
-        isNestable = nestingResetTriggers != None
-        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
-        popTo = None
-        inclusive = True
-        for i in range(len(self.tagStack)-1, 0, -1):
-            p = self.tagStack[i]
-            if (not p or p.name == name) and not isNestable:
-                #Non-nestable tags get popped to the top or to their
-                #last occurance.
-                popTo = name
-                break
-            if (nestingResetTriggers is not None
-                and p.name in nestingResetTriggers) \
-                or (nestingResetTriggers is None and isResetNesting
-                    and self.RESET_NESTING_TAGS.has_key(p.name)):
-
-                #If we encounter one of the nesting reset triggers
-                #peculiar to this tag, or we encounter another tag
-                #that causes nesting to reset, pop up to but not
-                #including that tag.
-                popTo = p.name
-                inclusive = False
-                break
-            p = p.parent
-        if popTo:
-            self._popToTag(popTo, inclusive)
-
-    def unknown_starttag(self, name, attrs, selfClosing=0):
-        #print "Start tag %s: %s" % (name, attrs)
-        if self.quoteStack:
-            #This is not a real tag.
-            #print "<%s> is not real!" % name
-            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
-            self.handle_data('<%s%s>' % (name, attrs))
-            return
-        self.endData()
-
-        if not self.isSelfClosingTag(name) and not selfClosing:
-            self._smartPop(name)
-
-        if self.parseOnlyThese and len(self.tagStack) <= 1 \
-               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
-            return
-
-        tag = Tag(self, name, attrs, self.currentTag, self.previous)
-        if self.previous:
-            self.previous.next = tag
-        self.previous = tag
-        self.pushTag(tag)
-        if selfClosing or self.isSelfClosingTag(name):
-            self.popTag()
-        if name in self.QUOTE_TAGS:
-            #print "Beginning quote (%s)" % name
-            self.quoteStack.append(name)
-            self.literal = 1
-        return tag
-
-    def unknown_endtag(self, name):
-        #print "End tag %s" % name
-        if self.quoteStack and self.quoteStack[-1] != name:
-            #This is not a real end tag.
-            #print "</%s> is not real!" % name
-            self.handle_data('</%s>' % name)
-            return
-        self.endData()
-        self._popToTag(name)
-        if self.quoteStack and self.quoteStack[-1] == name:
-            self.quoteStack.pop()
-            self.literal = (len(self.quoteStack) > 0)
-
-    def handle_data(self, data):
-        self.currentData.append(data)
-
-    def _toStringSubclass(self, text, subclass):
-        """Adds a certain piece of text to the tree as a NavigableString
-        subclass."""
-        self.endData()
-        self.handle_data(text)
-        self.endData(subclass)
-
-    def handle_pi(self, text):
-        """Handle a processing instruction as a ProcessingInstruction
-        object, possibly one with a %SOUP-ENCODING% slot into which an
-        encoding will be plugged later."""
-        if text[:3] == "xml":
-            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
-        self._toStringSubclass(text, ProcessingInstruction)
-
-    def handle_comment(self, text):
-        "Handle comments as Comment objects."
-        self._toStringSubclass(text, Comment)
-
-    def handle_charref(self, ref):
-        "Handle character references as data."
-        if self.convertEntities:
-            data = unichr(int(ref))
-        else:
-            data = '&#%s;' % ref
-        self.handle_data(data)
-
-    def handle_entityref(self, ref):
-        """Handle entity references as data, possibly converting known
-        HTML and/or XML entity references to the corresponding Unicode
-        characters."""
-        data = None
-        if self.convertHTMLEntities:
-            try:
-                data = unichr(name2codepoint[ref])
-            except KeyError:
-                pass
-
-        if not data and self.convertXMLEntities:
-                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
-
-        if not data and self.convertHTMLEntities and \
-            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
-                # TODO: We've got a problem here. We're told this is
-                # an entity reference, but it's not an XML entity
-                # reference or an HTML entity reference. Nonetheless,
-                # the logical thing to do is to pass it through as an
-                # unrecognized entity reference.
-                #
-                # Except: when the input is "&carol;" this function
-                # will be called with input "carol". When the input is
-                # "AT&T", this function will be called with input
-                # "T". We have no way of knowing whether a semicolon
-                # was present originally, so we don't know whether
-                # this is an unknown entity or just a misplaced
-                # ampersand.
-                #
-                # The more common case is a misplaced ampersand, so I
-                # escape the ampersand and omit the trailing semicolon.
-                data = "&amp;%s" % ref
-        if not data:
-            # This case is different from the one above, because we
-            # haven't already gone through a supposedly comprehensive
-            # mapping of entities to Unicode characters. We might not
-            # have gone through any mapping at all. So the chances are
-            # very high that this is a real entity, and not a
-            # misplaced ampersand.
-            data = "&%s;" % ref
-        self.handle_data(data)
-
-    def handle_decl(self, data):
-        "Handle DOCTYPEs and the like as Declaration objects."
-        self._toStringSubclass(data, Declaration)
-
-    def parse_declaration(self, i):
-        """Treat a bogus SGML declaration as raw data. Treat a CDATA
-        declaration as a CData object."""
-        j = None
-        if self.rawdata[i:i+9] == '<![CDATA[':
-             k = self.rawdata.find(']]>', i)
-             if k == -1:
-                 k = len(self.rawdata)
-             data = self.rawdata[i+9:k]
-             j = k+3
-             self._toStringSubclass(data, CData)
-        else:
-            try:
-                j = SGMLParser.parse_declaration(self, i)
-            except SGMLParseError:
-                toHandle = self.rawdata[i:]
-                self.handle_data(toHandle)
-                j = i + len(toHandle)
-        return j
-
-class BeautifulSoup(BeautifulStoneSoup):
-
-    """This parser knows the following facts about HTML:
-
-    * Some tags have no closing tag and should be interpreted as being
-      closed as soon as they are encountered.
-
-    * The text inside some tags (ie. 'script') may contain tags which
-      are not really part of the document and which should be parsed
-      as text, not tags. If you want to parse the text as tags, you can
-      always fetch it and parse it explicitly.
-
-    * Tag nesting rules:
-
-      Most tags can't be nested at all. For instance, the occurance of
-      a <p> tag should implicitly close the previous <p> tag.
-
-       <p>Para1<p>Para2
-        should be transformed into:
-       <p>Para1</p><p>Para2
-
-      Some tags can be nested arbitrarily. For instance, the occurance
-      of a <blockquote> tag should _not_ implicitly close the previous
-      <blockquote> tag.
-
-       Alice said: <blockquote>Bob said: <blockquote>Blah
-        should NOT be transformed into:
-       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
-
-      Some tags can be nested, but the nesting is reset by the
-      interposition of other tags. For instance, a <tr> tag should
-      implicitly close the previous <tr> tag within the same <table>,
-      but not close a <tr> tag in another table.
-
-       <table><tr>Blah<tr>Blah
-        should be transformed into:
-       <table><tr>Blah</tr><tr>Blah
-        but,
-       <tr>Blah<table><tr>Blah
-        should NOT be transformed into
-       <tr>Blah<table></tr><tr>Blah
-
-    Differing assumptions about tag nesting rules are a major source
-    of problems with the BeautifulSoup class. If BeautifulSoup is not
-    treating as nestable a tag your page author treats as nestable,
-    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
-    BeautifulStoneSoup before writing your own subclass."""
-
-    def __init__(self, *args, **kwargs):
-        if not kwargs.has_key('smartQuotesTo'):
-            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
-        kwargs['isHTML'] = True
-        BeautifulStoneSoup.__init__(self, *args, **kwargs)
-
-    SELF_CLOSING_TAGS = buildTagMap(None,
-                                    ('br' , 'hr', 'input', 'img', 'meta',
-                                    'spacer', 'link', 'frame', 'base', 'col'))
-
-    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
-
-    QUOTE_TAGS = {'script' : None, 'textarea' : None}
-
-    #According to the HTML standard, each of these inline tags can
-    #contain another tag of the same type. Furthermore, it's common
-    #to actually use these tags this way.
-    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
-                            'center')
-
-    #According to the HTML standard, these block tags can contain
-    #another tag of the same type. Furthermore, it's common
-    #to actually use these tags this way.
-    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
-
-    #Lists can contain other lists, but there are restrictions.
-    NESTABLE_LIST_TAGS = { 'ol' : [],
-                           'ul' : [],
-                           'li' : ['ul', 'ol'],
-                           'dl' : [],
-                           'dd' : ['dl'],
-                           'dt' : ['dl'] }
-
-    #Tables can contain other tables, but there are restrictions.
-    NESTABLE_TABLE_TAGS = {'table' : [],
-                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
-                           'td' : ['tr'],
-                           'th' : ['tr'],
-                           'thead' : ['table'],
-                           'tbody' : ['table'],
-                           'tfoot' : ['table'],
-                           }
-
-    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
-
-    #If one of these tags is encountered, all tags up to the next tag of
-    #this type are popped.
-    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
-                                     NON_NESTABLE_BLOCK_TAGS,
-                                     NESTABLE_LIST_TAGS,
-                                     NESTABLE_TABLE_TAGS)
-
-    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
-                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
-
-    # Used to detect the charset in a META tag; see start_meta
-    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
-
-    def start_meta(self, attrs):
-        """Beautiful Soup can detect a charset included in a META tag,
-        try to convert the document to that charset, and re-parse the
-        document from the beginning."""
-        httpEquiv = None
-        contentType = None
-        contentTypeIndex = None
-        tagNeedsEncodingSubstitution = False
-
-        for i in range(0, len(attrs)):
-            key, value = attrs[i]
-            key = key.lower()
-            if key == 'http-equiv':
-                httpEquiv = value
-            elif key == 'content':
-                contentType = value
-                contentTypeIndex = i
-
-        if httpEquiv and contentType: # It's an interesting meta tag.
-            match = self.CHARSET_RE.search(contentType)
-            if match:
-                if (self.declaredHTMLEncoding is not None or
-                    self.originalEncoding == self.fromEncoding):
-                    # An HTML encoding was sniffed while converting
-                    # the document to Unicode, or an HTML encoding was
-                    # sniffed during a previous pass through the
-                    # document, or an encoding was specified
-                    # explicitly and it worked. Rewrite the meta tag.
-                    def rewrite(match):
-                        return match.group(1) + "%SOUP-ENCODING%"
-                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
-                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
-                                               newAttr)
-                    tagNeedsEncodingSubstitution = True
-                else:
-                    # This is our first pass through the document.
-                    # Go through it again with the encoding information.
-                    newCharset = match.group(3)
-                    if newCharset and newCharset != self.originalEncoding:
-                        self.declaredHTMLEncoding = newCharset
-                        self._feed(self.declaredHTMLEncoding)
-                        raise StopParsing
-                    pass
-        tag = self.unknown_starttag("meta", attrs)
-        if tag and tagNeedsEncodingSubstitution:
-            tag.containsSubstitutions = True
-
-class StopParsing(Exception):
-    pass
-
-class ICantBelieveItsBeautifulSoup(BeautifulSoup):
-
-    """The BeautifulSoup class is oriented towards skipping over
-    common HTML errors like unclosed tags. However, sometimes it makes
-    errors of its own. For instance, consider this fragment:
-
-     <b>Foo<b>Bar</b></b>
-
-    This is perfectly valid (if bizarre) HTML. However, the
-    BeautifulSoup class will implicitly close the first b tag when it
-    encounters the second 'b'. It will think the author wrote
-    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
-    there's no real-world reason to bold something that's already
-    bold. When it encounters '</b></b>' it will close two more 'b'
-    tags, for a grand total of three tags closed instead of two. This
-    can throw off the rest of your document structure. The same is
-    true of a number of other tags, listed below.
-
-    It's much more common for someone to forget to close a 'b' tag
-    than to actually use nested 'b' tags, and the BeautifulSoup class
-    handles the common case. This class handles the not-co-common
-    case: where you can't believe someone wrote what they did, but
-    it's valid HTML and BeautifulSoup screwed up by assuming it
-    wouldn't be."""
-
-    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
-     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
-      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
-      'big')
-
-    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
-
-    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
-                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
-                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
-
-class MinimalSoup(BeautifulSoup):
-    """The MinimalSoup class is for parsing HTML that contains
-    pathologically bad markup. It makes no assumptions about tag
-    nesting, but it does know which tags are self-closing, that
-    <script> tags contain Javascript and should not be parsed, that
-    META tags may contain encoding information, and so on.
-
-    This also makes it better for subclassing than BeautifulStoneSoup
-    or BeautifulSoup."""
-
-    RESET_NESTING_TAGS = buildTagMap('noscript')
-    NESTABLE_TAGS = {}
-
-class BeautifulSOAP(BeautifulStoneSoup):
-    """This class will push a tag with only a single string child into
-    the tag's parent as an attribute. The attribute's name is the tag
-    name, and the value is the string child. An example should give
-    the flavor of the change:
-
-    <foo><bar>baz</bar></foo>
-     =>
-    <foo bar="baz"><bar>baz</bar></foo>
-
-    You can then access fooTag['bar'] instead of fooTag.barTag.string.
-
-    This is, of course, useful for scraping structures that tend to
-    use subelements instead of attributes, such as SOAP messages. Note
-    that it modifies its input, so don't print the modified version
-    out.
-
-    I'm not sure how many people really want to use this class; let me
-    know if you do. Mainly I like the name."""
-
-    def popTag(self):
-        if len(self.tagStack) > 1:
-            tag = self.tagStack[-1]
-            parent = self.tagStack[-2]
-            parent._getAttrMap()
-            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
-                isinstance(tag.contents[0], NavigableString) and
-                not parent.attrMap.has_key(tag.name)):
-                parent[tag.name] = tag.contents[0]
-        BeautifulStoneSoup.popTag(self)
-
-#Enterprise class names! It has come to our attention that some people
-#think the names of the Beautiful Soup parser classes are too silly
-#and "unprofessional" for use in enterprise screen-scraping. We feel
-#your pain! For such-minded folk, the Beautiful Soup Consortium And
-#All-Night Kosher Bakery recommends renaming this file to
-#"RobustParser.py" (or, in cases of extreme enterprisiness,
-#"RobustParserBeanInterface.class") and using the following
-#enterprise-friendly class aliases:
-class RobustXMLParser(BeautifulStoneSoup):
-    pass
-class RobustHTMLParser(BeautifulSoup):
-    pass
-class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
-    pass
-class RobustInsanelyWackAssHTMLParser(MinimalSoup):
-    pass
-class SimplifyingSOAPParser(BeautifulSOAP):
-    pass
-
-######################################################
-#
-# Bonus library: Unicode, Dammit
-#
-# This class forces XML data into a standard format (usually to UTF-8
-# or Unicode).  It is heavily based on code from Mark Pilgrim's
-# Universal Feed Parser. It does not rewrite the XML or HTML to
-# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
-# (XML) and BeautifulSoup.start_meta (HTML).
-
-# Autodetects character encodings.
-# Download from http://chardet.feedparser.org/
-try:
-    import chardet
-#    import chardet.constants
-#    chardet.constants._debug = 1
-except ImportError:
-    chardet = None
-
-# cjkcodecs and iconv_codec make Python know about more character encodings.
-# Both are available from http://cjkpython.i18n.org/
-# They're built in if you use Python 2.4.
-try:
-    import cjkcodecs.aliases
-except ImportError:
-    pass
-try:
-    import iconv_codec
-except ImportError:
-    pass
-
-class UnicodeDammit:
-    """A class for detecting the encoding of a *ML document and
-    converting it to a Unicode string. If the source encoding is
-    windows-1252, can replace MS smart quotes with their HTML or XML
-    equivalents."""
-
-    # This dictionary maps commonly seen values for "charset" in HTML
-    # meta tags to the corresponding Python codec names. It only covers
-    # values that aren't in Python's aliases and can't be determined
-    # by the heuristics in find_codec.
-    CHARSET_ALIASES = { "macintosh" : "mac-roman",
-                        "x-sjis" : "shift-jis" }
-
-    def __init__(self, markup, overrideEncodings=[],
-                 smartQuotesTo='xml', isHTML=False):
-        self.declaredHTMLEncoding = None
-        self.markup, documentEncoding, sniffedEncoding = \
-                     self._detectEncoding(markup, isHTML)
-        self.smartQuotesTo = smartQuotesTo
-        self.triedEncodings = []
-        if markup == '' or isinstance(markup, unicode):
-            self.originalEncoding = None
-            self.unicode = unicode(markup)
-            return
-
-        u = None
-        for proposedEncoding in overrideEncodings:
-            u = self._convertFrom(proposedEncoding)
-            if u: break
-        if not u:
-            for proposedEncoding in (documentEncoding, sniffedEncoding):
-                u = self._convertFrom(proposedEncoding)
-                if u: break
-
-        # If no luck and we have auto-detection library, try that:
-        if not u and chardet and not isinstance(self.markup, unicode):
-            u = self._convertFrom(chardet.detect(self.markup)['encoding'])
-
-        # As a last resort, try utf-8 and windows-1252:
-        if not u:
-            for proposed_encoding in ("utf-8", "windows-1252"):
-                u = self._convertFrom(proposed_encoding)
-                if u: break
-
-        self.unicode = u
-        if not u: self.originalEncoding = None
-
-    def _subMSChar(self, orig):
-        """Changes a MS smart quote character to an XML or HTML
-        entity."""
-        sub = self.MS_CHARS.get(orig)
-        if isinstance(sub, tuple):
-            if self.smartQuotesTo == 'xml':
-                sub = '&#x%s;' % sub[1]
-            else:
-                sub = '&%s;' % sub[0]
-        return sub
-
-    def _convertFrom(self, proposed):
-        proposed = self.find_codec(proposed)
-        if not proposed or proposed in self.triedEncodings:
-            return None
-        self.triedEncodings.append(proposed)
-        markup = self.markup
-
-        # Convert smart quotes to HTML if coming from an encoding
-        # that might have them.
-        if self.smartQuotesTo and proposed.lower() in("windows-1252",
-                                                      "iso-8859-1",
-                                                      "iso-8859-2"):
-            markup = re.compile("([\x80-\x9f])").sub \
-                     (lambda(x): self._subMSChar(x.group(1)),
-                      markup)
-
-        try:
-            # print "Trying to convert document to %s" % proposed
-            u = self._toUnicode(markup, proposed)
-            self.markup = u
-            self.originalEncoding = proposed
-        except Exception, e:
-            # print "That didn't work!"
-            # print e
-            return None
-        #print "Correct encoding: %s" % proposed
-        return self.markup
-
-    def _toUnicode(self, data, encoding):
-        '''Given a string and its encoding, decodes the string into Unicode.
-        %encoding is a string recognized by encodings.aliases'''
-
-        # strip Byte Order Mark (if present)
-        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
-               and (data[2:4] != '\x00\x00'):
-            encoding = 'utf-16be'
-            data = data[2:]
-        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
-                 and (data[2:4] != '\x00\x00'):
-            encoding = 'utf-16le'
-            data = data[2:]
-        elif data[:3] == '\xef\xbb\xbf':
-            encoding = 'utf-8'
-            data = data[3:]
-        elif data[:4] == '\x00\x00\xfe\xff':
-            encoding = 'utf-32be'
-            data = data[4:]
-        elif data[:4] == '\xff\xfe\x00\x00':
-            encoding = 'utf-32le'
-            data = data[4:]
-        newdata = unicode(data, encoding)
-        return newdata
-
-    def _detectEncoding(self, xml_data, isHTML=False):
-        """Given a document, tries to detect its XML encoding."""
-        xml_encoding = sniffed_xml_encoding = None
-        try:
-            if xml_data[:4] == '\x4c\x6f\xa7\x94':
-                # EBCDIC
-                xml_data = self._ebcdic_to_ascii(xml_data)
-            elif xml_data[:4] == '\x00\x3c\x00\x3f':
-                # UTF-16BE
-                sniffed_xml_encoding = 'utf-16be'
-                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
-            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
-                     and (xml_data[2:4] != '\x00\x00'):
-                # UTF-16BE with BOM
-                sniffed_xml_encoding = 'utf-16be'
-                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
-            elif xml_data[:4] == '\x3c\x00\x3f\x00':
-                # UTF-16LE
-                sniffed_xml_encoding = 'utf-16le'
-                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
-            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
-                     (xml_data[2:4] != '\x00\x00'):
-                # UTF-16LE with BOM
-                sniffed_xml_encoding = 'utf-16le'
-                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
-            elif xml_data[:4] == '\x00\x00\x00\x3c':
-                # UTF-32BE
-                sniffed_xml_encoding = 'utf-32be'
-                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
-            elif xml_data[:4] == '\x3c\x00\x00\x00':
-                # UTF-32LE
-                sniffed_xml_encoding = 'utf-32le'
-                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
-            elif xml_data[:4] == '\x00\x00\xfe\xff':
-                # UTF-32BE with BOM
-                sniffed_xml_encoding = 'utf-32be'
-                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
-            elif xml_data[:4] == '\xff\xfe\x00\x00':
-                # UTF-32LE with BOM
-                sniffed_xml_encoding = 'utf-32le'
-                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
-            elif xml_data[:3] == '\xef\xbb\xbf':
-                # UTF-8 with BOM
-                sniffed_xml_encoding = 'utf-8'
-                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
-            else:
-                sniffed_xml_encoding = 'ascii'
-                pass
-        except:
-            xml_encoding_match = None
-        xml_encoding_match = re.compile(
-            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
-        if not xml_encoding_match and isHTML:
-            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
-            xml_encoding_match = regexp.search(xml_data)
-        if xml_encoding_match is not None:
-            xml_encoding = xml_encoding_match.groups()[0].lower()
-            if isHTML:
-                self.declaredHTMLEncoding = xml_encoding
-            if sniffed_xml_encoding and \
-               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
-                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
-                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
-                                 'utf16', 'u16')):
-                xml_encoding = sniffed_xml_encoding
-        return xml_data, xml_encoding, sniffed_xml_encoding
-
-
-    def find_codec(self, charset):
-        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
-               or (charset and self._codec(charset.replace("-", ""))) \
-               or (charset and self._codec(charset.replace("-", "_"))) \
-               or charset
-
-    def _codec(self, charset):
-        if not charset: return charset
-        codec = None
-        try:
-            codecs.lookup(charset)
-            codec = charset
-        except (LookupError, ValueError):
-            pass
-        return codec
-
-    EBCDIC_TO_ASCII_MAP = None
-    def _ebcdic_to_ascii(self, s):
-        c = self.__class__
-        if not c.EBCDIC_TO_ASCII_MAP:
-            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
-                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
-                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
-                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
-                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
-                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
-                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
-                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
-                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
-                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
-                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
-                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
-                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
-                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
-                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
-                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
-                    250,251,252,253,254,255)
-            import string
-            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
-            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
-        return s.translate(c.EBCDIC_TO_ASCII_MAP)
-
-    MS_CHARS = { '\x80' : ('euro', '20AC'),
-                 '\x81' : ' ',
-                 '\x82' : ('sbquo', '201A'),
-                 '\x83' : ('fnof', '192'),
-                 '\x84' : ('bdquo', '201E'),
-                 '\x85' : ('hellip', '2026'),
-                 '\x86' : ('dagger', '2020'),
-                 '\x87' : ('Dagger', '2021'),
-                 '\x88' : ('circ', '2C6'),
-                 '\x89' : ('permil', '2030'),
-                 '\x8A' : ('Scaron', '160'),
-                 '\x8B' : ('lsaquo', '2039'),
-                 '\x8C' : ('OElig', '152'),
-                 '\x8D' : '?',
-                 '\x8E' : ('#x17D', '17D'),
-                 '\x8F' : '?',
-                 '\x90' : '?',
-                 '\x91' : ('lsquo', '2018'),
-                 '\x92' : ('rsquo', '2019'),
-                 '\x93' : ('ldquo', '201C'),
-                 '\x94' : ('rdquo', '201D'),
-                 '\x95' : ('bull', '2022'),
-                 '\x96' : ('ndash', '2013'),
-                 '\x97' : ('mdash', '2014'),
-                 '\x98' : ('tilde', '2DC'),
-                 '\x99' : ('trade', '2122'),
-                 '\x9a' : ('scaron', '161'),
-                 '\x9b' : ('rsaquo', '203A'),
-                 '\x9c' : ('oelig', '153'),
-                 '\x9d' : '?',
-                 '\x9e' : ('#x17E', '17E'),
-                 '\x9f' : ('Yuml', ''),}
-
-#######################################################################
-
-
-#By default, act as an HTML pretty-printer.
-if __name__ == '__main__':
-    import sys
-    soup = BeautifulSoup(sys.stdin)
-    print soup.prettify()
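# Editor's note: a minimal sketch, not part of this change, of how the removed
# BeautifulSoup 3 module was typically used (mirroring the deleted __main__
# block and the UnicodeDammit class above); the import path assumes lib/ is on
# sys.path, and the code is Python 2 only, like the module being deleted.
from BeautifulSoup import BeautifulSoup, UnicodeDammit

html = "<p>Para1<p>Para2"
soup = BeautifulSoup(html)      # the nesting rules close the first <p> implicitly
print soup.prettify()

dammit = UnicodeDammit(html, isHTML=True)
print dammit.unicode, dammit.originalEncoding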
diff --git a/lib/dateutil/__init__.py b/lib/dateutil/__init__.py
index 1020e72919e76c0c9e03640bd1b6c5ec5c4c7fee..743669c702e2e48984b63c0a7d4d9f774e195555 100644
--- a/lib/dateutil/__init__.py
+++ b/lib/dateutil/__init__.py
@@ -1,10 +1,2 @@
 # -*- coding: utf-8 -*-
-"""
-Copyright (c) 2003-2010  Gustavo Niemeyer <gustavo@niemeyer.net>
-
-This module offers extensions to the standard Python
-datetime module.
-"""
-__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
-__license__ = "Simplified BSD"
-__version__ = "2.2"
+__version__ = "2.4.2"
diff --git a/lib/dateutil/easter.py b/lib/dateutil/easter.py
index d8a38844f9e3fcf0efff9a5675f08e3bb5885d83..8d30c4ebdab098c9f35d934e6e91d5e7a49e4764 100644
--- a/lib/dateutil/easter.py
+++ b/lib/dateutil/easter.py
@@ -1,18 +1,17 @@
+# -*- coding: utf-8 -*-
 """
-Copyright (c) 2003-2007  Gustavo Niemeyer <gustavo@niemeyer.net>
-
-This module offers extensions to the standard Python
-datetime module.
+This module offers a generic easter computing method for any given year, using
+Western, Orthodox or Julian algorithms.
 """
-__license__ = "Simplified BSD"
 
 import datetime
 
 __all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
 
-EASTER_JULIAN   = 1
+EASTER_JULIAN = 1
 EASTER_ORTHODOX = 2
-EASTER_WESTERN  = 3
+EASTER_WESTERN = 3
+
 
 def easter(year, method=EASTER_WESTERN):
     """
@@ -24,7 +23,7 @@ def easter(year, method=EASTER_WESTERN):
 
     This algorithm implements three different easter
     calculation methods:
-    
+
     1 - Original calculation in Julian calendar, valid in
         dates after 326 AD
     2 - Original method, with date converted to Gregorian
@@ -39,7 +38,7 @@ def easter(year, method=EASTER_WESTERN):
     EASTER_WESTERN  = 3
 
     The default method is method 3.
-    
+
     More about the algorithm may be found at:
 
     http://users.chariot.net.au/~gmarts/eastalg.htm
@@ -68,24 +67,23 @@ def easter(year, method=EASTER_WESTERN):
     e = 0
     if method < 3:
         # Old method
-        i = (19*g+15)%30
-        j = (y+y//4+i)%7
+        i = (19*g + 15) % 30
+        j = (y + y//4 + i) % 7
         if method == 2:
             # Extra dates to convert Julian to Gregorian date
             e = 10
             if y > 1600:
-                e = e+y//100-16-(y//100-16)//4
+                e = e + y//100 - 16 - (y//100 - 16)//4
     else:
         # New method
         c = y//100
-        h = (c-c//4-(8*c+13)//25+19*g+15)%30
-        i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
-        j = (y+y//4+i+2-c+c//4)%7
+        h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
+        i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
+        j = (y + y//4 + i + 2 - c + c//4) % 7
 
     # p can be from -6 to 56 corresponding to dates 22 March to 23 May
     # (later dates apply to method 2, although 23 May never actually occurs)
-    p = i-j+e
-    d = 1+(p+27+(p+6)//40)%31
-    m = 3+(p+26)//30
+    p = i - j + e
+    d = 1 + (p + 27 + (p + 6)//40) % 31
+    m = 3 + (p + 26)//30
     return datetime.date(int(y), int(m), int(d))
-
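# Editor's note: a short usage sketch for easter(), whose algorithm is cleaned
# up in the hunks above; the import path assumes lib/ is on sys.path, and the
# expected dates are the standard values for 2015.
from dateutil.easter import easter, EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN

easter(2015)                    # datetime.date(2015, 4, 5)  - Western (default)
easter(2015, EASTER_ORTHODOX)   # datetime.date(2015, 4, 12) - Gregorian calendar
easter(2015, EASTER_JULIAN)     # datetime.date(2015, 3, 30) - Julian calendar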
diff --git a/lib/dateutil/parser.py b/lib/dateutil/parser.py
index aef836238c8d8ce70c5b50dd73340c264a2127eb..762e5db1b1f912d21e0109e8d278a27db674c561 100644
--- a/lib/dateutil/parser.py
+++ b/lib/dateutil/parser.py
@@ -1,50 +1,75 @@
 # -*- coding:iso-8859-1 -*-
 """
-Copyright (c) 2003-2007  Gustavo Niemeyer <gustavo@niemeyer.net>
-
-This module offers extensions to the standard Python
-datetime module.
+This module offers a generic date/time string parser which is able to parse
+most known formats to represent a date and/or time.
+
+This module attempts to be forgiving with regards to unlikely input formats,
+returning a datetime object even for dates which are ambiguous. If an element
+of a date/time stamp is omitted, the following rules are applied:
+- If AM or PM is left unspecified, a 24-hour clock is assumed; however, an hour
+  on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
+  specified.
+- If a time zone is omitted, a timezone-naive datetime is returned.
+
+If any other elements are missing, they are taken from the
+:class:`datetime.datetime` object passed to the parameter ``default``. If this
+results in a day number exceeding the valid number of days per month, one can
+fall back to the last day of the month by setting the
+``fallback_on_invalid_day`` parameter to ``True``.
+
+Also provided is the ``smart_defaults`` option, which attempts to fill in the
+missing elements from context. If specified, the logic is:
+- If the omitted element is smaller than the largest specified element, select
+  the *earliest* time matching the specified conditions (so ``"June 2010"`` is
+  interpreted as ``June 1, 2010 0:00:00``, and the somewhat strange
+  ``"Feb 1997 3:15 PM"`` is interpreted as ``February 1, 1997 15:15:00``).
+- If the omitted element is larger than the largest specified element, select
+  the *most recent* time matching the specified conditions (e.g. parsing
+  ``"May"`` in June 2015 returns May 1st, 2015, whereas parsing it in April
+  2015 returns May 1st, 2014). If the ``date_in_future`` flag is set, this
+  logic is inverted, and the *next* time matching the specified conditions is
+  returned instead.
+
+Additional resources about date/time string formats can be found below:
+
+- `A summary of the international standard date and time notation
+  <http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
+- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_
+- `Time Formats (Planetary Rings Node) <http://pds-rings.seti.org/tools/time_formats.html>`_
+- `CPAN ParseDate module
+  <http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
+- `Java SimpleDateFormat Class
+  <https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
 """
 from __future__ import unicode_literals
-__license__ = "Simplified BSD"
-
 
 import datetime
 import string
 import time
-import sys
-import os
 import collections
-
-try:
-    from io import StringIO
-except ImportError:
-    from io import StringIO
+import re
+from io import StringIO
+from calendar import monthrange, isleap
 
 from six import text_type, binary_type, integer_types
 
 from . import relativedelta
 from . import tz
 
-
 __all__ = ["parse", "parserinfo"]
 
 
-# Some pointers:
-#
-# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
-# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
-# http://www.w3.org/TR/NOTE-datetime
-# http://ringmaster.arc.nasa.gov/tools/time_formats.html
-# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
-# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
-
-
 class _timelex(object):
+    # Fractional seconds are sometimes split by a comma
+    _split_decimal = re.compile("([\.,])")
 
     def __init__(self, instream):
+        if isinstance(instream, binary_type):
+            instream = instream.decode()
+
         if isinstance(instream, text_type):
             instream = StringIO(instream)
+
         self.instream = instream
         self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                           'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
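# Editor's note: a hedged sketch of the defaulting rules described in the new
# module docstring above. ``smart_defaults`` is specific to this vendored
# parser (it is not necessarily available in upstream dateutil), and the
# results shown follow the documented rules for default=datetime(2010, 1, 30).
import datetime
from dateutil.parser import parse

default = datetime.datetime(2010, 1, 30)

# Missing elements are taken from ``default`` (here the day, 30):
parse("June 2010", default=default)
# -> datetime.datetime(2010, 6, 30, 0, 0)

# With smart_defaults, an omitted smaller element snaps to the earliest match:
parse("June 2010", default=default, smart_defaults=True)
# -> datetime.datetime(2010, 6, 1, 0, 0)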
@@ -57,25 +82,47 @@ class _timelex(object):
         self.eof = False
 
     def get_token(self):
+        """
+        This function breaks the time string into lexical units (tokens), which
+        can be parsed by the parser. Lexical units are demarcated by changes in
+        the character set, so any continuous run of letters is considered one
+        unit, as is any continuous run of digits.
+
+        The main complication arises from the fact that dots ('.') can be used
+        both as separators (e.g. "Sep.20.2009") and as decimal points (e.g.
+        "4:30:21.447"). It is therefore necessary to read the full context of
+        any dot-separated string before breaking it into tokens; to handle the
+        ambiguous cases, this function maintains a "token stack" for the times
+        when several tokens have to be produced at once.
+        """
         if self.tokenstack:
             return self.tokenstack.pop(0)
+
         seenletters = False
         token = None
         state = None
         wordchars = self.wordchars
         numchars = self.numchars
         whitespace = self.whitespace
+
         while not self.eof:
+            # We only realize that we've reached the end of a token when we
+            # find a character that's not part of the current token - since
+            # that character may be part of the next token, it's stored in the
+            # charstack.
             if self.charstack:
                 nextchar = self.charstack.pop(0)
             else:
                 nextchar = self.instream.read(1)
                 while nextchar == '\x00':
                     nextchar = self.instream.read(1)
+
             if not nextchar:
                 self.eof = True
                 break
             elif not state:
+                # First character of the token - determines if we're starting
+                # to parse a word, a number or something else.
                 token = nextchar
                 if nextchar in wordchars:
                     state = 'a'
@@ -83,10 +130,12 @@ class _timelex(object):
                     state = '0'
                 elif nextchar in whitespace:
                     token = ' '
-                    break # emit token
+                    break  # emit token
                 else:
-                    break # emit token
+                    break  # emit token
             elif state == 'a':
+                # If we've already started reading a word, we keep reading
+                # letters until we find something that's not part of a word.
                 seenletters = True
                 if nextchar in wordchars:
                     token += nextchar
@@ -95,17 +144,21 @@ class _timelex(object):
                     state = 'a.'
                 else:
                     self.charstack.append(nextchar)
-                    break # emit token
+                    break  # emit token
             elif state == '0':
+                # If we've already started reading a number, we keep reading
+                # numbers until we find something that doesn't fit.
                 if nextchar in numchars:
                     token += nextchar
-                elif nextchar == '.':
+                elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
                     token += nextchar
                     state = '0.'
                 else:
                     self.charstack.append(nextchar)
-                    break # emit token
+                    break  # emit token
             elif state == 'a.':
+                # If we've seen some letters and a dot separator, continue
+                # parsing, and the tokens will be broken up later.
                 seenletters = True
                 if nextchar == '.' or nextchar in wordchars:
                     token += nextchar
@@ -114,8 +167,10 @@ class _timelex(object):
                     state = '0.'
                 else:
                     self.charstack.append(nextchar)
-                    break # emit token
+                    break  # emit token
             elif state == '0.':
+                # If we've seen at least one dot separator, keep going, we'll
+                # break up the tokens later.
                 if nextchar == '.' or nextchar in numchars:
                     token += nextchar
                 elif nextchar in wordchars and token[-1] == '.':
@@ -123,15 +178,19 @@ class _timelex(object):
                     state = 'a.'
                 else:
                     self.charstack.append(nextchar)
-                    break # emit token
-        if (state in ('a.', '0.') and
-            (seenletters or token.count('.') > 1 or token[-1] == '.')):
-            l = token.split('.')
+                    break  # emit token
+
+        if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
+                                       token[-1] in '.,')):
+            l = self._split_decimal.split(token)
             token = l[0]
             for tok in l[1:]:
-                self.tokenstack.append('.')
                 if tok:
                     self.tokenstack.append(tok)
+
+        if state == '0.' and token.count('.') == 0:
+            token = token.replace(',', '.')
+
         return token
 
     def __iter__(self):
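# Editor's note: a small illustration of the tokenizer behaviour documented in
# get_token() above; _timelex is a private helper, so this is only a review aid.
from dateutil.parser import _timelex

_timelex.split("Sep.20.2009 4:30:21.447")
# -> ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']
# Dots between words and numbers are emitted as separator tokens, while the
# dot inside "21.447" is kept in a single token as a decimal point.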
@@ -141,6 +200,7 @@ class _timelex(object):
         token = self.get_token()
         if token is None:
             raise StopIteration
+
         return token
 
     def next(self):
@@ -170,6 +230,22 @@ class _resultbase(object):
 
 
 class parserinfo(object):
+    """
+    Class which handles what inputs are accepted. Subclass this to customize
+    the language and acceptable values for each parameter.
+
+    :param dayfirst:
+            Whether to interpret the first value in an ambiguous 3-integer date
+            (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+            ``yearfirst`` is set to ``True``, this distinguishes between YDM
+            and YMD. Default is ``False``.
+
+    :param yearfirst:
+            Whether to interpret the first value in an ambiguous 3-integer date
+            (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+            to be the year, otherwise the last number is taken to be the year.
+            Default is ``False``.
+    """
 
     # m from a.m/p.m, t from ISO T separator
     JUMP = [" ", ".", ",", ";", "-", "/", "'",
@@ -183,18 +259,18 @@ class parserinfo(object):
                 ("Fri", "Friday"),
                 ("Sat", "Saturday"),
                 ("Sun", "Sunday")]
-    MONTHS   = [("Jan", "January"),
-                ("Feb", "February"),
-                ("Mar", "March"),
-                ("Apr", "April"),
-                ("May", "May"),
-                ("Jun", "June"),
-                ("Jul", "July"),
-                ("Aug", "August"),
-                ("Sep", "Sept", "September"),
-                ("Oct", "October"),
-                ("Nov", "November"),
-                ("Dec", "December")]
+    MONTHS = [("Jan", "January"),
+              ("Feb", "February"),
+              ("Mar", "March"),
+              ("Apr", "April"),
+              ("May", "May"),
+              ("Jun", "June"),
+              ("Jul", "July"),
+              ("Aug", "August"),
+              ("Sep", "Sept", "September"),
+              ("Oct", "October"),
+              ("Nov", "November"),
+              ("Dec", "December")]
     HMS = [("h", "hour", "hours"),
            ("m", "minute", "minutes"),
            ("s", "second", "seconds")]
@@ -204,7 +280,7 @@ class parserinfo(object):
     PERTAIN = ["of"]
     TZOFFSET = {}
 
-    def __init__(self, dayfirst=False, yearfirst=False):
+    def __init__(self, dayfirst=False, yearfirst=False, smart_defaults=False):
         self._jump = self._convert(self.JUMP)
         self._weekdays = self._convert(self.WEEKDAYS)
         self._months = self._convert(self.MONTHS)
@@ -215,14 +291,14 @@ class parserinfo(object):
 
         self.dayfirst = dayfirst
         self.yearfirst = yearfirst
+        self.smart_defaults = smart_defaults
 
         self._year = time.localtime().tm_year
-        self._century = self._year//100*100
+        self._century = self._year // 100 * 100
 
     def _convert(self, lst):
         dct = {}
-        for i in range(len(lst)):
-            v = lst[i]
+        for i, v in enumerate(lst):
             if isinstance(v, tuple):
                 for v in v:
                     dct[v.lower()] = i
@@ -244,7 +320,7 @@ class parserinfo(object):
     def month(self, name):
         if len(name) >= 3:
             try:
-                return self._months[name.lower()]+1
+                return self._months[name.lower()] + 1
             except KeyError:
                 pass
         return None
@@ -270,12 +346,13 @@ class parserinfo(object):
     def tzoffset(self, name):
         if name in self._utczone:
             return 0
+
         return self.TZOFFSET.get(name)
 
     def convertyear(self, year):
         if year < 100:
             year += self._century
-            if abs(year-self._year) >= 50:
+            if abs(year - self._year) >= 50:
                 if year < self._year:
                     year += 100
                 else:
@@ -286,6 +363,7 @@ class parserinfo(object):
         # move to info
         if res.year is not None:
             res.year = self.convertyear(res.year)
+
         if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
             res.tzname = "UTC"
             res.tzoffset = 0
@@ -295,37 +373,211 @@ class parserinfo(object):
 
 
 class parser(object):
-
     def __init__(self, info=None):
         self.info = info or parserinfo()
 
-    def parse(self, timestr, default=None,
-                    ignoretz=False, tzinfos=None,
-                    **kwargs):
-        if not default:
+    def parse(self, timestr, default=None, ignoretz=False, tzinfos=None,
+              smart_defaults=None, date_in_future=False,
+              fallback_on_invalid_day=None, **kwargs):
+        """
+        Parse the date/time string into a :class:`datetime.datetime` object.
+
+        :param timestr:
+            Any date/time string using the supported formats.
+
+        :param default:
+            The default datetime object. If this is a datetime object and not
+            ``None``, elements specified in ``timestr`` replace the
+            corresponding elements in the default object, unless
+            ``smart_defaults`` is set to ``True``, in which case timestamps
+            are, to the extent necessary, calculated relative to this date.
+
+        :param smart_defaults:
+            If using smart defaults, the ``default`` parameter is treated as
+            the effective parsing date/time, and the context of the datetime
+            string is determined relative to ``default``. If ``None``, this
+            parameter is inherited from the :class:`parserinfo` object.
+
+        :param date_in_future:
+            If ``smart_defaults`` is ``True``, the parser assumes by default
+            that the timestamp refers to a date in the past, and will return
+            the beginning of the most recent timespan which matches the time
+            string (e.g. if ``default`` is March 3rd, 2013,  "Feb" parses to
+            "Feb 1, 2013" and "May 3" parses to May 3rd, 2012). Setting this
+            parameter to ``True`` inverts this assumption, and returns the
+            beginning of the *next* matching timespan.
+
+        :param fallback_on_invalid_day:
+            If ``True``, an otherwise invalid date such as "Feb 30" or
+            "June 32" falls back to the last day of the month. If ``False``,
+            the parser is strict about parsing otherwise valid dates that
+            would turn up as invalid only because of the fallback rules
+            (e.g. "Feb 2010" run with a default of January 30, 2010 and
+            ``smart_defaults`` set to ``False`` would throw an error, rather
+            than falling back to the end of February). If ``None`` or
+            unspecified, the date falls back to the most recent valid date
+            only if the invalid date is created as a result of an unspecified
+            day in the time string.
+
+        :param ignoretz:
+            If set ``True``, time zones in parsed strings are ignored and a
+            naive :class:`datetime.datetime` object is returned.
+
+        :param tzinfos:
+            Additional time zone names / aliases which may be present in the
+            string. This argument maps time zone names (and optionally offsets
+            from those time zones) to time zones. This parameter can be a
+            dictionary with timezone aliases mapping time zone names to time
+            zones or a function taking two parameters (``tzname`` and
+            ``tzoffset``) and returning a time zone.
+
+            The timezones to which the names are mapped can be an integer
+            offset from UTC in minutes or a :class:`tzinfo` object.
+
+            .. doctest::
+               :options: +NORMALIZE_WHITESPACE
+
+                >>> from dateutil.parser import parse
+                >>> from dateutil.tz import gettz
+                >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
+                >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+                datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
+                >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+                datetime.datetime(2012, 1, 19, 17, 21,
+                                  tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+            This parameter is ignored if ``ignoretz`` is set.
+
+        :param **kwargs:
+            Keyword arguments as passed to ``_parse()``.
+
+        :return:
+            Returns a :class:`datetime.datetime` object or, if the
+            ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+            first element being a :class:`datetime.datetime` object, the second
+            a tuple containing the fuzzy tokens.
+
+        :raises ValueError:
+            Raised for invalid or unknown string format, if the provided
+            :class:`tzinfo` is not in a valid format, or if an invalid date
+            would be created.
+
+        :raises OverflowError:
+            Raised if the parsed date exceeds the largest valid C integer on
+            your system.
+        """
+
+        if smart_defaults is None:
+            smart_defaults = self.info.smart_defaults
+
+        if default is None:
+            effective_dt = datetime.datetime.now()
             default = datetime.datetime.now().replace(hour=0, minute=0,
                                                       second=0, microsecond=0)
-
+        else:
+            effective_dt = default
 
         res, skipped_tokens = self._parse(timestr, **kwargs)
 
         if res is None:
-            raise ValueError("unknown string format")
+            raise ValueError("Unknown string format")
+
         repl = {}
-        for attr in ["year", "month", "day", "hour",
-                     "minute", "second", "microsecond"]:
+        for attr in ("year", "month", "day", "hour",
+                     "minute", "second", "microsecond"):
             value = getattr(res, attr)
             if value is not None:
                 repl[attr] = value
+
+        # Choose the correct fallback position if requested by the
+        # ``smart_defaults`` parameter.
+        if smart_defaults:
+            # Determine if it refers to this year, last year or next year
+            if res.year is None:
+                if res.month is not None:
+                    # Explicitly deal with leap year problems
+                    if res.month == 2 and (res.day is not None and
+                                           res.day == 29):
+
+                        ly_offset = 4 if date_in_future else -4
+                        next_year = 4 * (default.year // 4)
+
+                        if date_in_future:
+                            next_year += ly_offset
+
+                        if not isleap(next_year):
+                            next_year += ly_offset
+
+                        if not isleap(default.year):
+                            default = default.replace(year=next_year)
+                    elif date_in_future:
+                        next_year = default.year + 1
+                    else:
+                        next_year = default.year - 1
+
+                    if ((res.month == default.month and res.day is not None and
+                         ((res.day < default.day and date_in_future) or
+                          (res.day > default.day and not date_in_future))) or
+                        ((res.month < default.month and date_in_future) or
+                         (res.month > default.month and not date_in_future))):
+
+                        default = default.replace(year=next_year)
+
+            # Select a proper month
+            if res.month is None:
+                if res.year is not None:
+                    default = default.replace(month=1)
+
+                # I'm not sure if this is even possible.
+                if res.day is not None:
+                    if res.day < default.day and date_in_future:
+                        default += relativedelta.relativedelta(months=1)
+                    elif res.day > default.day and not date_in_future:
+                        default -= relativedelta.relativedelta(months=1)
+
+            if res.day is None:
+                # Determine if it's today, tomorrow or yesterday.
+                if res.year is None and res.month is None:
+                    t_repl = {}
+                    for key, val in repl.items():
+                        if key in ('hour', 'minute', 'second', 'microsecond'):
+                            t_repl[key] = val
+
+                    stime = effective_dt.replace(**t_repl)
+
+                    if stime < effective_dt and date_in_future:
+                        default += datetime.timedelta(days=1)
+                    elif stime > effective_dt and not date_in_future:
+                        default -= datetime.timedelta(days=1)
+                else:
+                    # Otherwise it's the beginning of the month
+                    default = default.replace(day=1)
+
+        if fallback_on_invalid_day or (fallback_on_invalid_day is None and
+                                       'day' not in repl):
+            # If the default day exceeds the last day of the month, fall back to
+            # the end of the month.
+            cyear = default.year if res.year is None else res.year
+            cmonth = default.month if res.month is None else res.month
+            cday = default.day if res.day is None else res.day
+
+            if cday > monthrange(cyear, cmonth)[1]:
+                repl['day'] = monthrange(cyear, cmonth)[1]
+
         ret = default.replace(**repl)
+
         if res.weekday is not None and not res.day:
             ret = ret+relativedelta.relativedelta(weekday=res.weekday)
+
         if not ignoretz:
-            if isinstance(tzinfos, collections.Callable) or tzinfos and res.tzname in tzinfos:
+            if (isinstance(tzinfos, collections.Callable) or
+                    tzinfos and res.tzname in tzinfos):
+
                 if isinstance(tzinfos, collections.Callable):
                     tzdata = tzinfos(res.tzname, res.tzoffset)
                 else:
                     tzdata = tzinfos.get(res.tzname)
+
                 if isinstance(tzdata, datetime.tzinfo):
                     tzinfo = tzdata
                 elif isinstance(tzdata, text_type):
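# Editor's note: a hedged sketch of the fallback_on_invalid_day handling added
# in the hunk above; the parameter is specific to this vendored parser. With
# default=datetime(2010, 1, 30), parsing "Feb 2010" leaves the day unspecified,
# so the day (30) comes from the default and would produce an invalid "Feb 30".
import datetime
from dateutil.parser import parse

default = datetime.datetime(2010, 1, 30)

parse("Feb 2010", default=default)
# -> datetime.datetime(2010, 2, 28, 0, 0)   falls back to the end of the month

parse("Feb 2010", default=default, fallback_on_invalid_day=False)
# raises ValueError, because the fallback is disabled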
@@ -333,8 +585,8 @@ class parser(object):
                 elif isinstance(tzdata, integer_types):
                     tzinfo = tz.tzoffset(res.tzname, tzdata)
                 else:
-                    raise ValueError("offset must be tzinfo subclass, " \
-                                      "tz string, or int offset")
+                    raise ValueError("Offset must be tzinfo subclass, "
+                                     "tz string, or int offset.")
                 ret = ret.replace(tzinfo=tzinfo)
             elif res.tzname and res.tzname in time.tzname:
                 ret = ret.replace(tzinfo=tz.tzlocal())
@@ -343,28 +595,70 @@ class parser(object):
             elif res.tzoffset:
                 ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
 
-        if skipped_tokens:
+        if kwargs.get('fuzzy_with_tokens', False):
             return ret, skipped_tokens
-
-        return ret
+        else:
+            return ret
 
     class _result(_resultbase):
         __slots__ = ["year", "month", "day", "weekday",
                      "hour", "minute", "second", "microsecond",
-                     "tzname", "tzoffset"]
-
-    def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, fuzzy_with_tokens=False):
+                     "tzname", "tzoffset", "ampm"]
+
+    def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
+               fuzzy_with_tokens=False):
+        """
+        Private method which performs the heavy lifting of parsing, called from
+        ``parse()``, which passes on its ``kwargs`` to this function.
+
+        :param timestr:
+            The string to parse.
+
+        :param dayfirst:
+            Whether to interpret the first value in an ambiguous 3-integer date
+            (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+            ``yearfirst`` is set to ``True``, this distinguishes between YDM
+            and YMD. If set to ``None``, this value is retrieved from the
+            current :class:`parserinfo` object (which itself defaults to
+            ``False``).
+
+        :param yearfirst:
+            Whether to interpret the first value in an ambiguous 3-integer date
+            (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+            to be the year, otherwise the last number is taken to be the year.
+            If this is set to ``None``, the value is retrieved from the current
+            :class:`parserinfo` object (which itself defaults to ``False``).
+
+        :param fuzzy:
+            Whether to allow fuzzy parsing, allowing for string like "Today is
+            January 1, 2047 at 8:21:00AM".
+
+        :param fuzzy_with_tokens:
+            If ``True``, ``fuzzy`` is automatically set to True, and the parser
+            will return a tuple where the first element is the parsed
+            :class:`datetime.datetime` datetimestamp and the second element is
+            a tuple containing the portions of the string which were ignored:
+
+            .. doctest::
+
+                >>> from dateutil.parser import parse
+                >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+                (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+        """
         if fuzzy_with_tokens:
             fuzzy = True
 
         info = self.info
+
         if dayfirst is None:
             dayfirst = info.dayfirst
+
         if yearfirst is None:
             yearfirst = info.yearfirst
-        res = self._result()
-        l = _timelex.split(timestr)
 
+        res = self._result()
+        l = _timelex.split(timestr)         # Splits the timestr into tokens
 
         # keep up with the last token skipped so we can recombine
         # consecutively skipped tokens (-2 for when i begins at 0).
@@ -372,7 +666,6 @@ class parser(object):
         skipped_tokens = list()
 
         try:
-
             # year/month/day list
             ymd = []
 
@@ -394,17 +687,21 @@ class parser(object):
                     # Token is a number
                     len_li = len(l[i])
                     i += 1
+
                     if (len(ymd) == 3 and len_li in (2, 4)
-                        and (i >= len_l or (l[i] != ':' and
-                                            info.hms(l[i]) is None))):
+                        and res.hour is None and (i >= len_l or (l[i] != ':' and
+                                                  info.hms(l[i]) is None))):
                         # 19990101T23[59]
                         s = l[i-1]
                         res.hour = int(s[:2])
+
                         if len_li == 4:
                             res.minute = int(s[2:])
+
                     elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
                         # YYMMDD or HHMMSS[.ss]
                         s = l[i-1]
+
                         if not ymd and l[i-1].find('.') == -1:
                             ymd.append(info.convertyear(int(s[:2])))
                             ymd.append(int(s[2:4]))
@@ -414,12 +711,14 @@ class parser(object):
                             res.hour = int(s[:2])
                             res.minute = int(s[2:4])
                             res.second, res.microsecond = _parsems(s[4:])
+
                     elif len_li == 8:
                         # YYYYMMDD
                         s = l[i-1]
                         ymd.append(int(s[:4]))
                         ymd.append(int(s[4:6]))
                         ymd.append(int(s[6:]))
+
                     elif len_li in (12, 14):
                         # YYYYMMDDhhmm[ss]
                         s = l[i-1]
@@ -428,30 +727,42 @@ class parser(object):
                         ymd.append(int(s[6:8]))
                         res.hour = int(s[8:10])
                         res.minute = int(s[10:12])
+
                         if len_li == 14:
                             res.second = int(s[12:])
+
                     elif ((i < len_l and info.hms(l[i]) is not None) or
                           (i+1 < len_l and l[i] == ' ' and
                            info.hms(l[i+1]) is not None)):
+
                         # HH[ ]h or MM[ ]m or SS[.ss][ ]s
                         if l[i] == ' ':
                             i += 1
+
                         idx = info.hms(l[i])
+
                         while True:
                             if idx == 0:
                                 res.hour = int(value)
-                                if value%1:
-                                    res.minute = int(60*(value%1))
+
+                                if value % 1:
+                                    res.minute = int(60*(value % 1))
+
                             elif idx == 1:
                                 res.minute = int(value)
-                                if value%1:
-                                    res.second = int(60*(value%1))
+
+                                if value % 1:
+                                    res.second = int(60*(value % 1))
+
                             elif idx == 2:
                                 res.second, res.microsecond = \
                                     _parsems(value_repr)
+
                             i += 1
+
                             if i >= len_l or idx == 2:
                                 break
+
                             # 12h00
                             try:
                                 value_repr = l[i]
@@ -461,37 +772,49 @@ class parser(object):
                             else:
                                 i += 1
                                 idx += 1
+
                                 if i < len_l:
                                     newidx = info.hms(l[i])
+
                                     if newidx is not None:
                                         idx = newidx
-                    elif i == len_l and l[i-2] == ' ' and info.hms(l[i-3]) is not None:
+
+                    elif (i == len_l and l[i-2] == ' ' and
+                          info.hms(l[i-3]) is not None):
                         # X h MM or X m SS
                         idx = info.hms(l[i-3]) + 1
+
                         if idx == 1:
                             res.minute = int(value)
-                            if value%1:
-                                res.second = int(60*(value%1))
+
+                            if value % 1:
+                                res.second = int(60*(value % 1))
                             elif idx == 2:
                                 res.second, res.microsecond = \
-                                        _parsems(value_repr)
+                                    _parsems(value_repr)
                                 i += 1
+
                     elif i+1 < len_l and l[i] == ':':
                         # HH:MM[:SS[.ss]]
                         res.hour = int(value)
                         i += 1
                         value = float(l[i])
                         res.minute = int(value)
-                        if value%1:
-                            res.second = int(60*(value%1))
+
+                        if value % 1:
+                            res.second = int(60*(value % 1))
+
                         i += 1
+
                         if i < len_l and l[i] == ':':
                             res.second, res.microsecond = _parsems(l[i+1])
                             i += 2
+
                     elif i < len_l and l[i] in ('-', '/', '.'):
                         sep = l[i]
                         ymd.append(int(value))
                         i += 1
+
                         if i < len_l and not info.jump(l[i]):
                             try:
                                 # 01-01[-01]
@@ -499,47 +822,57 @@ class parser(object):
                             except ValueError:
                                 # 01-Jan[-01]
                                 value = info.month(l[i])
+
                                 if value is not None:
                                     ymd.append(value)
                                     assert mstridx == -1
                                     mstridx = len(ymd)-1
                                 else:
-                                    return None
+                                    return None, None
+
                             i += 1
+
                             if i < len_l and l[i] == sep:
                                 # We have three members
                                 i += 1
                                 value = info.month(l[i])
+
                                 if value is not None:
                                     ymd.append(value)
                                     mstridx = len(ymd)-1
                                     assert mstridx == -1
                                 else:
                                     ymd.append(int(l[i]))
+
                                 i += 1
                     elif i >= len_l or info.jump(l[i]):
                         if i+1 < len_l and info.ampm(l[i+1]) is not None:
                             # 12 am
                             res.hour = int(value)
+
                             if res.hour < 12 and info.ampm(l[i+1]) == 1:
                                 res.hour += 12
                             elif res.hour == 12 and info.ampm(l[i+1]) == 0:
                                 res.hour = 0
+
                             i += 1
                         else:
                             # Year, month or day
                             ymd.append(int(value))
                         i += 1
                     elif info.ampm(l[i]) is not None:
+
                         # 12am
                         res.hour = int(value)
+
                         if res.hour < 12 and info.ampm(l[i]) == 1:
                             res.hour += 12
                         elif res.hour == 12 and info.ampm(l[i]) == 0:
                             res.hour = 0
                         i += 1
+
                     elif not fuzzy:
-                        return None
+                        return None, None
                     else:
                         i += 1
                     continue
@@ -557,6 +890,7 @@ class parser(object):
                     ymd.append(value)
                     assert mstridx == -1
                     mstridx = len(ymd)-1
+
                     i += 1
                     if i < len_l:
                         if l[i] in ('-', '/'):
@@ -565,11 +899,13 @@ class parser(object):
                             i += 1
                             ymd.append(int(l[i]))
                             i += 1
+
                             if i < len_l and l[i] == sep:
                                 # Jan-01-99
                                 i += 1
                                 ymd.append(int(l[i]))
                                 i += 1
+
                         elif (i+3 < len_l and l[i] == l[i+2] == ' '
                               and info.pertain(l[i+1])):
                             # Jan of 01
@@ -588,17 +924,47 @@ class parser(object):
                 # Check am/pm
                 value = info.ampm(l[i])
                 if value is not None:
-                    if value == 1 and res.hour < 12:
-                        res.hour += 12
-                    elif value == 0 and res.hour == 12:
-                        res.hour = 0
+                    # For fuzzy parsing, 'a' or 'am' (both valid English words)
+                    # may erroneously trigger the AM/PM flag. Deal with that
+                    # here.
+                    val_is_ampm = True
+
+                    # If there's already an AM/PM flag, this one isn't one.
+                    if fuzzy and res.ampm is not None:
+                        val_is_ampm = False
+
+                    # If AM/PM is found and hour is not, raise a ValueError
+                    if res.hour is None:
+                        if fuzzy:
+                            val_is_ampm = False
+                        else:
+                            raise ValueError('No hour specified with ' +
+                                             'AM or PM flag.')
+                    elif not 0 <= res.hour <= 12:
+                        # If AM/PM is found, it's a 12-hour clock, so raise
+                        # an error for an invalid hour range.
+                        if fuzzy:
+                            val_is_ampm = False
+                        else:
+                            raise ValueError('Invalid hour specified for ' +
+                                             '12-hour clock.')
+
+                    if val_is_ampm:
+                        if value == 1 and res.hour < 12:
+                            res.hour += 12
+                        elif value == 0 and res.hour == 12:
+                            res.hour = 0
+
+                        res.ampm = value
+
                     i += 1
                     continue
 
                 # Check for a timezone name
                 if (res.hour is not None and len(l[i]) <= 5 and
-                    res.tzname is None and res.tzoffset is None and
-                    not [x for x in l[i] if x not in string.ascii_uppercase]):
+                        res.tzname is None and res.tzoffset is None and
+                        not [x for x in l[i] if x not in
+                             string.ascii_uppercase]):
                     res.tzname = l[i]
                     res.tzoffset = info.tzoffset(res.tzname)
                     i += 1
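
A minimal sketch of what the reworked AM/PM guard above means in practice, assuming the patched dateutil is importable in the usual way; the standalone "a" is treated as a skippable token rather than an AM flag, because no hour has been parsed yet:

    from dateutil.parser import parse

    # Expected result: the date is extracted and the stray "a" is ignored.
    parse("I have a meeting on March 1, 1997", fuzzy=True)
    # datetime.datetime(1997, 3, 1, 0, 0)
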
@@ -623,6 +989,7 @@ class parser(object):
                     signal = (-1, 1)[l[i] == '+']
                     i += 1
                     len_li = len(l[i])
+
                     if len_li == 4:
                         # -0300
                         res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
@@ -634,8 +1001,9 @@ class parser(object):
                         # -[0]3
                         res.tzoffset = int(l[i][:2])*3600
                     else:
-                        return None
+                        return None, None
                     i += 1
+
                     res.tzoffset *= signal
 
                     # Look for a timezone name between parenthesis
@@ -643,7 +1011,7 @@ class parser(object):
                         info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
                         3 <= len(l[i+2]) <= 5 and
                         not [x for x in l[i+2]
-                                if x not in string.ascii_uppercase]):
+                             if x not in string.ascii_uppercase]):
                         # -0300 (BRST)
                         res.tzname = l[i+2]
                         i += 4
@@ -651,7 +1019,7 @@ class parser(object):
 
                 # Check jumps
                 if not (info.jump(l[i]) or fuzzy):
-                    return None
+                    return None, None
 
                 if last_skipped_token_i == i - 1:
                     # recombine the tokens
@@ -666,17 +1034,19 @@ class parser(object):
             len_ymd = len(ymd)
             if len_ymd > 3:
                 # More than three members!?
-                return None
+                return None, None
             elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
                 # One member, or two members with a month string
                 if mstridx != -1:
                     res.month = ymd[mstridx]
                     del ymd[mstridx]
+
                 if len_ymd > 1 or mstridx == -1:
                     if ymd[0] > 31:
                         res.year = ymd[0]
                     else:
                         res.day = ymd[0]
+
             elif len_ymd == 2:
                 # Two members with numbers
                 if ymd[0] > 31:
@@ -691,7 +1061,8 @@ class parser(object):
                 else:
                     # 01-13
                     res.month, res.day = ymd
-            if len_ymd == 3:
+
+            elif len_ymd == 3:
                 # Three members
                 if mstridx == 0:
                     res.month, res.day, res.year = ymd
@@ -704,6 +1075,7 @@ class parser(object):
                         # Give precendence to day-first, since
                         # two-digit years is usually hand-written.
                         res.day, res.month, res.year = ymd
+
                 elif mstridx == 2:
                     # WTF!?
                     if ymd[1] > 31:
@@ -712,6 +1084,7 @@ class parser(object):
                     else:
                         # 99-01-Jan
                         res.year, res.day, res.month = ymd
+
                 else:
                     if ymd[0] > 31 or \
                        (yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
@@ -725,23 +1098,114 @@ class parser(object):
                         res.month, res.day, res.year = ymd
 
         except (IndexError, ValueError, AssertionError):
-            return None
+            return None, None
 
         if not info.validate(res):
-            return None
+            return None, None
 
         if fuzzy_with_tokens:
             return res, tuple(skipped_tokens)
-
-        return res, None
+        else:
+            return res, None
 
 DEFAULTPARSER = parser()
+
+
 def parse(timestr, parserinfo=None, **kwargs):
-    # Python 2.x support: datetimes return their string presentation as
-    # bytes in 2.x and unicode in 3.x, so it's reasonable to expect that
-    # the parser will get both kinds. Internally we use unicode only.
-    if isinstance(timestr, binary_type):
-        timestr = timestr.decode()
+    """
+
+    Parse a string in one of the supported formats, using the
+    ``parserinfo`` parameters.
+
+    :param timestr:
+        A string containing a date/time stamp.
+
+    :param parserinfo:
+        A :class:`parserinfo` object containing parameters for the parser.
+        If ``None``, the default arguments to the :class:`parserinfo`
+        constructor are used.
+
+    The ``**kwargs`` parameter takes the following keyword arguments:
+
+    :param default:
+        The default datetime object; if this is a datetime object and not
+        ``None``, elements specified in ``timestr`` replace elements in the
+        default object.
+
+    :param ignoretz:
+        If set to ``True``, time zones in parsed strings are ignored and a naive
+        :class:`datetime` object is returned.
+
+    :param tzinfos:
+            Additional time zone names / aliases which may be present in the
+            string. This argument maps time zone names (and optionally offsets
+            from those time zones) to time zones. This parameter can be a
+            dictionary with timezone aliases mapping time zone names to time
+            zones or a function taking two parameters (``tzname`` and
+            ``tzoffset``) and returning a time zone.
+
+            The timezones to which the names are mapped can be an integer
+            offset from UTC in seconds or a :class:`tzinfo` object.
+
+            .. doctest::
+               :options: +NORMALIZE_WHITESPACE
+
+                >>> from dateutil.parser import parse
+                >>> from dateutil.tz import gettz
+                >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
+                >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+                datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
+                >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+                datetime.datetime(2012, 1, 19, 17, 21,
+                                  tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+            This parameter is ignored if ``ignoretz`` is set.
+
+    :param dayfirst:
+        Whether to interpret the first value in an ambiguous 3-integer date
+        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+        ``yearfirst`` is set to ``True``, this distinguishes between YDM and
+        YMD. If set to ``None``, this value is retrieved from the current
+        :class:`parserinfo` object (which itself defaults to ``False``).
+
+    :param yearfirst:
+        Whether to interpret the first value in an ambiguous 3-integer date
+        (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
+        be the year, otherwise the last number is taken to be the year. If
+        this is set to ``None``, the value is retrieved from the current
+        :class:`parserinfo` object (which itself defaults to ``False``).
+
+    :param fuzzy:
+        Whether to allow fuzzy parsing, allowing for strings like "Today is
+        January 1, 2047 at 8:21:00AM".
+
+    :param fuzzy_with_tokens:
+        If ``True``, ``fuzzy`` is automatically set to ``True``, and the
+        parser will return a tuple where the first element is the parsed
+        :class:`datetime.datetime` object and the second element is
+        a tuple containing the portions of the string which were ignored:
+
+        .. doctest::
+
+            >>> from dateutil.parser import parse
+            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+    :return:
+        Returns a :class:`datetime.datetime` object or, if the
+        ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+        first element being a :class:`datetime.datetime` object, the second
+        a tuple containing the fuzzy tokens.
+
+    :raises ValueError:
+        Raised for invalid or unknown string format, if the provided
+        :class:`tzinfo` is not in a valid format, or if an invalid date
+        would be created.
+
+    :raises OverflowError:
+        Raised if the parsed date exceeds the largest valid C integer on
+        your system.
+    """
     if parserinfo:
         return parser(parserinfo).parse(timestr, **kwargs)
     else:
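
The ``dayfirst``/``yearfirst`` options documented above only matter for ambiguous all-numeric dates; a short sketch of the expected behaviour (standard dateutil import assumed):

    from dateutil.parser import parse

    parse("01/05/09")                   # month first (default) -> datetime.datetime(2009, 1, 5, 0, 0)
    parse("01/05/09", dayfirst=True)    # day first             -> datetime.datetime(2009, 5, 1, 0, 0)
    parse("01/05/09", yearfirst=True)   # year first            -> datetime.datetime(2001, 5, 9, 0, 0)
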
@@ -779,7 +1243,7 @@ class _tzparser(object):
                 # BRST+3[BRDT[+2]]
                 j = i
                 while j < len_l and not [x for x in l[j]
-                                            if x in "0123456789:,-+"]:
+                                         if x in "0123456789:,-+"]:
                     j += 1
                 if j != i:
                     if not res.stdabbr:
@@ -789,8 +1253,8 @@ class _tzparser(object):
                         offattr = "dstoffset"
                         res.dstabbr = "".join(l[i:j])
                     i = j
-                    if (i < len_l and
-                        (l[i] in ('+', '-') or l[i][0] in "0123456789")):
+                    if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
+                                       "0123456789")):
                         if l[i] in ('+', '-'):
                             # Yes, that's right.  See the TZ variable
                             # documentation.
@@ -801,8 +1265,8 @@ class _tzparser(object):
                         len_li = len(l[i])
                         if len_li == 4:
                             # -0300
-                            setattr(res, offattr,
-                                    (int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
+                            setattr(res, offattr, (int(l[i][:2])*3600 +
+                                                   int(l[i][2:])*60)*signal)
                         elif i+1 < len_l and l[i+1] == ':':
                             # -03:00
                             setattr(res, offattr,
@@ -822,7 +1286,8 @@ class _tzparser(object):
 
             if i < len_l:
                 for j in range(i, len_l):
-                    if l[j] == ';': l[j] = ','
+                    if l[j] == ';':
+                        l[j] = ','
 
                 assert l[i] == ','
 
@@ -831,7 +1296,7 @@ class _tzparser(object):
             if i >= len_l:
                 pass
             elif (8 <= l.count(',') <= 9 and
-                not [y for x in l[i:] if x != ','
+                  not [y for x in l[i:] if x != ','
                        for y in x if y not in "0123456789"]):
                 # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
                 for x in (res.start, res.end):
@@ -845,7 +1310,7 @@ class _tzparser(object):
                     i += 2
                     if value:
                         x.week = value
-                        x.weekday = (int(l[i])-1)%7
+                        x.weekday = (int(l[i])-1) % 7
                     else:
                         x.day = int(l[i])
                     i += 2
@@ -861,7 +1326,7 @@ class _tzparser(object):
             elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
                   not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
                                                      '.', '-', ':')
-                         for y in x if y not in "0123456789"]):
+                       for y in x if y not in "0123456789"]):
                 for x in (res.start, res.end):
                     if l[i] == 'J':
                         # non-leap year day (1 based)
@@ -880,7 +1345,7 @@ class _tzparser(object):
                         i += 1
                         assert l[i] in ('-', '.')
                         i += 1
-                        x.weekday = (int(l[i])-1)%7
+                        x.weekday = (int(l[i])-1) % 7
                     else:
                         # year day (zero based)
                         x.yday = int(l[i])+1
@@ -921,6 +1386,8 @@ class _tzparser(object):
 
 
 DEFAULTTZPARSER = _tzparser()
+
+
 def _parsetz(tzstr):
     return DEFAULTTZPARSER.parse(tzstr)
 
diff --git a/lib/dateutil/relativedelta.py b/lib/dateutil/relativedelta.py
index 4393bcbcde2252c338ff0cdc3937b2cd6ec2d47d..84d5f834902bd18e39ba3d2052617fd9308c0936 100644
--- a/lib/dateutil/relativedelta.py
+++ b/lib/dateutil/relativedelta.py
@@ -1,11 +1,4 @@
-"""
-Copyright (c) 2003-2010  Gustavo Niemeyer <gustavo@niemeyer.net>
-
-This module offers extensions to the standard Python
-datetime module.
-"""
-__license__ = "Simplified BSD"
-
+# -*- coding: utf-8 -*-
 import datetime
 import calendar
 
@@ -13,6 +6,7 @@ from six import integer_types
 
 __all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
 
+
 class weekday(object):
     __slots__ = ["weekday", "n"]
 
@@ -43,25 +37,35 @@ class weekday(object):
 
 MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
 
+
 class relativedelta(object):
     """
-The relativedelta type is based on the specification of the excelent
-work done by M.-A. Lemburg in his mx.DateTime extension. However,
-notice that this type does *NOT* implement the same algorithm as
+The relativedelta type is based on the specification of the excellent
+work done by M.-A. Lemburg in his
+`mx.DateTime <http://www.egenix.com/files/python/mxDateTime.html>`_ extension.
+However, notice that this type does *NOT* implement the same algorithm as
 his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
 
-There's two different ways to build a relativedelta instance. The
-first one is passing it two date/datetime classes:
+There are two different ways to build a relativedelta instance. The
+first one is passing it two date/datetime classes::
 
     relativedelta(datetime1, datetime2)
 
-And the other way is to use the following keyword arguments:
+The second one is passing it any number of the following keyword arguments::
+
+    relativedelta(arg1=x, arg2=y, arg3=z...)
 
     year, month, day, hour, minute, second, microsecond:
-        Absolute information.
+        Absolute information (argument is singular); adding or subtracting a
+        relativedelta with absolute information does not perform an arithmetic
+        operation, but rather REPLACES the corresponding value in the
+        original datetime with the value(s) in relativedelta.
 
     years, months, weeks, days, hours, minutes, seconds, microseconds:
-        Relative information, may be negative.
+        Relative information, may be negative (argument is plural); adding
+        or subtracting a relativedelta with relative information performs
+        the corresponding arithmetic operation on the original datetime value
+        with the information in the relativedelta.
 
     weekday:
         One of the weekday instances (MO, TU, etc). These instances may
@@ -80,26 +84,26 @@ And the other way is to use the following keyword arguments:
 
 Here is the behavior of operations with relativedelta:
 
-1) Calculate the absolute year, using the 'year' argument, or the
+1. Calculate the absolute year, using the 'year' argument, or the
    original datetime year, if the argument is not present.
 
-2) Add the relative 'years' argument to the absolute year.
+2. Add the relative 'years' argument to the absolute year.
 
-3) Do steps 1 and 2 for month/months.
+3. Do steps 1 and 2 for month/months.
 
-4) Calculate the absolute day, using the 'day' argument, or the
+4. Calculate the absolute day, using the 'day' argument, or the
    original datetime day, if the argument is not present. Then,
    subtract from the day until it fits in the year and month
    found after their operations.
 
-5) Add the relative 'days' argument to the absolute day. Notice
+5. Add the relative 'days' argument to the absolute day. Notice
    that the 'weeks' argument is multiplied by 7 and added to
    'days'.
 
-6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
+6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
    microsecond/microseconds.
 
-7) If the 'weekday' argument is present, calculate the weekday,
+7. If the 'weekday' argument is present, calculate the weekday,
    with the given (wday, nth) tuple. wday is the index of the
    weekday (0-6, 0=Mon), and nth is the number of weeks to add
    forward or backward, depending on its signal. Notice that if
@@ -114,9 +118,14 @@ Here is the behavior of operations with relativedelta:
                  yearday=None, nlyearday=None,
                  hour=None, minute=None, second=None, microsecond=None):
         if dt1 and dt2:
-            if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)):
+            # datetime is a subclass of date. So both must be date
+            if not (isinstance(dt1, datetime.date) and
+                    isinstance(dt2, datetime.date)):
                 raise TypeError("relativedelta only diffs datetime/date")
-            if not type(dt1) == type(dt2): #isinstance(dt1, type(dt2)):
+            # We allow two dates, or two datetimes, so we coerce them to be
+            # of the same type
+            if (isinstance(dt1, datetime.datetime) !=
+                    isinstance(dt2, datetime.datetime)):
                 if not isinstance(dt1, datetime.datetime):
                     dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                 elif not isinstance(dt2, datetime.datetime):
@@ -158,7 +167,7 @@ Here is the behavior of operations with relativedelta:
         else:
             self.years = years
             self.months = months
-            self.days = days+weeks*7
+            self.days = days + weeks * 7
             self.leapdays = leapdays
             self.hours = hours
             self.minutes = minutes
@@ -185,7 +194,8 @@ Here is the behavior of operations with relativedelta:
                 if yearday > 59:
                     self.leapdays = -1
             if yday:
-                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
+                ydayidx = [31, 59, 90, 120, 151, 181, 212,
+                           243, 273, 304, 334, 366]
                 for idx, ydays in enumerate(ydayidx):
                     if yday <= ydays:
                         self.month = idx+1
@@ -225,13 +235,20 @@ Here is the behavior of operations with relativedelta:
             div, mod = divmod(self.months*s, 12)
             self.months = mod*s
             self.years += div*s
-        if (self.hours or self.minutes or self.seconds or self.microseconds or
-            self.hour is not None or self.minute is not None or
-            self.second is not None or self.microsecond is not None):
+        if (self.hours or self.minutes or self.seconds or self.microseconds
+                or self.hour is not None or self.minute is not None or
+                self.second is not None or self.microsecond is not None):
             self._has_time = 1
         else:
             self._has_time = 0
 
+    @property
+    def weeks(self):
+        return self.days // 7
+    @weeks.setter
+    def weeks(self, value):
+        self.days = self.days - (self.weeks * 7) + value*7
+
     def _set_months(self, months):
         self.months = months
         if abs(self.months) > 11:
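
The new ``weeks`` property above simply re-expresses ``days``; a quick sketch of the arithmetic it implies:

    from dateutil.relativedelta import relativedelta

    rd = relativedelta(days=17)
    rd.weeks        # 17 // 7 == 2
    rd.weeks = 3    # days becomes 17 - 2*7 + 3*7 == 24
    rd.days         # 24
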
@@ -244,22 +261,24 @@ Here is the behavior of operations with relativedelta:
 
     def __add__(self, other):
         if isinstance(other, relativedelta):
-            return relativedelta(years=other.years+self.years,
-                             months=other.months+self.months,
-                             days=other.days+self.days,
-                             hours=other.hours+self.hours,
-                             minutes=other.minutes+self.minutes,
-                             seconds=other.seconds+self.seconds,
-                             microseconds=other.microseconds+self.microseconds,
-                             leapdays=other.leapdays or self.leapdays,
-                             year=other.year or self.year,
-                             month=other.month or self.month,
-                             day=other.day or self.day,
-                             weekday=other.weekday or self.weekday,
-                             hour=other.hour or self.hour,
-                             minute=other.minute or self.minute,
-                             second=other.second or self.second,
-                             microsecond=other.microsecond or self.microsecond)
+            return self.__class__(years=other.years+self.years,
+                                 months=other.months+self.months,
+                                 days=other.days+self.days,
+                                 hours=other.hours+self.hours,
+                                 minutes=other.minutes+self.minutes,
+                                 seconds=other.seconds+self.seconds,
+                                 microseconds=(other.microseconds +
+                                               self.microseconds),
+                                 leapdays=other.leapdays or self.leapdays,
+                                 year=other.year or self.year,
+                                 month=other.month or self.month,
+                                 day=other.day or self.day,
+                                 weekday=other.weekday or self.weekday,
+                                 hour=other.hour or self.hour,
+                                 minute=other.minute or self.minute,
+                                 second=other.second or self.second,
+                                 microsecond=(other.microsecond or
+                                              self.microsecond))
         if not isinstance(other, datetime.date):
             raise TypeError("unsupported type for add operation")
         elif self._has_time and not isinstance(other, datetime.datetime):
@@ -295,9 +314,9 @@ Here is the behavior of operations with relativedelta:
             weekday, nth = self.weekday.weekday, self.weekday.n or 1
             jumpdays = (abs(nth)-1)*7
             if nth > 0:
-                jumpdays += (7-ret.weekday()+weekday)%7
+                jumpdays += (7-ret.weekday()+weekday) % 7
             else:
-                jumpdays += (ret.weekday()-weekday)%7
+                jumpdays += (ret.weekday()-weekday) % 7
                 jumpdays *= -1
             ret += datetime.timedelta(days=jumpdays)
         return ret
@@ -311,7 +330,7 @@ Here is the behavior of operations with relativedelta:
     def __sub__(self, other):
         if not isinstance(other, relativedelta):
             raise TypeError("unsupported type for sub operation")
-        return relativedelta(years=self.years-other.years,
+        return self.__class__(years=self.years-other.years,
                              months=self.months-other.months,
                              days=self.days-other.days,
                              hours=self.hours-other.hours,
@@ -329,7 +348,7 @@ Here is the behavior of operations with relativedelta:
                              microsecond=self.microsecond or other.microsecond)
 
     def __neg__(self):
-        return relativedelta(years=-self.years,
+        return self.__class__(years=-self.years,
                              months=-self.months,
                              days=-self.days,
                              hours=-self.hours,
@@ -363,10 +382,12 @@ Here is the behavior of operations with relativedelta:
                     self.minute is None and
                     self.second is None and
                     self.microsecond is None)
+    # Compatibility with Python 2.x
+    __nonzero__ = __bool__
 
     def __mul__(self, other):
         f = float(other)
-        return relativedelta(years=int(self.years*f),
+        return self.__class__(years=int(self.years*f),
                              months=int(self.months*f),
                              days=int(self.days*f),
                              hours=int(self.hours*f),
@@ -402,6 +423,7 @@ Here is the behavior of operations with relativedelta:
                 self.hours == other.hours and
                 self.minutes == other.minutes and
                 self.seconds == other.seconds and
+                self.microseconds == other.microseconds and
                 self.leapdays == other.leapdays and
                 self.year == other.year and
                 self.month == other.month and
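
To make the singular-versus-plural rule from the relativedelta docstring concrete, a small sketch (standard imports assumed; the commented results are the expected values):

    from datetime import datetime
    from dateutil.relativedelta import relativedelta

    dt = datetime(2012, 3, 15)
    dt + relativedelta(month=1)    # absolute: replaces the month -> datetime.datetime(2012, 1, 15, 0, 0)
    dt + relativedelta(months=1)   # relative: adds one month     -> datetime.datetime(2012, 4, 15, 0, 0)
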
diff --git a/lib/dateutil/rrule.py b/lib/dateutil/rrule.py
index ad4d3ba70c4e2b94dd7e2ce2b9eb852d5134e5af..4b92b68e019b93bf48dc53bc1484b133630c05e6 100644
--- a/lib/dateutil/rrule.py
+++ b/lib/dateutil/rrule.py
@@ -1,21 +1,22 @@
+# -*- coding: utf-8 -*-
 """
-Copyright (c) 2003-2010  Gustavo Niemeyer <gustavo@niemeyer.net>
-
-This module offers extensions to the standard Python
-datetime module.
+The rrule module offers a small, complete, and very fast, implementation of
+the recurrence rules documented in the
+`iCalendar RFC <http://www.ietf.org/rfc/rfc2445.txt>`_,
+including support for caching of results.
 """
-__license__ = "Simplified BSD"
-
 import itertools
 import datetime
 import calendar
+import sys
+
 try:
-    import _thread
+    from math import gcd
 except ImportError:
-    import thread as _thread
-import sys
+    from fractions import gcd
 
 from six import advance_iterator, integer_types
+from six.moves import _thread
 
 __all__ = ["rrule", "rruleset", "rrulestr",
            "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
@@ -23,7 +24,7 @@ __all__ = ["rrule", "rruleset", "rrulestr",
            "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
 
 # Every mask is 7 days longer to handle cross-year weekly periods.
-M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
                  [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
 M365MASK = list(M366MASK)
 M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
@@ -39,6 +40,8 @@ del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
 MDAY365MASK = tuple(MDAY365MASK)
 M365MASK = tuple(M365MASK)
 
+FREQNAMES = ['YEARLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY','SECONDLY']
+
 (YEARLY,
  MONTHLY,
  WEEKLY,
@@ -51,6 +54,7 @@ M365MASK = tuple(M365MASK)
 easter = None
 parser = None
 
+
 class weekday(object):
     __slots__ = ["weekday", "n"]
 
@@ -83,12 +87,13 @@ class weekday(object):
 
 MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
 
+
 class rrulebase(object):
     def __init__(self, cache=False):
         if cache:
             self._cache = []
             self._cache_lock = _thread.allocate_lock()
-            self._cache_gen  = self._iter()
+            self._cache_gen = self._iter()
             self._cache_complete = False
         else:
             self._cache = None
@@ -163,11 +168,17 @@ class rrulebase(object):
 
     # __len__() introduces a large performance penality.
     def count(self):
+        """ Returns the number of recurrences in this set. It will have to go
+            through the whole recurrence, if this hasn't been done before. """
         if self._len is None:
-            for x in self: pass
+            for x in self:
+                pass
         return self._len
 
     def before(self, dt, inc=False):
+        """ Returns the last recurrence before the given datetime instance. The
+            inc keyword defines what happens if dt is an occurrence. With
+            inc=True, if dt itself is an occurrence, it will be returned. """
         if self._cache_complete:
             gen = self._cache
         else:
@@ -186,6 +197,9 @@ class rrulebase(object):
         return last
 
     def after(self, dt, inc=False):
+        """ Returns the first recurrence after the given datetime instance. The
+            inc keyword defines what happens if dt is an occurrence. With
+            inc=True, if dt itself is an occurrence, it will be returned.  """
         if self._cache_complete:
             gen = self._cache
         else:
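
A brief sketch of ``count()``, ``before()`` and ``after()`` as documented above, using a plain daily rule (the commented values are the expected occurrences):

    from datetime import datetime
    from dateutil.rrule import rrule, DAILY

    rule = rrule(DAILY, dtstart=datetime(2014, 1, 1), count=10)
    rule.count()                                # 10
    rule.before(datetime(2014, 1, 5))           # datetime.datetime(2014, 1, 4, 0, 0)
    rule.after(datetime(2014, 1, 5))            # datetime.datetime(2014, 1, 6, 0, 0)
    rule.after(datetime(2014, 1, 5), inc=True)  # datetime.datetime(2014, 1, 5, 0, 0)
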
@@ -200,7 +214,52 @@ class rrulebase(object):
                     return i
         return None
 
-    def between(self, after, before, inc=False):
+    def xafter(self, dt, count=None, inc=False):
+        """
+        Generator which yields up to `count` recurrences after the given
+        datetime instance, equivalent to `after`.
+
+        :param dt:
+            The datetime at which to start generating recurrences.
+
+        :param count:
+            The maximum number of recurrences to generate. If `None` (default),
+            dates are generated until the recurrence rule is exhausted.
+
+        :param inc:
+            If `dt` is an instance of the rule and `inc` is `True`, it is
+            included in the output.
+
+        :yields: Yields a sequence of `datetime` objects.
+        """
+
+        if self._cache_complete:
+            gen = self._cache
+        else:
+            gen = self
+
+        # Select the comparison function
+        if inc:
+            comp = lambda dc, dtc: dc >= dtc
+        else:
+            comp = lambda dc, dtc: dc > dtc
+
+        # Generate dates
+        n = 0
+        for d in gen:
+            if comp(d, dt):
+                yield d
+
+                if count is not None:
+                    n += 1
+                    if n >= count:
+                        break
+
+    def between(self, after, before, inc=False, count=1):
+        """ Returns all the occurrences of the rrule between after and before.
+        The inc keyword defines what happens if after and/or before are
+        themselves occurrences. With inc=True, they will be included in the
+        list, if they are found in the recurrence set. """
         if self._cache_complete:
             gen = self._cache
         else:
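
The new ``xafter()`` generator and the ``between()`` helper described above can be sketched the same way (expected values shown in comments):

    from datetime import datetime
    from dateutil.rrule import rrule, DAILY

    rule = rrule(DAILY, dtstart=datetime(2014, 1, 1), count=30)
    list(rule.xafter(datetime(2014, 1, 10), count=3))
    # [datetime(2014, 1, 11, 0, 0), datetime(2014, 1, 12, 0, 0), datetime(2014, 1, 13, 0, 0)]
    rule.between(datetime(2014, 1, 10), datetime(2014, 1, 13))
    # occurrences strictly between the endpoints: Jan 11 and Jan 12 (pass inc=True to include them)
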
@@ -229,7 +288,93 @@ class rrulebase(object):
                     l.append(i)
         return l
 
+
 class rrule(rrulebase):
+    """
+    That's the base of the rrule operation. It accepts all the keywords
+    defined in the RFC as its constructor parameters (except byday,
+    which was renamed to byweekday) and more. The constructor prototype is::
+
+            rrule(freq)
+
+    Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
+    or SECONDLY.
+
+    Additionally, it supports the following keyword arguments:
+
+    :param cache:
+        If given, it must be a boolean value specifying to enable or disable
+        caching of results. If you will use the same rrule instance multiple
+        times, enabling caching will improve the performance considerably.
+    :param dtstart:
+        The recurrence start. Besides being the base for the recurrence,
+        missing parameters in the final recurrence instances will also be
+        extracted from this date. If not given, datetime.now() will be used
+        instead.
+    :param interval:
+        The interval between each freq iteration. For example, when using
+        YEARLY, an interval of 2 means once every two years, but with HOURLY,
+        it means once every two hours. The default interval is 1.
+    :param wkst:
+        The week start day. Must be one of the MO, TU, WE constants, or an
+        integer, specifying the first day of the week. This will affect
+        recurrences based on weekly periods. The default week start is
+        retrieved from calendar.firstweekday(), and may be modified by
+        calendar.setfirstweekday().
+    :param count:
+        How many occurrences will be generated.
+    :param until:
+        If given, this must be a datetime instance that will specify the
+        limit of the recurrence. If a recurrence instance happens to be the
+        same as the datetime instance given in the until keyword, this will
+        be the last occurrence.
+    :param bysetpos:
+        If given, it must be either an integer, or a sequence of integers,
+        positive or negative. Each given integer will specify an occurrence
+        number, corresponding to the nth occurrence of the rule inside the
+        frequency period. For example, a bysetpos of -1 if combined with a
+        MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
+        result in the last work day of every month.
+    :param bymonth:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the months to apply the recurrence to.
+    :param bymonthday:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the month days to apply the recurrence to.
+    :param byyearday:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the year days to apply the recurrence to.
+    :param byweekno:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the week numbers to apply the recurrence to. Week numbers
+        have the meaning described in ISO8601, that is, the first week of
+        the year is that containing at least four days of the new year.
+    :param byweekday:
+        If given, it must be either an integer (0 == MO), a sequence of
+        integers, one of the weekday constants (MO, TU, etc), or a sequence
+        of these constants. When given, these variables will define the
+        weekdays where the recurrence will be applied. It's also possible to
+        use an argument n for the weekday instances, which will mean the nth
+        occurrence of this weekday in the period. For example, with MONTHLY,
+        or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
+        first Friday of the month where the recurrence happens. Notice that in
+        the RFC documentation, this is specified as BYDAY, but was renamed to
+        avoid the ambiguity of that keyword.
+    :param byhour:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the hours to apply the recurrence to.
+    :param byminute:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the minutes to apply the recurrence to.
+    :param bysecond:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the seconds to apply the recurrence to.
+    :param byeaster:
+        If given, it must be either an integer, or a sequence of integers,
+        positive or negative. Each integer will define an offset from the
+        Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
+        Sunday itself. This is an extension to the RFC specification.
+    """
     def __init__(self, freq, dtstart=None,
                  interval=1, wkst=None, count=None, until=None, bysetpos=None,
                  bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
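
As a concrete sketch of the ``bysetpos`` example given in the rrule docstring above (last working day of each month; the commented dates are the expected results for a 2015 start):

    from datetime import datetime
    from dateutil.rrule import rrule, MONTHLY, MO, TU, WE, TH, FR

    list(rrule(MONTHLY, count=3, bysetpos=-1,
               byweekday=(MO, TU, WE, TH, FR),
               dtstart=datetime(2015, 1, 1)))
    # [datetime(2015, 1, 30, 0, 0), datetime(2015, 2, 27, 0, 0), datetime(2015, 3, 31, 0, 0)]
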
@@ -249,15 +394,24 @@ class rrule(rrulebase):
         self._freq = freq
         self._interval = interval
         self._count = count
+
+        # Cache the original byxxx rules, if they are provided, as the _byxxx
+        # attributes do not necessarily map to the inputs, and this can be
+        # a problem in generating the strings. Only store things if they've
+        # been supplied (the string retrieval will just use .get())
+        self._original_rule = {}
+
         if until and not isinstance(until, datetime.datetime):
             until = datetime.datetime.fromordinal(until.toordinal())
         self._until = until
+
         if wkst is None:
             self._wkst = calendar.firstweekday()
         elif isinstance(wkst, integer_types):
             self._wkst = wkst
         else:
             self._wkst = wkst.weekday
+
         if bysetpos is None:
             self._bysetpos = None
         elif isinstance(bysetpos, integer_types):
@@ -271,30 +425,47 @@ class rrule(rrulebase):
                 if pos == 0 or not (-366 <= pos <= 366):
                     raise ValueError("bysetpos must be between 1 and 366, "
                                      "or between -366 and -1")
-        if not (byweekno or byyearday or bymonthday or
-                byweekday is not None or byeaster is not None):
+
+        if self._bysetpos:
+            self._original_rule['bysetpos'] = self._bysetpos
+
+        if (byweekno is None and byyearday is None and bymonthday is None and
+                byweekday is None and byeaster is None):
             if freq == YEARLY:
-                if not bymonth:
+                if bymonth is None:
                     bymonth = dtstart.month
+                    self._original_rule['bymonth'] = None
                 bymonthday = dtstart.day
+                self._original_rule['bymonthday'] = None
             elif freq == MONTHLY:
                 bymonthday = dtstart.day
+                self._original_rule['bymonthday'] = None
             elif freq == WEEKLY:
                 byweekday = dtstart.weekday()
+                self._original_rule['byweekday'] = None
+
         # bymonth
-        if not bymonth:
+        if bymonth is None:
             self._bymonth = None
-        elif isinstance(bymonth, integer_types):
-            self._bymonth = (bymonth,)
         else:
-            self._bymonth = tuple(bymonth)
+            if isinstance(bymonth, integer_types):
+                bymonth = (bymonth,)
+
+            self._bymonth = tuple(sorted(set(bymonth)))
+
+            if 'bymonth' not in self._original_rule:
+                self._original_rule['bymonth'] = self._bymonth
+
         # byyearday
-        if not byyearday:
+        if byyearday is None:
             self._byyearday = None
-        elif isinstance(byyearday, integer_types):
-            self._byyearday = (byyearday,)
         else:
-            self._byyearday = tuple(byyearday)
+            if isinstance(byyearday, integer_types):
+                byyearday = (byyearday,)
+
+            self._byyearday = tuple(sorted(set(byyearday)))
+            self._original_rule['byyearday'] = self._byyearday
+
         # byeaster
         if byeaster is not None:
             if not easter:
@@ -302,90 +473,144 @@ class rrule(rrulebase):
             if isinstance(byeaster, integer_types):
                 self._byeaster = (byeaster,)
             else:
-                self._byeaster = tuple(byeaster)
+                self._byeaster = tuple(sorted(byeaster))
+
+            self._original_rule['byeaster'] = self._byeaster
         else:
             self._byeaster = None
-        # bymonthay
-        if not bymonthday:
+
+        # bymonthday
+        if bymonthday is None:
             self._bymonthday = ()
             self._bynmonthday = ()
-        elif isinstance(bymonthday, integer_types):
-            if bymonthday < 0:
-                self._bynmonthday = (bymonthday,)
-                self._bymonthday = ()
-            else:
-                self._bymonthday = (bymonthday,)
-                self._bynmonthday = ()
         else:
-            self._bymonthday = tuple([x for x in bymonthday if x > 0])
-            self._bynmonthday = tuple([x for x in bymonthday if x < 0])
+            if isinstance(bymonthday, integer_types):
+                bymonthday = (bymonthday,)
+
+            bymonthday = set(bymonthday)            # Ensure it's unique
+
+            self._bymonthday = tuple(sorted([x for x in bymonthday if x > 0]))
+            self._bynmonthday = tuple(sorted([x for x in bymonthday if x < 0]))
+
+            # Storing positive numbers first, then negative numbers
+            if 'bymonthday' not in self._original_rule:
+                self._original_rule['bymonthday'] = tuple(
+                    itertools.chain(self._bymonthday, self._bynmonthday))
+
         # byweekno
         if byweekno is None:
             self._byweekno = None
-        elif isinstance(byweekno, integer_types):
-            self._byweekno = (byweekno,)
         else:
-            self._byweekno = tuple(byweekno)
+            if isinstance(byweekno, integer_types):
+                byweekno = (byweekno,)
+
+            self._byweekno = tuple(sorted(set(byweekno)))
+
+            self._original_rule['byweekno'] = self._byweekno
+
         # byweekday / bynweekday
         if byweekday is None:
             self._byweekday = None
             self._bynweekday = None
-        elif isinstance(byweekday, integer_types):
-            self._byweekday = (byweekday,)
-            self._bynweekday = None
-        elif hasattr(byweekday, "n"):
-            if not byweekday.n or freq > MONTHLY:
-                self._byweekday = (byweekday.weekday,)
-                self._bynweekday = None
-            else:
-                self._bynweekday = ((byweekday.weekday, byweekday.n),)
-                self._byweekday = None
         else:
-            self._byweekday = []
-            self._bynweekday = []
+            # If it's one of the valid non-sequence types, convert to a
+            # single-element sequence before the iterator that builds the
+            # byweekday set.
+            if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
+                byweekday = (byweekday,)
+
+            self._byweekday = set()
+            self._bynweekday = set()
             for wday in byweekday:
                 if isinstance(wday, integer_types):
-                    self._byweekday.append(wday)
+                    self._byweekday.add(wday)
                 elif not wday.n or freq > MONTHLY:
-                    self._byweekday.append(wday.weekday)
+                    self._byweekday.add(wday.weekday)
                 else:
-                    self._bynweekday.append((wday.weekday, wday.n))
-            self._byweekday = tuple(self._byweekday)
-            self._bynweekday = tuple(self._bynweekday)
+                    self._bynweekday.add((wday.weekday, wday.n))
+
             if not self._byweekday:
                 self._byweekday = None
             elif not self._bynweekday:
                 self._bynweekday = None
+
+            if self._byweekday is not None:
+                self._byweekday = tuple(sorted(self._byweekday))
+                orig_byweekday = [weekday(x) for x in self._byweekday]
+            else:
+                orig_byweekday = tuple()
+
+            if self._bynweekday is not None:
+                self._bynweekday = tuple(sorted(self._bynweekday))
+                orig_bynweekday = [weekday(*x) for x in self._bynweekday]
+            else:
+                orig_bynweekday = tuple()
+
+            if 'byweekday' not in self._original_rule:
+                self._original_rule['byweekday'] = tuple(itertools.chain(
+                    orig_byweekday, orig_bynweekday))
+
         # byhour
         if byhour is None:
             if freq < HOURLY:
-                self._byhour = (dtstart.hour,)
+                self._byhour = set((dtstart.hour,))
             else:
                 self._byhour = None
-        elif isinstance(byhour, integer_types):
-            self._byhour = (byhour,)
         else:
-            self._byhour = tuple(byhour)
+            if isinstance(byhour, integer_types):
+                byhour = (byhour,)
+
+            if freq == HOURLY:
+                self._byhour = self.__construct_byset(start=dtstart.hour,
+                                                      byxxx=byhour,
+                                                      base=24)
+            else:
+                self._byhour = set(byhour)
+
+            self._byhour = tuple(sorted(self._byhour))
+            self._original_rule['byhour'] = self._byhour
+
         # byminute
         if byminute is None:
             if freq < MINUTELY:
-                self._byminute = (dtstart.minute,)
+                self._byminute = set((dtstart.minute,))
             else:
                 self._byminute = None
-        elif isinstance(byminute, integer_types):
-            self._byminute = (byminute,)
         else:
-            self._byminute = tuple(byminute)
+            if isinstance(byminute, integer_types):
+                byminute = (byminute,)
+
+            if freq == MINUTELY:
+                self._byminute = self.__construct_byset(start=dtstart.minute,
+                                                        byxxx=byminute,
+                                                        base=60)
+            else:
+                self._byminute = set(byminute)
+
+            self._byminute = tuple(sorted(self._byminute))
+            self._original_rule['byminute'] = self._byminute
+
         # bysecond
         if bysecond is None:
             if freq < SECONDLY:
-                self._bysecond = (dtstart.second,)
+                self._bysecond = set((dtstart.second,))
             else:
                 self._bysecond = None
-        elif isinstance(bysecond, integer_types):
-            self._bysecond = (bysecond,)
         else:
-            self._bysecond = tuple(bysecond)
+            if isinstance(bysecond, integer_types):
+                bysecond = (bysecond,)
+
+            if freq == SECONDLY:
+                self._bysecond = self.__construct_byset(start=dtstart.second,
+                                                        byxxx=bysecond,
+                                                        base=60)
+            else:
+                self._bysecond = set(bysecond)
+
+            self._bysecond = tuple(sorted(self._bysecond))
+            self._original_rule['bysecond'] = self._bysecond
 
         if self._freq >= HOURLY:
             self._timeset = None
@@ -395,11 +620,70 @@ class rrule(rrulebase):
                 for minute in self._byminute:
                     for second in self._bysecond:
                         self._timeset.append(
-                                datetime.time(hour, minute, second,
-                                                    tzinfo=self._tzinfo))
+                            datetime.time(hour, minute, second,
+                                          tzinfo=self._tzinfo))
             self._timeset.sort()
             self._timeset = tuple(self._timeset)
 
+    def __str__(self):
+        """
+        Output a string that would generate this RRULE if passed to rrulestr.
+        This is mostly compatible with RFC2445, except for the
+        dateutil-specific extension BYEASTER.
+        """
+
+        output = []
+        h, m, s = [None] * 3
+        if self._dtstart:
+            output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
+            h, m, s = self._dtstart.timetuple()[3:6]
+
+        parts = ['FREQ=' + FREQNAMES[self._freq]]
+        if self._interval != 1:
+            parts.append('INTERVAL=' + str(self._interval))
+
+        if self._wkst:
+            parts.append('WKST=' + str(self._wkst))
+
+        if self._count:
+            parts.append('COUNT=' + str(self._count))
+
+        if self._original_rule.get('byweekday') is not None:
+            # The str() method on weekday objects doesn't generate
+            # RFC2445-compliant tokens, so build them here (e.g. '+1MO').
+            original_rule = dict(self._original_rule)
+            wday_strings = []
+            for wday in original_rule['byweekday']:
+                if wday.n:
+                    wday_strings.append('{n:+d}{wday}'.format(
+                        n=wday.n,
+                        wday=repr(wday)[0:2]))
+                else:
+                    wday_strings.append(repr(wday))
+
+            original_rule['byweekday'] = wday_strings
+        else:
+            original_rule = self._original_rule
+
+        partfmt = '{name}={vals}'
+        for name, key in [('BYSETPOS', 'bysetpos'),
+                          ('BYMONTH', 'bymonth'),
+                          ('BYMONTHDAY', 'bymonthday'),
+                          ('BYYEARDAY', 'byyearday'),
+                          ('BYWEEKNO', 'byweekno'),
+                          ('BYDAY', 'byweekday'),
+                          ('BYHOUR', 'byhour'),
+                          ('BYMINUTE', 'byminute'),
+                          ('BYSECOND', 'bysecond'),
+                          ('BYEASTER', 'byeaster')]:
+            value = original_rule.get(key)
+            if value:
+                parts.append(partfmt.format(name=name, vals=(','.join(str(v)
+                                                             for v in value))))
+
+        output.append(';'.join(parts))
+        return '\n'.join(output)
+
     def _iter(self):
         year, month, day, hour, minute, second, weekday, yearday, _ = \
             self._dtstart.timetuple()
@@ -424,20 +708,20 @@ class rrule(rrulebase):
         ii = _iterinfo(self)
         ii.rebuild(year, month)
 
-        getdayset = {YEARLY:ii.ydayset,
-                     MONTHLY:ii.mdayset,
-                     WEEKLY:ii.wdayset,
-                     DAILY:ii.ddayset,
-                     HOURLY:ii.ddayset,
-                     MINUTELY:ii.ddayset,
-                     SECONDLY:ii.ddayset}[freq]
-        
+        getdayset = {YEARLY: ii.ydayset,
+                     MONTHLY: ii.mdayset,
+                     WEEKLY: ii.wdayset,
+                     DAILY: ii.ddayset,
+                     HOURLY: ii.ddayset,
+                     MINUTELY: ii.ddayset,
+                     SECONDLY: ii.ddayset}[freq]
+
         if freq < HOURLY:
             timeset = self._timeset
         else:
-            gettimeset = {HOURLY:ii.htimeset,
-                          MINUTELY:ii.mtimeset,
-                          SECONDLY:ii.stimeset}[freq]
+            gettimeset = {HOURLY: ii.htimeset,
+                          MINUTELY: ii.mtimeset,
+                          SECONDLY: ii.stimeset}[freq]
             if ((freq >= HOURLY and
                  self._byhour and hour not in self._byhour) or
                 (freq >= MINUTELY and
@@ -466,11 +750,10 @@ class rrule(rrulebase):
                      ii.mdaymask[i] not in bymonthday and
                      ii.nmdaymask[i] not in bynmonthday) or
                     (byyearday and
-                     ((i < ii.yearlen and i+1 not in byyearday
-                                      and -ii.yearlen+i not in byyearday) or
-                      (i >= ii.yearlen and i+1-ii.yearlen not in byyearday
-                                       and -ii.nextyearlen+i-ii.yearlen
-                                           not in byyearday)))):
+                     ((i < ii.yearlen and i+1 not in byyearday and
+                       -ii.yearlen+i not in byyearday) or
+                      (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
+                       -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
                     dayset[i] = None
                     filtered = True
 
@@ -484,7 +767,7 @@ class rrule(rrulebase):
                         daypos, timepos = divmod(pos-1, len(timeset))
                     try:
                         i = [x for x in dayset[start:end]
-                                if x is not None][daypos]
+                             if x is not None][daypos]
                         time = timeset[timepos]
                     except IndexError:
                         pass
@@ -509,7 +792,7 @@ class rrule(rrulebase):
             else:
                 for i in dayset[start:end]:
                     if i is not None:
-                        date = datetime.date.fromordinal(ii.yearordinal+i)
+                        date = datetime.date.fromordinal(ii.yearordinal + i)
                         for time in timeset:
                             res = datetime.datetime.combine(date, time)
                             if until and res > until:
@@ -559,60 +842,86 @@ class rrule(rrulebase):
                 if filtered:
                     # Jump to one iteration before next day
                     hour += ((23-hour)//interval)*interval
-                while True:
-                    hour += interval
-                    div, mod = divmod(hour, 24)
-                    if div:
-                        hour = mod
-                        day += div
-                        fixday = True
-                    if not byhour or hour in byhour:
-                        break
+
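+                # Jump straight to the next allowed hour instead of stepping
+                # one interval at a time; __mod_distance also reports how many
+                # day boundaries were crossed on the way.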
+                if byhour:
+                    ndays, hour = self.__mod_distance(value=hour,
+                                                      byxxx=self._byhour,
+                                                      base=24)
+                else:
+                    ndays, hour = divmod(hour+interval, 24)
+
+                if ndays:
+                    day += ndays
+                    fixday = True
+
                 timeset = gettimeset(hour, minute, second)
             elif freq == MINUTELY:
                 if filtered:
                     # Jump to one iteration before next day
                     minute += ((1439-(hour*60+minute))//interval)*interval
-                while True:
-                    minute += interval
-                    div, mod = divmod(minute, 60)
+
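+                # Stepping the minutes-past-midnight value by `interval`, the
+                # pattern repeats after rep_rate // gcd(interval, rep_rate)
+                # distinct values, so that many iterations suffice to find an
+                # allowed hour or to prove the rule is empty.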
+                valid = False
+                rep_rate = (24*60)
+                for j in range(rep_rate // gcd(interval, rep_rate)):
+                    if byminute:
+                        nhours, minute = \
+                            self.__mod_distance(value=minute,
+                                                byxxx=self._byminute,
+                                                base=60)
+                    else:
+                        nhours, minute = divmod(minute+interval, 60)
+
+                    div, hour = divmod(hour+nhours, 24)
                     if div:
-                        minute = mod
-                        hour += div
-                        div, mod = divmod(hour, 24)
-                        if div:
-                            hour = mod
-                            day += div
-                            fixday = True
-                            filtered = False
-                    if ((not byhour or hour in byhour) and
-                        (not byminute or minute in byminute)):
+                        day += div
+                        fixday = True
+                        filtered = False
+
+                    if not byhour or hour in byhour:
+                        valid = True
                         break
+
+                if not valid:
+                    raise ValueError('Invalid combination of interval and ' +
+                                     'byhour resulting in empty rule.')
+
                 timeset = gettimeset(hour, minute, second)
             elif freq == SECONDLY:
                 if filtered:
                     # Jump to one iteration before next day
-                    second += (((86399-(hour*3600+minute*60+second))
-                                //interval)*interval)
-                while True:
-                    second += self._interval
-                    div, mod = divmod(second, 60)
+                    second += (((86399 - (hour * 3600 + minute * 60 + second))
+                                // interval) * interval)
+
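+                # Same reasoning as the MINUTELY case, with the (hour, minute,
+                # second) state repeating every 24*3600 seconds.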
+                rep_rate = (24 * 3600)
+                valid = False
+                for j in range(0, rep_rate // gcd(interval, rep_rate)):
+                    if bysecond:
+                        nminutes, second = \
+                            self.__mod_distance(value=second,
+                                                byxxx=self._bysecond,
+                                                base=60)
+                    else:
+                        nminutes, second = divmod(second+interval, 60)
+
+                    div, minute = divmod(minute+nminutes, 60)
                     if div:
-                        second = mod
-                        minute += div
-                        div, mod = divmod(minute, 60)
+                        hour += div
+                        div, hour = divmod(hour, 24)
                         if div:
-                            minute = mod
-                            hour += div
-                            div, mod = divmod(hour, 24)
-                            if div:
-                                hour = mod
-                                day += div
-                                fixday = True
+                            day += div
+                            fixday = True
+
                     if ((not byhour or hour in byhour) and
-                        (not byminute or minute in byminute) and
-                        (not bysecond or second in bysecond)):
+                            (not byminute or minute in byminute) and
+                            (not bysecond or second in bysecond)):
+                        valid = True
                         break
+
+                if not valid:
+                    raise ValueError('Invalid combination of interval, ' +
+                                     'byhour and byminute resulting in empty' +
+                                     ' rule.')
+
                 timeset = gettimeset(hour, minute, second)
 
             if fixday and day > 28:
@@ -630,6 +939,86 @@ class rrule(rrulebase):
                         daysinmonth = calendar.monthrange(year, month)[1]
                     ii.rebuild(year, month)
 
+    def __construct_byset(self, start, byxxx, base):
+        """
+        If a `BYXXX` sequence is passed to the constructor at the same level
+        as `FREQ` (e.g. `FREQ=HOURLY;BYHOUR=2,4,7;INTERVAL=3`), some of the
+        requested values cannot be reached given the starting conditions.
+
+        This occurs whenever the interval is not coprime with the base of a
+        given unit and the difference between the starting position and a
+        requested value is not divisible by the greatest common divisor of
+        the interval and the base. For example, with a FREQ of hourly
+        starting at 17:00 and an interval of 4, the only valid values for
+        BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
+        coprime.
+
+        :param start:
+            Specifies the starting position.
+        :param byxxx:
+            An iterable containing the list of allowed values.
+        :param base:
+            The largest allowable value for the specified frequency (e.g.
+            24 hours, 60 minutes).
+
+        The type of the iterable is not preserved; a set is returned, since
+        the values must be unique and their order is irrelevant, and a set
+        also speeds up later membership checks.
+
+        If the resulting set is empty, a :exc:`ValueError` is raised, as it
+        would produce an empty rrule.
+        """
+
+        cset = set()
+
+        # Support a single byxxx value.
+        if isinstance(byxxx, integer_types):
+            byxxx = (byxxx, )
+
+        for num in byxxx:
+            i_gcd = gcd(self._interval, base)
+            # Use divmod rather than % because we need to wrap negative nums.
+            if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
+                cset.add(num)
+
+        if len(cset) == 0:
+            raise ValueError("Invalid rrule byxxx generates an empty set.")
+
+        return cset
+
+    def __mod_distance(self, value, byxxx, base):
+        """
+        Calculates the next value in a sequence where the `FREQ` parameter is
+        specified along with a `BYXXX` parameter at the same "level"
+        (e.g. `HOURLY` specified with `BYHOUR`).
+
+        :param value:
+            The old value of the component.
+        :param byxxx:
+            The `BYXXX` set, which should have been generated by
+            `rrule.__construct_byset`, or something else which checks that a
+            valid rule is present.
+        :param base:
+            The largest allowable value for the specified frequency (e.g.
+            24 hours, 60 minutes).
+
+        If no valid value is found within `base` iterations (the maximum
+        number before the sequence starts to repeat), the search is
+        exhausted; sets built by `rrule.__construct_byset` always contain a
+        reachable value, so in practice this does not happen.
+
+        This returns a tuple of `divmod(n*interval, base)`, where `n` is the
+        smallest number of `interval` repetitions until the next specified
+        value in `byxxx` is found.
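+
+        For example, with `interval=4`, `value=17`, `base=24` and
+        `byxxx={1, 5, 9, 13, 17, 21}`, the next allowed value is 21, reached
+        after a single step with no wrap past `base`, so `(0, 21)` is
+        returned.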
+        """
+        accumulator = 0
+        for ii in range(1, base + 1):
+            # Using divmod() over % to account for negative intervals
+            div, value = divmod(value + self._interval, base)
+            accumulator += div
+            if value in byxxx:
+                return (accumulator, value)
+
+
 class _iterinfo(object):
     __slots__ = ["rrule", "lastyear", "lastmonth",
                  "yearlen", "nextyearlen", "yearordinal", "yearweekday",
@@ -645,8 +1034,8 @@ class _iterinfo(object):
         # Every mask is 7 days longer to handle cross-year weekly periods.
         rr = self.rrule
         if year != self.lastyear:
-            self.yearlen = 365+calendar.isleap(year)
-            self.nextyearlen = 365+calendar.isleap(year+1)
+            self.yearlen = 365 + calendar.isleap(year)
+            self.nextyearlen = 365 + calendar.isleap(year + 1)
             firstyday = datetime.date(year, 1, 1)
             self.yearordinal = firstyday.toordinal()
             self.yearweekday = firstyday.weekday()
@@ -669,13 +1058,13 @@ class _iterinfo(object):
                 self.wnomask = None
             else:
                 self.wnomask = [0]*(self.yearlen+7)
-                #no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
-                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
+                # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
+                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
                 if no1wkst >= 4:
                     no1wkst = 0
                     # Number of days in the year, plus the days we got
                     # from last year.
-                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
+                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
                 else:
                     # Number of days in the year, minus the days we
                     # left in last year.
@@ -721,22 +1110,22 @@ class _iterinfo(object):
                     # this year.
                     if -1 not in rr._byweekno:
                         lyearweekday = datetime.date(year-1, 1, 1).weekday()
-                        lno1wkst = (7-lyearweekday+rr._wkst)%7
+                        lno1wkst = (7-lyearweekday+rr._wkst) % 7
                         lyearlen = 365+calendar.isleap(year-1)
                         if lno1wkst >= 4:
                             lno1wkst = 0
-                            lnumweeks = 52+(lyearlen+
-                                           (lyearweekday-rr._wkst)%7)%7//4
+                            lnumweeks = 52+(lyearlen +
+                                            (lyearweekday-rr._wkst) % 7) % 7//4
                         else:
-                            lnumweeks = 52+(self.yearlen-no1wkst)%7//4
+                            lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
                     else:
                         lnumweeks = -1
                     if lnumweeks in rr._byweekno:
                         for i in range(no1wkst):
                             self.wnomask[i] = 1
 
-        if (rr._bynweekday and
-            (month != self.lastmonth or year != self.lastyear)):
+        if (rr._bynweekday and (month != self.lastmonth or
+                                year != self.lastyear)):
             ranges = []
             if rr._freq == YEARLY:
                 if rr._bymonth:
@@ -755,10 +1144,10 @@ class _iterinfo(object):
                     for wday, n in rr._bynweekday:
                         if n < 0:
                             i = last+(n+1)*7
-                            i -= (self.wdaymask[i]-wday)%7
+                            i -= (self.wdaymask[i]-wday) % 7
                         else:
                             i = first+(n-1)*7
-                            i += (7-self.wdaymask[i]+wday)%7
+                            i += (7-self.wdaymask[i]+wday) % 7
                         if first <= i <= last:
                             self.nwdaymask[i] = 1
 
@@ -775,50 +1164,50 @@ class _iterinfo(object):
         return list(range(self.yearlen)), 0, self.yearlen
 
     def mdayset(self, year, month, day):
-        set = [None]*self.yearlen
+        dset = [None]*self.yearlen
         start, end = self.mrange[month-1:month+1]
         for i in range(start, end):
-            set[i] = i
-        return set, start, end
+            dset[i] = i
+        return dset, start, end
 
     def wdayset(self, year, month, day):
         # We need to handle cross-year weeks here.
-        set = [None]*(self.yearlen+7)
+        dset = [None]*(self.yearlen+7)
         i = datetime.date(year, month, day).toordinal()-self.yearordinal
         start = i
         for j in range(7):
-            set[i] = i
+            dset[i] = i
             i += 1
-            #if (not (0 <= i < self.yearlen) or
+            # if (not (0 <= i < self.yearlen) or
             #    self.wdaymask[i] == self.rrule._wkst):
             # This will cross the year boundary, if necessary.
             if self.wdaymask[i] == self.rrule._wkst:
                 break
-        return set, start, i
+        return dset, start, i
 
     def ddayset(self, year, month, day):
-        set = [None]*self.yearlen
-        i = datetime.date(year, month, day).toordinal()-self.yearordinal
-        set[i] = i
-        return set, i, i+1
+        dset = [None] * self.yearlen
+        i = datetime.date(year, month, day).toordinal() - self.yearordinal
+        dset[i] = i
+        return dset, i, i + 1
 
     def htimeset(self, hour, minute, second):
-        set = []
+        tset = []
         rr = self.rrule
         for minute in rr._byminute:
             for second in rr._bysecond:
-                set.append(datetime.time(hour, minute, second,
-                                         tzinfo=rr._tzinfo))
-        set.sort()
-        return set
+                tset.append(datetime.time(hour, minute, second,
+                                          tzinfo=rr._tzinfo))
+        tset.sort()
+        return tset
 
     def mtimeset(self, hour, minute, second):
-        set = []
+        tset = []
         rr = self.rrule
         for second in rr._bysecond:
-            set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
-        set.sort()
-        return set
+            tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
+        tset.sort()
+        return tset
 
     def stimeset(self, hour, minute, second):
         return (datetime.time(hour, minute, second,
@@ -826,6 +1215,12 @@ class _iterinfo(object):
 
 
 class rruleset(rrulebase):
+    """ The rruleset type allows more complex recurrence setups, mixing
+    multiple rules, dates, exclusion rules, and exclusion dates. The type
+    constructor takes the following keyword arguments:
+
+    :param cache: If True, caching of results will be enabled, improving
+                  performance of multiple queries considerably. """
 
     class _genitem(object):
         def __init__(self, genlist, gen):
@@ -865,15 +1260,26 @@ class rruleset(rrulebase):
         self._exdate = []
 
     def rrule(self, rrule):
+        """ Include the given :py:class:`rrule` instance in the recurrence set
+            generation. """
         self._rrule.append(rrule)
 
     def rdate(self, rdate):
+        """ Include the given :py:class:`datetime` instance in the recurrence
+            set generation. """
         self._rdate.append(rdate)
 
     def exrule(self, exrule):
+        """ Include the given rrule instance in the recurrence set exclusion
+            list. Dates which are part of the given recurrence rules will not
+            be generated, even if some inclusive rrule or rdate matches them.
+        """
         self._exrule.append(exrule)
 
     def exdate(self, exdate):
+        """ Include the given datetime instance in the recurrence set
+            exclusion list. Dates included that way will not be generated,
+            even if some inclusive rrule or rdate matches them. """
         self._exdate.append(exdate)
 
     def _iter(self):
@@ -905,6 +1311,7 @@ class rruleset(rrulebase):
             rlist.sort()
         self._len = total
 
+
 class _rrulestr(object):
 
     _freq_map = {"YEARLY": YEARLY,
@@ -915,7 +1322,8 @@ class _rrulestr(object):
                  "MINUTELY": MINUTELY,
                  "SECONDLY": SECONDLY}
 
-    _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
+    _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
+                    "FR": 4, "SA": 5, "SU": 6}
 
     def _handle_int(self, rrkwargs, name, value, **kwargs):
         rrkwargs[name.lower()] = int(value)
@@ -923,17 +1331,17 @@ class _rrulestr(object):
     def _handle_int_list(self, rrkwargs, name, value, **kwargs):
         rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
 
-    _handle_INTERVAL   = _handle_int
-    _handle_COUNT      = _handle_int
-    _handle_BYSETPOS   = _handle_int_list
-    _handle_BYMONTH    = _handle_int_list
+    _handle_INTERVAL = _handle_int
+    _handle_COUNT = _handle_int
+    _handle_BYSETPOS = _handle_int_list
+    _handle_BYMONTH = _handle_int_list
     _handle_BYMONTHDAY = _handle_int_list
-    _handle_BYYEARDAY  = _handle_int_list
-    _handle_BYEASTER   = _handle_int_list
-    _handle_BYWEEKNO   = _handle_int_list
-    _handle_BYHOUR     = _handle_int_list
-    _handle_BYMINUTE   = _handle_int_list
-    _handle_BYSECOND   = _handle_int_list
+    _handle_BYYEARDAY = _handle_int_list
+    _handle_BYEASTER = _handle_int_list
+    _handle_BYWEEKNO = _handle_int_list
+    _handle_BYHOUR = _handle_int_list
+    _handle_BYMINUTE = _handle_int_list
+    _handle_BYSECOND = _handle_int_list
 
     def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
         rrkwargs["freq"] = self._freq_map[value]
@@ -944,23 +1352,34 @@ class _rrulestr(object):
             from dateutil import parser
         try:
             rrkwargs["until"] = parser.parse(value,
-                                           ignoretz=kwargs.get("ignoretz"),
-                                           tzinfos=kwargs.get("tzinfos"))
+                                             ignoretz=kwargs.get("ignoretz"),
+                                             tzinfos=kwargs.get("tzinfos"))
         except ValueError:
             raise ValueError("invalid until date")
 
     def _handle_WKST(self, rrkwargs, name, value, **kwargs):
         rrkwargs["wkst"] = self._weekday_map[value]
 
-    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg):
+    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
+        """
+        Two ways to specify this: +1MO or MO(+1)
+        """
         l = []
         for wday in value.split(','):
-            for i in range(len(wday)):
-                if wday[i] not in '+-0123456789':
-                    break
-            n = wday[:i] or None
-            w = wday[i:]
-            if n: n = int(n)
+            if '(' in wday:
+                # If it's of the form TH(+1), etc.
+                splt = wday.split('(')
+                w = splt[0]
+                n = int(splt[1][:-1])
+            else:
+                # If it's of the form +1MO
+                for i in range(len(wday)):
+                    if wday[i] not in '+-0123456789':
+                        break
+                n = wday[:i] or None
+                w = wday[i:]
+                if n:
+                    n = int(n)
             l.append(weekdays[self._weekday_map[w]](n))
         rrkwargs["byweekday"] = l
 
@@ -1021,8 +1440,8 @@ class _rrulestr(object):
                     i += 1
         else:
             lines = s.split()
-        if (not forceset and len(lines) == 1 and
-            (s.find(':') == -1 or s.startswith('RRULE:'))):
+        if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
+                                                  s.startswith('RRULE:'))):
             return self._parse_rfc_rrule(lines[0], cache=cache,
                                          dtstart=dtstart, ignoretz=ignoretz,
                                          tzinfos=tzinfos)
@@ -1071,32 +1490,32 @@ class _rrulestr(object):
                                            tzinfos=tzinfos)
                 else:
                     raise ValueError("unsupported property: "+name)
-            if (forceset or len(rrulevals) > 1 or
-                rdatevals or exrulevals or exdatevals):
+            if (forceset or len(rrulevals) > 1 or rdatevals
+                    or exrulevals or exdatevals):
                 if not parser and (rdatevals or exdatevals):
                     from dateutil import parser
-                set = rruleset(cache=cache)
+                rset = rruleset(cache=cache)
                 for value in rrulevals:
-                    set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
-                                                    ignoretz=ignoretz,
-                                                    tzinfos=tzinfos))
-                for value in rdatevals:
-                    for datestr in value.split(','):
-                        set.rdate(parser.parse(datestr,
-                                               ignoretz=ignoretz,
-                                               tzinfos=tzinfos))
-                for value in exrulevals:
-                    set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+                    rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                      ignoretz=ignoretz,
                                                      tzinfos=tzinfos))
-                for value in exdatevals:
+                for value in rdatevals:
                     for datestr in value.split(','):
-                        set.exdate(parser.parse(datestr,
+                        rset.rdate(parser.parse(datestr,
                                                 ignoretz=ignoretz,
                                                 tzinfos=tzinfos))
+                for value in exrulevals:
+                    rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+                                                      ignoretz=ignoretz,
+                                                      tzinfos=tzinfos))
+                for value in exdatevals:
+                    for datestr in value.split(','):
+                        rset.exdate(parser.parse(datestr,
+                                                 ignoretz=ignoretz,
+                                                 tzinfos=tzinfos))
                 if compatible and dtstart:
-                    set.rdate(dtstart)
-                return set
+                    rset.rdate(dtstart)
+                return rset
             else:
                 return self._parse_rfc_rrule(rrulevals[0],
                                              dtstart=dtstart,
diff --git a/lib/dateutil/tz.py b/lib/dateutil/tz.py
index e849fc24b5e2e554efb86b025422ef7bf1fbad0f..6625d989ba0d53972f08c904bf0f869e04902ebd 100644
--- a/lib/dateutil/tz.py
+++ b/lib/dateutil/tz.py
@@ -1,19 +1,25 @@
+# -*- coding: utf-8 -*-
 """
-Copyright (c) 2003-2007  Gustavo Niemeyer <gustavo@niemeyer.net>
-
-This module offers extensions to the standard Python
-datetime module.
+This module offers timezone implementations subclassing the abstract
+:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
+files (usually found in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
+etc.), the TZ environment variable string (in all known formats), given
+ranges (with the help of relative deltas), the local machine timezone, fixed
+offset timezones, and the UTC timezone.
 """
-__license__ = "Simplified BSD"
-
-from six import string_types, PY3
-
 import datetime
 import struct
 import time
 import sys
 import os
 
+from six import string_types, PY3
+
+try:
+    from dateutil.tzwin import tzwin, tzwinlocal
+except ImportError:
+    tzwin = tzwinlocal = None
+
 relativedelta = None
 parser = None
 rrule = None
@@ -21,32 +27,31 @@ rrule = None
 __all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
            "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
 
-try:
-    from dateutil.tzwin import tzwin, tzwinlocal
-except (ImportError, OSError):
-    tzwin, tzwinlocal = None, None
 
-def tzname_in_python2(myfunc):
+def tzname_in_python2(namefunc):
     """Change unicode output into bytestrings in Python 2
 
     tzname() API changed in Python 3. It used to return bytes, but was changed
     to unicode strings
     """
-    def inner_func(*args, **kwargs):
-        if PY3:
-            return myfunc(*args, **kwargs)
-        else:
-            return myfunc(*args, **kwargs).encode()
-    return inner_func
+    def adjust_encoding(*args, **kwargs):
+        name = namefunc(*args, **kwargs)
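+        # tzname() may legitimately return None; only encode actual strings,
+        # and only on Python 2.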
+        if name is not None and not PY3:
+            name = name.encode()
+
+        return name
+
+    return adjust_encoding
 
 ZERO = datetime.timedelta(0)
 EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
 
+
 class tzutc(datetime.tzinfo):
 
     def utcoffset(self, dt):
         return ZERO
-     
+
     def dst(self, dt):
         return ZERO
 
@@ -66,6 +71,7 @@ class tzutc(datetime.tzinfo):
 
     __reduce__ = object.__reduce__
 
+
 class tzoffset(datetime.tzinfo):
 
     def __init__(self, name, offset):
@@ -96,13 +102,14 @@ class tzoffset(datetime.tzinfo):
 
     __reduce__ = object.__reduce__
 
-class tzlocal(datetime.tzinfo):
 
-    _std_offset = datetime.timedelta(seconds=-time.timezone)
-    if time.daylight:
-        _dst_offset = datetime.timedelta(seconds=-time.altzone)
-    else:
-        _dst_offset = _std_offset
+class tzlocal(datetime.tzinfo):
+    def __init__(self):
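+        # The offsets are evaluated when the instance is created, rather than
+        # once at class-definition (import) time as before.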
+        self._std_offset = datetime.timedelta(seconds=-time.timezone)
+        if time.daylight:
+            self._dst_offset = datetime.timedelta(seconds=-time.altzone)
+        else:
+            self._dst_offset = self._std_offset
 
     def utcoffset(self, dt):
         if self._isdst(dt):
@@ -123,25 +130,25 @@ class tzlocal(datetime.tzinfo):
     def _isdst(self, dt):
         # We can't use mktime here. It is unstable when deciding if
         # the hour near to a change is DST or not.
-        # 
+        #
         # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
         #                         dt.minute, dt.second, dt.weekday(), 0, -1))
         # return time.localtime(timestamp).tm_isdst
         #
         # The code above yields the following result:
         #
-        #>>> import tz, datetime
-        #>>> t = tz.tzlocal()
-        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
-        #'BRDT'
-        #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
-        #'BRST'
-        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
-        #'BRST'
-        #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
-        #'BRDT'
-        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
-        #'BRDT'
+        # >>> import tz, datetime
+        # >>> t = tz.tzlocal()
+        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+        # 'BRDT'
+        # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
+        # 'BRST'
+        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+        # 'BRST'
+        # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
+        # 'BRDT'
+        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+        # 'BRDT'
         #
         # Here is a more stable implementation:
         #
@@ -166,6 +173,7 @@ class tzlocal(datetime.tzinfo):
 
     __reduce__ = object.__reduce__
 
+
 class _ttinfo(object):
     __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
 
@@ -205,15 +213,20 @@ class _ttinfo(object):
             if name in state:
                 setattr(self, name, state[name])
 
+
 class tzfile(datetime.tzinfo):
 
     # http://www.twinsun.com/tz/tz-link.htm
     # ftp://ftp.iana.org/tz/tz*.tar.gz
-    
-    def __init__(self, fileobj):
+
+    def __init__(self, fileobj, filename=None):
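+        # Remember whether we opened the file ourselves so that the finally
+        # block below only closes file objects created here.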
+        file_opened_here = False
         if isinstance(fileobj, string_types):
             self._filename = fileobj
             fileobj = open(fileobj, 'rb')
+            file_opened_here = True
+        elif filename is not None:
+            self._filename = filename
         elif hasattr(fileobj, "name"):
             self._filename = fileobj.name
         else:
@@ -228,125 +241,128 @@ class tzfile(datetime.tzinfo):
         # six four-byte values of type long, written in a
         # ``standard'' byte order (the high-order  byte
         # of the value is written first).
+        try:
+            if fileobj.read(4).decode() != "TZif":
+                raise ValueError("magic not found")
 
-        if fileobj.read(4).decode() != "TZif":
-            raise ValueError("magic not found")
+            fileobj.read(16)
 
-        fileobj.read(16)
+            (
+                # The number of UTC/local indicators stored in the file.
+                ttisgmtcnt,
 
-        (
-         # The number of UTC/local indicators stored in the file.
-         ttisgmtcnt,
+                # The number of standard/wall indicators stored in the file.
+                ttisstdcnt,
 
-         # The number of standard/wall indicators stored in the file.
-         ttisstdcnt,
-         
-         # The number of leap seconds for which data is
-         # stored in the file.
-         leapcnt,
+                # The number of leap seconds for which data is
+                # stored in the file.
+                leapcnt,
 
-         # The number of "transition times" for which data
-         # is stored in the file.
-         timecnt,
+                # The number of "transition times" for which data
+                # is stored in the file.
+                timecnt,
 
-         # The number of "local time types" for which data
-         # is stored in the file (must not be zero).
-         typecnt,
+                # The number of "local time types" for which data
+                # is stored in the file (must not be zero).
+                typecnt,
 
-         # The  number  of  characters  of "time zone
-         # abbreviation strings" stored in the file.
-         charcnt,
+                # The  number  of  characters  of "time zone
+                # abbreviation strings" stored in the file.
+                charcnt,
 
-        ) = struct.unpack(">6l", fileobj.read(24))
+            ) = struct.unpack(">6l", fileobj.read(24))
 
-        # The above header is followed by tzh_timecnt four-byte
-        # values  of  type long,  sorted  in ascending order.
-        # These values are written in ``standard'' byte order.
-        # Each is used as a transition time (as  returned  by
-        # time(2)) at which the rules for computing local time
-        # change.
+            # The above header is followed by tzh_timecnt four-byte
+            # values  of  type long,  sorted  in ascending order.
+            # These values are written in ``standard'' byte order.
+            # Each is used as a transition time (as  returned  by
+            # time(2)) at which the rules for computing local time
+            # change.
 
-        if timecnt:
-            self._trans_list = struct.unpack(">%dl" % timecnt,
-                                             fileobj.read(timecnt*4))
-        else:
-            self._trans_list = []
-
-        # Next come tzh_timecnt one-byte values of type unsigned
-        # char; each one tells which of the different types of
-        # ``local time'' types described in the file is associated
-        # with the same-indexed transition time. These values
-        # serve as indices into an array of ttinfo structures that
-        # appears next in the file.
-        
-        if timecnt:
-            self._trans_idx = struct.unpack(">%dB" % timecnt,
-                                            fileobj.read(timecnt))
-        else:
-            self._trans_idx = []
-        
-        # Each ttinfo structure is written as a four-byte value
-        # for tt_gmtoff  of  type long,  in  a  standard  byte
-        # order, followed  by a one-byte value for tt_isdst
-        # and a one-byte  value  for  tt_abbrind.   In  each
-        # structure, tt_gmtoff  gives  the  number  of
-        # seconds to be added to UTC, tt_isdst tells whether
-        # tm_isdst should be set by  localtime(3),  and
-        # tt_abbrind serves  as an index into the array of
-        # time zone abbreviation characters that follow the
-        # ttinfo structure(s) in the file.
-
-        ttinfo = []
-
-        for i in range(typecnt):
-            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
-
-        abbr = fileobj.read(charcnt).decode()
-
-        # Then there are tzh_leapcnt pairs of four-byte
-        # values, written in  standard byte  order;  the
-        # first  value  of  each pair gives the time (as
-        # returned by time(2)) at which a leap second
-        # occurs;  the  second  gives the  total  number of
-        # leap seconds to be applied after the given time.
-        # The pairs of values are sorted in ascending order
-        # by time.
-
-        # Not used, for now
-        if leapcnt:
-            leap = struct.unpack(">%dl" % (leapcnt*2),
-                                 fileobj.read(leapcnt*8))
-
-        # Then there are tzh_ttisstdcnt standard/wall
-        # indicators, each stored as a one-byte value;
-        # they tell whether the transition times associated
-        # with local time types were specified as standard
-        # time or wall clock time, and are used when
-        # a time zone file is used in handling POSIX-style
-        # time zone environment variables.
-
-        if ttisstdcnt:
-            isstd = struct.unpack(">%db" % ttisstdcnt,
-                                  fileobj.read(ttisstdcnt))
-
-        # Finally, there are tzh_ttisgmtcnt UTC/local
-        # indicators, each stored as a one-byte value;
-        # they tell whether the transition times associated
-        # with local time types were specified as UTC or
-        # local time, and are used when a time zone file
-        # is used in handling POSIX-style time zone envi-
-        # ronment variables.
-
-        if ttisgmtcnt:
-            isgmt = struct.unpack(">%db" % ttisgmtcnt,
-                                  fileobj.read(ttisgmtcnt))
-
-        # ** Everything has been read **
+            if timecnt:
+                self._trans_list = struct.unpack(">%dl" % timecnt,
+                                                 fileobj.read(timecnt*4))
+            else:
+                self._trans_list = []
+
+            # Next come tzh_timecnt one-byte values of type unsigned
+            # char; each one tells which of the different types of
+            # ``local time'' types described in the file is associated
+            # with the same-indexed transition time. These values
+            # serve as indices into an array of ttinfo structures that
+            # appears next in the file.
+
+            if timecnt:
+                self._trans_idx = struct.unpack(">%dB" % timecnt,
+                                                fileobj.read(timecnt))
+            else:
+                self._trans_idx = []
+
+            # Each ttinfo structure is written as a four-byte value
+            # for tt_gmtoff  of  type long,  in  a  standard  byte
+            # order, followed  by a one-byte value for tt_isdst
+            # and a one-byte  value  for  tt_abbrind.   In  each
+            # structure, tt_gmtoff  gives  the  number  of
+            # seconds to be added to UTC, tt_isdst tells whether
+            # tm_isdst should be set by  localtime(3),  and
+            # tt_abbrind serves  as an index into the array of
+            # time zone abbreviation characters that follow the
+            # ttinfo structure(s) in the file.
+
+            ttinfo = []
+
+            for i in range(typecnt):
+                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
+
+            abbr = fileobj.read(charcnt).decode()
+
+            # Then there are tzh_leapcnt pairs of four-byte
+            # values, written in  standard byte  order;  the
+            # first  value  of  each pair gives the time (as
+            # returned by time(2)) at which a leap second
+            # occurs;  the  second  gives the  total  number of
+            # leap seconds to be applied after the given time.
+            # The pairs of values are sorted in ascending order
+            # by time.
+
+            # Not used, for now
+            # if leapcnt:
+            #    leap = struct.unpack(">%dl" % (leapcnt*2),
+            #                         fileobj.read(leapcnt*8))
+
+            # Then there are tzh_ttisstdcnt standard/wall
+            # indicators, each stored as a one-byte value;
+            # they tell whether the transition times associated
+            # with local time types were specified as standard
+            # time or wall clock time, and are used when
+            # a time zone file is used in handling POSIX-style
+            # time zone environment variables.
+
+            if ttisstdcnt:
+                isstd = struct.unpack(">%db" % ttisstdcnt,
+                                      fileobj.read(ttisstdcnt))
+
+            # Finally, there are tzh_ttisgmtcnt UTC/local
+            # indicators, each stored as a one-byte value;
+            # they tell whether the transition times associated
+            # with local time types were specified as UTC or
+            # local time, and are used when a time zone file
+            # is used in handling POSIX-style time zone envi-
+            # ronment variables.
+
+            if ttisgmtcnt:
+                isgmt = struct.unpack(">%db" % ttisgmtcnt,
+                                      fileobj.read(ttisgmtcnt))
+
+            # ** Everything has been read **
+        finally:
+            if file_opened_here:
+                fileobj.close()
 
         # Build ttinfo list
         self._ttinfo_list = []
         for i in range(typecnt):
-            gmtoff, isdst, abbrind =  ttinfo[i]
+            gmtoff, isdst, abbrind = ttinfo[i]
             # Round to full-minutes if that's not the case. Python's
             # datetime doesn't accept sub-minute timezones. Check
             # http://python.org/sf/1447945 for some information.
@@ -464,7 +480,7 @@ class tzfile(datetime.tzinfo):
         # However, this class stores historical changes in the
         # dst offset, so I believe that this wouldn't be the right
         # way to implement this.
-        
+
     @tzname_in_python2
     def tzname(self, dt):
         if not self._ttinfo_std:
@@ -481,7 +497,6 @@ class tzfile(datetime.tzinfo):
     def __ne__(self, other):
         return not self.__eq__(other)
 
-
     def __repr__(self):
         return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
 
@@ -490,8 +505,8 @@ class tzfile(datetime.tzinfo):
             raise ValueError("Unpickable %s class" % self.__class__.__name__)
         return (self.__class__, (self._filename,))
 
-class tzrange(datetime.tzinfo):
 
+class tzrange(datetime.tzinfo):
     def __init__(self, stdabbr, stdoffset=None,
                  dstabbr=None, dstoffset=None,
                  start=None, end=None):
@@ -512,12 +527,12 @@ class tzrange(datetime.tzinfo):
             self._dst_offset = ZERO
         if dstabbr and start is None:
             self._start_delta = relativedelta.relativedelta(
-                    hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
+                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
         else:
             self._start_delta = start
         if dstabbr and end is None:
             self._end_delta = relativedelta.relativedelta(
-                    hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
+                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
         else:
             self._end_delta = end
 
@@ -570,8 +585,9 @@ class tzrange(datetime.tzinfo):
 
     __reduce__ = object.__reduce__
 
+
 class tzstr(tzrange):
-    
+
     def __init__(self, s):
         global parser
         if not parser:
@@ -645,9 +661,10 @@ class tzstr(tzrange):
     def __repr__(self):
         return "%s(%s)" % (self.__class__.__name__, repr(self._s))
 
+
 class _tzicalvtzcomp(object):
     def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
-                       tzname=None, rrule=None):
+                 tzname=None, rrule=None):
         self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
         self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
         self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
@@ -655,6 +672,7 @@ class _tzicalvtzcomp(object):
         self.tzname = tzname
         self.rrule = rrule
 
+
 class _tzicalvtz(datetime.tzinfo):
     def __init__(self, tzid, comps=[]):
         self._tzid = tzid
@@ -718,6 +736,7 @@ class _tzicalvtz(datetime.tzinfo):
 
     __reduce__ = object.__reduce__
 
+
 class tzical(object):
     def __init__(self, fileobj):
         global rrule
@@ -726,7 +745,8 @@ class tzical(object):
 
         if isinstance(fileobj, string_types):
             self._s = fileobj
-            fileobj = open(fileobj, 'r')  # ical should be encoded in UTF-8 with CRLF
+            # ical should be encoded in UTF-8 with CRLF
+            fileobj = open(fileobj, 'r')
         elif hasattr(fileobj, "name"):
             self._s = fileobj.name
         else:
@@ -754,7 +774,7 @@ class tzical(object):
         if not s:
             raise ValueError("empty offset")
         if s[0] in ('+', '-'):
-            signal = (-1, +1)[s[0]=='+']
+            signal = (-1, +1)[s[0] == '+']
             s = s[1:]
         else:
             signal = +1
@@ -815,7 +835,8 @@ class tzical(object):
                         if not tzid:
                             raise ValueError("mandatory TZID not found")
                         if not comps:
-                            raise ValueError("at least one component is needed")
+                            raise ValueError(
+                                "at least one component is needed")
                         # Process vtimezone
                         self._vtz[tzid] = _tzicalvtz(tzid, comps)
                         invtz = False
@@ -823,9 +844,11 @@ class tzical(object):
                         if not founddtstart:
                             raise ValueError("mandatory DTSTART not found")
                         if tzoffsetfrom is None:
-                            raise ValueError("mandatory TZOFFSETFROM not found")
+                            raise ValueError(
+                                "mandatory TZOFFSETFROM not found")
                         if tzoffsetto is None:
-                            raise ValueError("mandatory TZOFFSETFROM not found")
+                            raise ValueError(
+                                "mandatory TZOFFSETFROM not found")
                         # Process component
                         rr = None
                         if rrulelines:
@@ -848,15 +871,18 @@ class tzical(object):
                         rrulelines.append(line)
                     elif name == "TZOFFSETFROM":
                         if parms:
-                            raise ValueError("unsupported %s parm: %s "%(name, parms[0]))
+                            raise ValueError(
+                                "unsupported %s parm: %s " % (name, parms[0]))
                         tzoffsetfrom = self._parse_offset(value)
                     elif name == "TZOFFSETTO":
                         if parms:
-                            raise ValueError("unsupported TZOFFSETTO parm: "+parms[0])
+                            raise ValueError(
+                                "unsupported TZOFFSETTO parm: "+parms[0])
                         tzoffsetto = self._parse_offset(value)
                     elif name == "TZNAME":
                         if parms:
-                            raise ValueError("unsupported TZNAME parm: "+parms[0])
+                            raise ValueError(
+                                "unsupported TZNAME parm: "+parms[0])
                         tzname = value
                     elif name == "COMMENT":
                         pass
@@ -865,7 +891,8 @@ class tzical(object):
                 else:
                     if name == "TZID":
                         if parms:
-                            raise ValueError("unsupported TZID parm: "+parms[0])
+                            raise ValueError(
+                                "unsupported TZID parm: "+parms[0])
                         tzid = value
                     elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
                         pass
@@ -886,6 +913,7 @@ else:
     TZFILES = []
     TZPATHS = []
 
+
 def gettz(name=None):
     tz = None
     if not name:
@@ -933,11 +961,11 @@ def gettz(name=None):
                     pass
             else:
                 tz = None
-                if tzwin:
+                if tzwin is not None:
                     try:
                         tz = tzwin(name)
-                    except OSError:
-                        pass
+                    except WindowsError:
+                        tz = None
                 if not tz:
                     from dateutil.zoneinfo import gettz
                     tz = gettz(name)
diff --git a/lib/dateutil/tzwin.py b/lib/dateutil/tzwin.py
index 041c6cc3d6453eabba91a6d74665d5bb656d3383..f4e0e248e1fe6d72b40e729bb6e51685e64a1bde 100644
--- a/lib/dateutil/tzwin.py
+++ b/lib/dateutil/tzwin.py
@@ -1,8 +1,10 @@
 # This code was originally contributed by Jeffrey Harris.
 import datetime
 import struct
-import winreg
 
+from six.moves import winreg
+
+from .tz import tzname_in_python2
 
 __all__ = ["tzwin", "tzwinlocal"]
 
@@ -12,8 +14,8 @@ TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
 TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
 TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
 
+
 def _settzkeyname():
-    global TZKEYNAME
     handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
     try:
         winreg.OpenKey(handle, TZKEYNAMENT).Close()
@@ -21,8 +23,10 @@ def _settzkeyname():
     except WindowsError:
         TZKEYNAME = TZKEYNAME9X
     handle.Close()
+    return TZKEYNAME
+
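+# Resolve the registry key name once at import time: the NT-style key is
+# preferred, with a fallback to the 9x-style key when it is missing.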
+TZKEYNAME = _settzkeyname()
 
-_settzkeyname()
 
 class tzwinbase(datetime.tzinfo):
     """tzinfo class based on win32's timezones available in the registry."""
@@ -39,7 +43,8 @@ class tzwinbase(datetime.tzinfo):
             return datetime.timedelta(minutes=minutes)
         else:
             return datetime.timedelta(0)
-        
+
+    @tzname_in_python2
     def tzname(self, dt):
         if self._isdst(dt):
             return self._dstname
@@ -59,8 +64,11 @@ class tzwinbase(datetime.tzinfo):
 
     def display(self):
         return self._display
-    
+
     def _isdst(self, dt):
+        if not self._dstmonth:
+            # dstmonth == 0 signals the zone has no daylight saving time
+            return False
         dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
                                self._dsthour, self._dstminute,
                                self._dstweeknumber)
@@ -78,31 +86,33 @@ class tzwin(tzwinbase):
     def __init__(self, name):
         self._name = name
 
-        handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
-        tzkey = winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
-        keydict = valuestodict(tzkey)
-        tzkey.Close()
-        handle.Close()
+        # Combining context managers in one 'with' statement requires
+        # Python 2.7/3.1+; nest them instead, since 2.6 is still supported.
+        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+            with winreg.OpenKey(handle,
+                                "%s\%s" % (TZKEYNAME, name)) as tzkey:
+                keydict = valuestodict(tzkey)
 
-        self._stdname = keydict["Std"].encode("iso-8859-1")
-        self._dstname = keydict["Dlt"].encode("iso-8859-1")
+        self._stdname = keydict["Std"]
+        self._dstname = keydict["Dlt"]
 
         self._display = keydict["Display"]
-        
+
         # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
         tup = struct.unpack("=3l16h", keydict["TZI"])
-        self._stdoffset = -tup[0]-tup[1]         # Bias + StandardBias * -1
-        self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
-        
+        self._stdoffset = -tup[0]-tup[1]          # Bias + StandardBias * -1
+        self._dstoffset = self._stdoffset-tup[2]  # + DaylightBias * -1
+
+        # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
+        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
         (self._stdmonth,
-         self._stddayofweek,  # Sunday = 0
-         self._stdweeknumber, # Last = 5
+         self._stddayofweek,   # Sunday = 0
+         self._stdweeknumber,  # Last = 5
          self._stdhour,
          self._stdminute) = tup[4:9]
 
         (self._dstmonth,
-         self._dstdayofweek,  # Sunday = 0
-         self._dstweeknumber, # Last = 5
+         self._dstdayofweek,   # Sunday = 0
+         self._dstweeknumber,  # Last = 5
          self._dsthour,
          self._dstminute) = tup[12:17]
 
@@ -114,61 +124,59 @@ class tzwin(tzwinbase):
 
 
 class tzwinlocal(tzwinbase):
-    
+
     def __init__(self):
 
-        handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
 
-        tzlocalkey = winreg.OpenKey(handle, TZLOCALKEYNAME)
-        keydict = valuestodict(tzlocalkey)
-        tzlocalkey.Close()
+            with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
+                keydict = valuestodict(tzlocalkey)
 
-        self._stdname = keydict["StandardName"].encode("iso-8859-1")
-        self._dstname = keydict["DaylightName"].encode("iso-8859-1")
+            self._stdname = keydict["StandardName"]
+            self._dstname = keydict["DaylightName"]
 
-        try:
-            tzkey = winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
-            _keydict = valuestodict(tzkey)
-            self._display = _keydict["Display"]
-            tzkey.Close()
-        except OSError:
-            self._display = None
+            try:
+                with winreg.OpenKey(
+                        handle, "%s\%s" % (TZKEYNAME, self._stdname)) as tzkey:
+                    _keydict = valuestodict(tzkey)
+                    self._display = _keydict["Display"]
+            except OSError:
+                self._display = None
 
-        handle.Close()
-        
         self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
         self._dstoffset = self._stdoffset-keydict["DaylightBias"]
 
-
         # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
         tup = struct.unpack("=8h", keydict["StandardStart"])
 
         (self._stdmonth,
-         self._stddayofweek,  # Sunday = 0
-         self._stdweeknumber, # Last = 5
+         self._stddayofweek,   # Sunday = 0
+         self._stdweeknumber,  # Last = 5
          self._stdhour,
          self._stdminute) = tup[1:6]
 
         tup = struct.unpack("=8h", keydict["DaylightStart"])
 
         (self._dstmonth,
-         self._dstdayofweek,  # Sunday = 0
-         self._dstweeknumber, # Last = 5
+         self._dstdayofweek,   # Sunday = 0
+         self._dstweeknumber,  # Last = 5
          self._dsthour,
          self._dstminute) = tup[1:6]
 
     def __reduce__(self):
         return (self.__class__, ())
 
+
 def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
     """dayofweek == 0 means Sunday, whichweek 5 means last instance"""
     first = datetime.datetime(year, month, 1, hour, minute)
-    weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
+    weekdayone = first.replace(day=((dayofweek-first.isoweekday()) % 7+1))
     for n in range(whichweek):
         dt = weekdayone+(whichweek-n)*ONEWEEK
         if dt.month == month:
             return dt
 
+
 def valuestodict(key):
     """Convert a registry key's values to a dictionary."""
     dict = {}
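
A minimal usage sketch of the patched classes, assuming a Windows host where the registry keys above exist; the zone name passed to tzwin is illustrative only:

    import datetime
    from dateutil.tzwin import tzwin, tzwinlocal

    # Build tzinfo objects from a named registry zone and from the machine's local zone.
    eastern = tzwin("Eastern Standard Time")   # illustrative key under TZKEYNAME
    local = tzwinlocal()

    # tzname_in_python2 keeps tzname() returning a byte string on Python 2.
    dt = datetime.datetime(2015, 7, 1, 12, 0, tzinfo=eastern)
    print(dt.tzname(), dt.utcoffset())          # zone name and UTC offset for that instant
    print(datetime.datetime.now(local).tzname())
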
diff --git a/lib/dateutil/zoneinfo/.gitignore b/lib/dateutil/zoneinfo/.gitignore
deleted file mode 100644
index 335ec9573de50674dbe15e731000ad7c5a2d1cfb..0000000000000000000000000000000000000000
--- a/lib/dateutil/zoneinfo/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.tar.gz
diff --git a/lib/dateutil/zoneinfo/__init__.py b/lib/dateutil/zoneinfo/__init__.py
index 81db1405b140375d2bb6190b9e09b17166f78d11..8156092ef6b2cc4df1eb62ee7a08e6493e4a10eb 100644
--- a/lib/dateutil/zoneinfo/__init__.py
+++ b/lib/dateutil/zoneinfo/__init__.py
@@ -1,109 +1,102 @@
 # -*- coding: utf-8 -*-
-"""
-Copyright (c) 2003-2005  Gustavo Niemeyer <gustavo@niemeyer.net>
-
-This module offers extensions to the standard Python
-datetime module.
-"""
 import logging
 import os
-from subprocess import call
+import warnings
+import tempfile
+import shutil
+import json
+
+from subprocess import check_call
 from tarfile import TarFile
+from pkgutil import get_data
+from io import BytesIO
+from contextlib import closing
 
 from dateutil.tz import tzfile
 
-__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
-__license__ = "Simplified BSD"
+__all__ = ["gettz", "gettz_db_metadata", "rebuild"]
+
+ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
+METADATA_FN = 'METADATA'
 
-__all__ = ["setcachesize", "gettz", "rebuild"]
+# python2.6 compatibility. Note that TarFile.__exit__ != TarFile.close, but
+# it's close enough for python2.6
+tar_open = TarFile.open
+if not hasattr(TarFile, '__exit__'):
+    def tar_open(*args, **kwargs):
+        return closing(TarFile.open(*args, **kwargs))
 
-CACHE = []
-CACHESIZE = 10
 
 class tzfile(tzfile):
     def __reduce__(self):
         return (gettz, (self._filename,))
 
-def getzoneinfofile():
-    filenames = sorted(os.listdir(os.path.join(os.path.dirname(__file__))))
-    filenames.reverse()
-    for entry in filenames:
-        if entry.startswith("zoneinfo") and ".tar." in entry:
-            return os.path.join(os.path.dirname(__file__), entry)
-    return None
 
-ZONEINFOFILE = getzoneinfofile()
+def getzoneinfofile_stream():
+    try:
+        return BytesIO(get_data(__name__, ZONEFILENAME))
+    except IOError as e:  # TODO: switch to FileNotFoundError?
+        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
+        return None
+
+
+class ZoneInfoFile(object):
+    def __init__(self, zonefile_stream=None):
+        if zonefile_stream is not None:
+            with tar_open(fileobj=zonefile_stream, mode='r') as tf:
+                # dict comprehension does not work on python2.6
+                # TODO: get back to the nicer syntax when we ditch python2.6
+                # self.zones = {zf.name: tzfile(tf.extractfile(zf),
+                #               filename = zf.name)
+                #              for zf in tf.getmembers() if zf.isfile()}
+                self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
+                                                   filename=zf.name))
+                                  for zf in tf.getmembers()
+                                  if zf.isfile() and zf.name != METADATA_FN)
+                # deal with links: they'll point to their parent object, so
+                # duplicate zones don't waste memory
+                # links = {zl.name: self.zones[zl.linkname]
+                #        for zl in tf.getmembers() if zl.islnk() or zl.issym()}
+                links = dict((zl.name, self.zones[zl.linkname])
+                             for zl in tf.getmembers() if
+                             zl.islnk() or zl.issym())
+                self.zones.update(links)
+                try:
+                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
+                    metadata_str = metadata_json.read().decode('UTF-8')
+                    self.metadata = json.loads(metadata_str)
+                except KeyError:
+                    # no metadata in tar file
+                    self.metadata = None
+        else:
+            self.zones = dict()
+            self.metadata = None
+
 
-del getzoneinfofile
+# The current API has gettz as a module function, although in fact it taps into
+# a stateful class. So as a workaround for now, without changing the API, we
+# will create a new "global" class instance the first time a user requests a
+# timezone. Ugly, but adheres to the API.
+#
+# TODO: deprecate this.
+_CLASS_ZONE_INSTANCE = list()
 
-def setcachesize(size):
-    global CACHESIZE, CACHE
-    CACHESIZE = size
-    del CACHE[size:]
 
 def gettz(name):
-    tzinfo = None
-    if ZONEINFOFILE:
-        for cachedname, tzinfo in CACHE:
-            if cachedname == name:
-                break
-        else:
-            tf = TarFile.open(ZONEINFOFILE)
-            try:
-                zonefile = tf.extractfile(name)
-            except KeyError:
-                tzinfo = None
-            else:
-                tzinfo = tzfile(zonefile)
-            tf.close()
-            CACHE.insert(0, (name, tzinfo))
-            del CACHE[CACHESIZE:]
-    return tzinfo
-
-def rebuild(filename, tag=None, format="gz"):
-    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
-
-    filename is the timezone tarball from ftp.iana.org/tz.
+    if len(_CLASS_ZONE_INSTANCE) == 0:
+        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
+
+
+def gettz_db_metadata():
+    """ Get the zonefile metadata
+
+    See `zonefile_metadata`_
 
+    :returns: A dictionary with the database metadata
     """
-    import tempfile, shutil
-    tmpdir = tempfile.mkdtemp()
-    zonedir = os.path.join(tmpdir, "zoneinfo")
-    moduledir = os.path.dirname(__file__)
-    if tag: tag = "-"+tag
-    targetname = "zoneinfo%s.tar.%s" % (tag, format)
-    try:
-        tf = TarFile.open(filename)
-        # The "backwards" zone file contains links to other files, so must be
-        # processed as last
-        for name in sorted(tf.getnames(),
-                           key=lambda k: k != "backward" and k or "z"):
-            if not (name.endswith(".sh") or
-                    name.endswith(".tab") or
-                    name == "leapseconds"):
-                tf.extract(name, tmpdir)
-                filepath = os.path.join(tmpdir, name)
-                try:
-                    # zic will return errors for nontz files in the package
-                    # such as the Makefile or README, so check_call cannot
-                    # be used (or at least extra checks would be needed)
-                    call(["zic", "-d", zonedir, filepath])
-                except OSError as e:
-                    if e.errno == 2:
-                        logging.error(
-                            "Could not find zic. Perhaps you need to install "
-                            "libc-bin or some other package that provides it, "
-                            "or it's not in your PATH?")
-                    raise
-        tf.close()
-        target = os.path.join(moduledir, targetname)
-        for entry in os.listdir(moduledir):
-            if entry.startswith("zoneinfo") and ".tar." in entry:
-                os.unlink(os.path.join(moduledir, entry))
-        tf = TarFile.open(target, "w:%s" % format)
-        for entry in os.listdir(zonedir):
-            entrypath = os.path.join(zonedir, entry)
-            tf.add(entrypath, entry)
-        tf.close()
-    finally:
-        shutil.rmtree(tmpdir)
+    if len(_CLASS_ZONE_INSTANCE) == 0:
+        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+    return _CLASS_ZONE_INSTANCE[0].metadata
+
+
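
For orientation, a short sketch of how the rewritten module is consumed; the zone name is an example and the metadata contents are whatever rebuild() stored (the key shown is an assumption):

    from dateutil.zoneinfo import gettz, gettz_db_metadata

    nyc = gettz("America/New_York")   # None if the bundled tarball is missing
    meta = gettz_db_metadata()        # dict written by rebuild(), e.g. {"tzversion": "2015b"}, or None
    print(nyc, meta)
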
diff --git a/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..a56bdfa19ab3c1185fe54e281a3401ace6a1a9d3
Binary files /dev/null and b/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ
diff --git a/lib/dateutil/zoneinfo/rebuild.py b/lib/dateutil/zoneinfo/rebuild.py
new file mode 100644
index 0000000000000000000000000000000000000000..e646148c09b5af705bd0932773326f7371b81174
--- /dev/null
+++ b/lib/dateutil/zoneinfo/rebuild.py
@@ -0,0 +1,43 @@
+import logging
+import os
+import tempfile
+import shutil
+import json
+from subprocess import check_call
+
+from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME
+
+
+def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
+    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
+
+    filename is the timezone tarball from ftp.iana.org/tz.
+
+    """
+    tmpdir = tempfile.mkdtemp()
+    zonedir = os.path.join(tmpdir, "zoneinfo")
+    moduledir = os.path.dirname(__file__)
+    try:
+        with tar_open(filename) as tf:
+            for name in zonegroups:
+                tf.extract(name, tmpdir)
+            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
+            try:
+                check_call(["zic", "-d", zonedir] + filepaths)
+            except OSError as e:
+                if e.errno == 2:
+                    logging.error(
+                        "Could not find zic. Perhaps you need to install "
+                        "libc-bin or some other package that provides it, "
+                        "or it's not in your PATH?")
+                raise
+        # write metadata file
+        with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
+            json.dump(metadata, f, indent=4, sort_keys=True)
+        target = os.path.join(moduledir, ZONEFILENAME)
+        with tar_open(target, "w:%s" % format) as tf:
+            for entry in os.listdir(zonedir):
+                entrypath = os.path.join(zonedir, entry)
+                tf.add(entrypath, entry)
+    finally:
+        shutil.rmtree(tmpdir)
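
A hedged invocation sketch for the new rebuild(); the tzdata filename, zonegroups and metadata values are placeholders, and zic must be available on PATH:

    from dateutil.zoneinfo.rebuild import rebuild

    rebuild("tzdata2015b.tar.gz",                # IANA release tarball (placeholder name)
            zonegroups=["africa", "antarctica", "asia", "australasia",
                        "europe", "northamerica", "southamerica",
                        "etcetera", "backward"],
            metadata={"tzversion": "2015b"})     # stored as METADATA inside the rebuilt tarball
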
diff --git a/sickbeard/common.py b/sickbeard/common.py
index f8b67ffdb6125c3e483f764969a18617c6ccd225..a4086734f165d3e6cba0729ad92b4d1b10f44d92 100644
--- a/sickbeard/common.py
+++ b/sickbeard/common.py
@@ -138,6 +138,18 @@ class Quality:
                       HDBLURAY: "720p BluRay",
                       FULLHDBLURAY: "1080p BluRay"}
 
+    sceneQualityStrings = {NONE: "N/A",
+                           UNKNOWN: "Unknown",
+                           SDTV: "HDTV",
+                           SDDVD: "BDRip",
+                           HDTV: "720p HDTV",
+                           RAWHDTV: "1080i HDTV",
+                           FULLHDTV: "1080p HDTV",
+                           HDWEBDL: "720p WEB-DL",
+                           FULLHDWEBDL: "1080p WEB-DL",
+                           HDBLURAY: "720p BluRay",
+                           FULLHDBLURAY: "1080p BluRay"}
+
     combinedQualityStrings = {ANYHDTV: "HDTV",
                               ANYWEBDL: "WEB-DL",
                               ANYBLURAY: "BluRay"}
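
The new table is keyed exactly like qualityStrings, so the naming code only needs a plain lookup; the values below are taken from the mapping above:

    from sickbeard.common import Quality

    print(Quality.sceneQualityStrings[Quality.HDTV])    # "720p HDTV"
    print(Quality.sceneQualityStrings[Quality.SDDVD])   # "BDRip"
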
diff --git a/sickbeard/network_timezones.py b/sickbeard/network_timezones.py
index deb5ebe07bbf54f25b544f2ef7507c218577d119..425e150f71d5c6f966f7ffc98c7fd2efc5a15a0a 100644
--- a/sickbeard/network_timezones.py
+++ b/sickbeard/network_timezones.py
@@ -17,18 +17,14 @@
 # You should have received a copy of the GNU General Public License
 # along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
 
+import re
+import datetime
+import requests
 from dateutil import tz
-from dateutil import zoneinfo
 
 from sickbeard import db
 from sickbeard import helpers
 from sickbeard import logger
-from sickrage.helper.encoding import ek
-from os.path import basename, join, isfile
-import os
-import re
-import datetime
-import requests
 
 # regex to parse time (12/24 hour format)
 time_regex = re.compile(r'(\d{1,2})(([:.](\d{2,2}))? ?([PA][. ]? ?M)|[:.](\d{2,2}))\b', flags=re.IGNORECASE)
@@ -39,121 +35,13 @@ network_dict = None
 
 sb_timezone = tz.tzlocal()
 
-
-# helper to remove failed temp download
-def _remove_zoneinfo_failed(filename):
-    try:
-        ek(os.remove, filename)
-    except:
-        pass
-
-
-# helper to remove old unneeded zoneinfo files
-def _remove_old_zoneinfo():
-    """
-    Removes zoneinfo tar.gz file from repository, as we do not need it
-    """
-    if zoneinfo.ZONEINFOFILE is not None:
-        cur_zoneinfo = ek(basename, zoneinfo.ZONEINFOFILE)
-    else:
-        return
-
-    cur_file = helpers.real_path(ek(join, ek(os.path.dirname, zoneinfo.__file__), cur_zoneinfo))
-    for (path, dirs, files) in ek(os.walk, helpers.real_path(ek(os.path.dirname, zoneinfo.__file__))):
-        for filename in files:
-            if filename.endswith('.tar.gz'):
-                file_w_path = ek(join, path, filename)
-                if file_w_path != cur_file and ek(isfile, file_w_path):
-                    try:
-                        ek(os.remove, file_w_path)
-                        logger.log(u'Delete unneeded old zoneinfo File: %s' % file_w_path)
-                    except:
-                        logger.log(u'Unable to delete: %s' % file_w_path, logger.WARNING)
-
-
-# update the dateutil zoneinfo
-def _update_zoneinfo():
-    """
-    Request new zoneinfo directly from repository
-    """
-    global sb_timezone
-    sb_timezone = tz.tzlocal()
-    url_zv = 'http://sickragetv.github.io/sb_network_timezones/zoneinfo.txt'
-    try:
-        url_data = helpers.getURL(url_zv, session=requests.Session())
-        if not url_data:
-            raise
-
-        # Filename of existing zoneinfo
-        if zoneinfo.ZONEINFOFILE is not None:
-            cur_zoneinfo = ek(basename, zoneinfo.ZONEINFOFILE)
-        else:
-            cur_zoneinfo = None
-
-        # Filename and hash of new zoneinfo
-        (new_zoneinfo, zoneinfo_md5) = url_data.strip().rsplit(u' ')
-    except Exception as e:
-        logger.log(u'Loading zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s' %
-                url_zv, logger.WARNING)
-        return
-
-    if (cur_zoneinfo is not None) and (new_zoneinfo == cur_zoneinfo):
-        return
-
-    # now load the new zoneinfo
-    url_tar = u'http://sickragetv.github.io/sb_network_timezones/%s' % new_zoneinfo
-
-    zonefile = helpers.real_path(ek(join, ek(os.path.dirname, zoneinfo.__file__), new_zoneinfo))
-    zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile)
-
-    if ek(os.path.exists, zonefile_tmp):
-        try:
-            ekk(os.remove, zonefile_tmp)
-        except:
-            logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.WARNING)
-            return
-
-    if not helpers.download_file(url_tar, zonefile_tmp, session=requests.Session()):
-        return
-
-    if not ek(os.path.exists, zonefile_tmp):
-        logger.log(u'Download of %s failed.' % zonefile_tmp, logger.WARNING)
-        return
-
-    new_hash = str(helpers.md5_for_file(zonefile_tmp))
-
-    if zoneinfo_md5.upper() == new_hash.upper():
-        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.INFO)
-        try:
-            # remove the old zoneinfo file
-            if cur_zoneinfo is not None:
-                old_file = helpers.real_path(
-                    ek(join, ek(os.path.dirname, zoneinfo.__file__), cur_zoneinfo))
-                if ek(os.path.exists, old_file):
-                    ek(os.remove, old_file)
-            # rename downloaded file
-            ek(os.rename, zonefile_tmp, zonefile)
-            # load the new zoneinfo
-            reload(zoneinfo)
-            sb_timezone = tz.tzlocal()
-        except:
-            _remove_zoneinfo_failed(zonefile_tmp)
-            return
-    else:
-        _remove_zoneinfo_failed(zonefile_tmp)
-        logger.log(u'MD5 hash does not match: %s File: %s' % (zoneinfo_md5.upper(), new_hash.upper()), logger.WARNING)
-        return
-
-
 # update the network timezone table
 def update_network_dict():
     """Update timezone information from SR repositories"""
-    _remove_old_zoneinfo()
-    _update_zoneinfo()
 
     url = 'http://sickragetv.github.io/sb_network_timezones/network_timezones.txt'
     url_data = helpers.getURL(url, session=requests.Session())
-    if url_data is None:
+    if not url_data:
         logger.log(u'Updating network timezones failed, this can happen from time to time. URL: %s' % url, logger.WARNING)
         load_network_dict()
         return
@@ -204,14 +92,15 @@ def load_network_dict():
             update_network_dict()
             cur_network_list = my_db.select('SELECT * FROM network_timezones;')
         d = dict(cur_network_list)
-    except:
+    except Exception:
         d = {}
+    # pylint: disable=W0603
     global network_dict
     network_dict = d
 
 
 # get timezone of a network or return default timezone
-def get_network_timezone(network, network_dict):
+def get_network_timezone(network, _network_dict):
     """
     Get a timezone of a network from a given network dict
 
@@ -223,21 +112,12 @@ def get_network_timezone(network, network_dict):
         return sb_timezone
 
     try:
-        if zoneinfo.ZONEINFOFILE is not None:
-            try:
-                n_t = tz.gettz(network_dict[network])
-            except:
-                return sb_timezone
-
-            if n_t is not None:
-                return n_t
-            else:
-                return sb_timezone
-        else:
-            return sb_timezone
-    except:
+        n_t = tz.gettz(_network_dict[network])
+    except Exception:
         return sb_timezone
 
+    return n_t if n_t is not None else sb_timezone
+
 
 # parse date and time string into local time
 def parse_date_time(d, t, network):
@@ -249,8 +129,10 @@ def parse_date_time(d, t, network):
     :param network: network to use as base
     :return: datetime object containing local time
     """
-    if network_dict is None:
+
+    if not network_dict:
         load_network_dict()
+
     mo = time_regex.search(t)
     if mo is not None and len(mo.groups()) >= 5:
         if mo.group(5) is not None:
@@ -264,14 +146,14 @@ def parse_date_time(d, t, network):
                         hr += 12
                     elif am_regex.search(ap) is not None and hr == 12:
                         hr -= 12
-            except:
+            except Exception:
                 hr = 0
                 m = 0
         else:
             try:
                 hr = helpers.tryInt(mo.group(1))
                 m = helpers.tryInt(mo.group(6))
-            except:
+            except Exception:
                 hr = 0
                 m = 0
     else:
@@ -286,7 +168,7 @@ def parse_date_time(d, t, network):
         foreign_timezone = get_network_timezone(network, network_dict)
         foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
         return foreign_naive
-    except:
+    except Exception:
         return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=sb_timezone)
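
A small sketch of the slimmed-down lookup path: the timezone name now resolves straight through dateutil's gettz, and anything unknown or failing falls back to sb_timezone (the sample mapping is illustrative):

    from sickbeard import network_timezones as nt

    sample = {"NBC": "US/Eastern"}                  # illustrative network -> tz name mapping
    print(nt.get_network_timezone("NBC", sample))   # tz.gettz("US/Eastern"), or sb_timezone on failure
    print(nt.get_network_timezone(None, sample))    # sb_timezone
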
 
 
diff --git a/sickbeard/postProcessor.py b/sickbeard/postProcessor.py
index 93358b248cb59a47d568df53982620e6beac0f92..f2558b9ecd58f7494d0c9abd32ad422c75419ce1 100644
--- a/sickbeard/postProcessor.py
+++ b/sickbeard/postProcessor.py
@@ -203,7 +203,7 @@ class PostProcessor(object):
                 continue
 
             # Exclude .rar files from associated list
-            if re.search('(^.+\.(rar|r\d+)$)', associated_file_path):
+            if re.search(r'(^.+\.(rar|r\d+)$)', associated_file_path):
                 continue
 
             if ek(os.path.isfile, associated_file_path):
@@ -443,7 +443,7 @@ class PostProcessor(object):
         # search the database for a possible match and return immediately if we find one
         myDB = db.DBConnection()
         for curName in names:
-            search_name = re.sub("[\.\-\ ]", "_", curName)
+            search_name = re.sub(r"[\.\-\ ]", "_", curName)
             sql_results = myDB.select("SELECT * FROM history WHERE resource LIKE ?", [search_name])
 
             if len(sql_results) == 0:
@@ -479,7 +479,7 @@ class PostProcessor(object):
 
         # remember whether it's a proper
         if parse_result.extra_info:
-            self.is_proper = re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', parse_result.extra_info, re.I) != None
+            self.is_proper = re.search(r'(^|[\. _-])(proper|repack)([\. _-]|$)', parse_result.extra_info, re.I) is not None
 
         # if the result is complete then remember that for later
         # if the result is complete then set release name
@@ -487,7 +487,7 @@ class PostProcessor(object):
                                          or parse_result.air_date) and parse_result.release_group:
 
             if not self.release_name:
-                self.release_name = helpers.remove_extension(ek(os.path.basename, parse_result.original_name))
+                self.release_name = helpers.remove_non_release_groups(helpers.remove_extension(ek(os.path.basename, parse_result.original_name)))
 
         else:
             logger.log(u"Parse result not sufficient (all following have to be set). will not save release name",
@@ -744,7 +744,7 @@ class PostProcessor(object):
                 return ep_quality
 
         # Try getting quality from the episode (snatched) status
-        if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER:
+        if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
             oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)  # @UnusedVariable
             if ep_quality != common.Quality.UNKNOWN:
                 self._log(
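
With SNATCHED_BEST now contributing to the quality fallback, the check reduces to the composite-status split below; compositeStatus is assumed to be the existing inverse helper in sickbeard.common:

    from sickbeard import common

    # A 720p HDTV episode snatched as "best" quality:
    status = common.Quality.compositeStatus(common.SNATCHED_BEST, common.Quality.HDTV)
    snatched_any = common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST
    if status in snatched_any:
        _, ep_quality = common.Quality.splitCompositeStatus(status)
        print(common.Quality.sceneQualityStrings[ep_quality])   # "720p HDTV"
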
diff --git a/sickbeard/properFinder.py b/sickbeard/properFinder.py
index d4caf3718a435a29be253194943d57573208a9e0..b12940d2a6e0197a018bdfeb1ac1c44b9d38f383 100644
--- a/sickbeard/properFinder.py
+++ b/sickbeard/properFinder.py
@@ -17,26 +17,23 @@
 # You should have received a copy of the GNU General Public License
 # along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
 
+import re
 import time
 import datetime
 import operator
 import threading
 import traceback
-import re
-
-from search import pickBestResult
 
 import sickbeard
 
 from sickbeard import db
 from sickbeard import helpers, logger
-from sickbeard import search
-
+from sickbeard.search import snatchEpisode
+from sickbeard.search import pickBestResult
 from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality, cpu_presets
 from sickrage.helper.exceptions import AuthException, ex
 from sickrage.show.History import History
-
-from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
 
 
 class ProperFinder:
@@ -100,7 +97,7 @@ class ProperFinder:
 
             # if they haven't been added by a different provider than add the proper to the list
             for x in curPropers:
-                if not re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', x.name, re.I):
+                if not re.search(r'(^|[\. _-])(proper|repack)([\. _-]|$)', x.name, re.I):
                     logger.log(u'findPropers returned a non-proper, we have caught and skipped it.', logger.DEBUG)
                     continue
 
@@ -262,7 +259,7 @@ class ProperFinder:
                 result.content = curProper.content
 
                 # snatch it
-                search.snatchEpisode(result, SNATCHED_PROPER)
+                snatchEpisode(result, SNATCHED_PROPER)
                 time.sleep(cpu_presets[sickbeard.CPU_PRESET])
 
     def _genericName(self, name):
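
The proper/repack filter is now a raw-string pattern; a quick check of what it accepts:

    import re

    proper_pattern = r'(^|[\. _-])(proper|repack)([\. _-]|$)'
    print(bool(re.search(proper_pattern, 'Show.S01E01.PROPER.720p.HDTV.x264-GRP', re.I)))  # True
    print(bool(re.search(proper_pattern, 'Show.S01E01.REPACK.720p.HDTV.x264-GRP', re.I)))  # True
    print(bool(re.search(proper_pattern, 'Show.S01E01.720p.HDTV.x264-GRP', re.I)))         # False
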
diff --git a/sickbeard/providers/generic.py b/sickbeard/providers/generic.py
index dc00696bcc94cfd4378c87502e075db13fc3f3c9..4c2c333f2629cff74589e6bd577f310f4bda3e1e 100644
--- a/sickbeard/providers/generic.py
+++ b/sickbeard/providers/generic.py
@@ -649,7 +649,7 @@ class TorrentProvider(GenericProvider):
             'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
             ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
             ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
-            ' AND e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED]) + ')'
+            ' AND e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST]) + ')'
         )
 
         for sqlshow in sqlResults or []:
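
The widened status filter simply adds the SNATCHED_BEST composites to the IN (...) list; the appended fragment is built like this (the numeric values depend on the composite-status encoding):

    from sickbeard.common import Quality

    statuses = Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST
    clause = ' AND e.status IN (' + ','.join(str(x) for x in statuses) + ')'
    print(clause)   # one composite status per quality, comma-separated
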
diff --git a/sickbeard/providers/hdtorrents.py b/sickbeard/providers/hdtorrents.py
index 619e96a51a48b75380bee8e593a528143b04cea2..eeda3f3726ee5ec984525010b4470dcbd11b03a2 100644
--- a/sickbeard/providers/hdtorrents.py
+++ b/sickbeard/providers/hdtorrents.py
@@ -18,7 +18,7 @@
 # along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
 
 import re
-import urllib
+from six.moves import urllib
 import requests
 from bs4 import BeautifulSoup
 
@@ -96,7 +96,7 @@ class HDTorrentsProvider(generic.TorrentProvider):
             for search_string in search_strings[mode]:
 
                 if mode != 'RSS':
-                    searchURL = self.urls['search'] % (urllib.quote_plus(search_string.replace('.', ' ')), self.categories)
+                    searchURL = self.urls['search'] % (urllib.parse.quote_plus(search_string), self.categories)
                 else:
                     searchURL = self.urls['rss'] % self.categories
 
@@ -118,7 +118,7 @@ class HDTorrentsProvider(generic.TorrentProvider):
                     logger.log(u"Could not find table of torrents mainblockcontenttt", logger.ERROR)
                     continue
 
-                data = data[index:]
+                data = urllib.parse.unquote(data[index:].encode('utf-8')).decode('utf-8')
 
                 html = BeautifulSoup(data, 'html5lib')
                 if not html:
@@ -151,6 +151,7 @@ class HDTorrentsProvider(generic.TorrentProvider):
                             try:
                                 if None is title and cell.get('title') and cell.get('title') in 'Download':
                                     title = re.search('f=(.*).torrent', cell.a['href']).group(1).replace('+', '.')
+                                    title = title.decode('utf-8')
                                     download_url = self.urls['home'] % cell.a['href']
                                     continue
                                 if None is seeders and cell.get('class')[0] and cell.get('class')[0] in 'green' 'yellow' 'red':
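
With the calls routed through six's urllib.parse layer as above, quoting behaves the same on Python 2 and 3; a minimal round trip:

    from six.moves import urllib

    q = urllib.parse.quote_plus('Show Name S01E01')
    print(q)                             # Show+Name+S01E01
    print(urllib.parse.unquote_plus(q))  # Show Name S01E01
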
diff --git a/sickbeard/providers/thepiratebay.py b/sickbeard/providers/thepiratebay.py
index 81490d0a4abded86f7ad0ca2d1b0ddcd1ef35af5..8586e2e0376c58709d1a0d49deaa9949b8f5ace5 100644
--- a/sickbeard/providers/thepiratebay.py
+++ b/sickbeard/providers/thepiratebay.py
@@ -43,9 +43,9 @@ class ThePirateBayProvider(generic.TorrentProvider):
         self.cache = ThePirateBayCache(self)
 
         self.urls = {
-            'base_url': 'https://thepiratebay.gd/',
-            'search': 'https://thepiratebay.gd/s/',
-            'rss': 'https://thepiratebay.gd/tv/latest'
+            'base_url': 'https://pirateproxy.la/',
+            'search': 'https://pirateproxy.la/s/',
+            'rss': 'https://pirateproxy.la/tv/latest'
         }
 
         self.url = self.urls['base_url']
diff --git a/sickbeard/providers/torrentbytes.py b/sickbeard/providers/torrentbytes.py
index 461009f78beee3017751d6cb898e6441682e757f..48d4c5d5a333eab66582a77f7a1bd1adb67d48c8 100644
--- a/sickbeard/providers/torrentbytes.py
+++ b/sickbeard/providers/torrentbytes.py
@@ -1,4 +1,4 @@
-# Author: Idan Gutman
+# Author: Idan Gutman
 # URL: http://code.google.com/p/sickbeard/
 #
 # This file is part of SickRage.
@@ -39,6 +39,7 @@ class TorrentBytesProvider(generic.TorrentProvider):
         self.ratio = None
         self.minseed = None
         self.minleech = None
+        self.freeleech = False
 
         self.urls = {'base_url': 'https://www.torrentbytes.net',
                      'login': 'https://www.torrentbytes.net/takelogin.php',
@@ -98,22 +99,33 @@ class TorrentBytesProvider(generic.TorrentProvider):
 
                 try:
                     with BS4Parser(data, features=["html5lib", "permissive"]) as html:
-                        torrent_table = html.find('table', attrs={'border': '1'})
-                        torrent_rows = torrent_table.find_all('tr') if torrent_table else []
-
                         #Continue only if one Release is found
-                        if len(torrent_rows) < 2:
+                        empty = html.find(text='Nothing found!')
+                        if empty:
                             logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                             continue
 
+                        torrent_table = html.find('table', attrs={'border': '1'})
+                        torrent_rows = torrent_table.find_all('tr') if torrent_table else []
+
                         for result in torrent_rows[1:]:
                             cells = result.find_all('td')
-
+                            size = None
                             link = cells[1].find('a', attrs={'class': 'index'})
 
                             full_id = link['href'].replace('details.php?id=', '')
                             torrent_id = full_id.split("&")[0]
 
+                            #Free leech torrents are marked with green [F L] in the title (i.e. <font color=green>[F&nbsp;L]</font>)
+                            freeleechTag = cells[1].find('font', attrs={'color': 'green'})
+                            if freeleechTag and freeleechTag.text == u'[F\xa0L]':
+                                isFreeleechTorrent = True
+                            else:
+                                isFreeleechTorrent = False
+
+                            if self.freeleech and not isFreeleechTorrent:
+                                continue
+
                             try:
                                 if link.has_key('title'):
                                     title = cells[1].find('a', {'class': 'index'})['title']
@@ -121,9 +133,15 @@ class TorrentBytesProvider(generic.TorrentProvider):
                                     title = link.contents[0]
                                 download_url = self.urls['download'] % (torrent_id, link.contents[0])
                                 seeders = int(cells[8].find('span').contents[0])
-                                leechers = int(cells[9].find('span').contents[0])
-                                #FIXME
-                                size = -1
+                                leechers = int(cells[9].find('span').contents[0])
+
+                                # Need size for failed downloads handling
+                                if size is None:
+                                    if re.match(r'[0-9]+,?\.?[0-9]*[KkMmGg]+[Bb]+', cells[6].text):
+                                        size = self._convertSize(cells[6].text)
+                                        if not size:
+                                            size = -1
+
                             except (AttributeError, TypeError):
                                 continue
 
@@ -154,6 +172,20 @@ class TorrentBytesProvider(generic.TorrentProvider):
 
     def seedRatio(self):
         return self.ratio
+
+    def _convertSize(self, sizeString):
+        size = sizeString[:-2]
+        modifier = sizeString[-2:].upper()
+        size = float(size)
+        if modifier == 'KB':
+            size = size * 1024
+        elif modifier == 'MB':
+            size = size * 1024**2
+        elif modifier == 'GB':
+            size = size * 1024**3
+        elif modifier == 'TB':
+            size = size * 1024**4
+        return int(size)
 
 
 class TorrentBytesCache(tvcache.TVCache):
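
A standalone sketch of the same size conversion for reference; it assumes the '<number> <unit>' strings the provider's size column produces:

    def convert_size(size_string):
        number, unit = size_string[:-2], size_string[-2:].upper()
        multiplier = {'KB': 1024, 'MB': 1024**2, 'GB': 1024**3, 'TB': 1024**4}.get(unit, 1)
        return int(float(number) * multiplier)

    print(convert_size('700 MB'))   # 734003200
    print(convert_size('1.4 GB'))   # 1503238553
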
diff --git a/sickbeard/subtitles.py b/sickbeard/subtitles.py
index 1b5ab9f938d6d63adbe548b8ffa330ab7dc4b5e0..a9e3000dc46b2f60a44a62c520a2b1ca72b9cb64 100644
--- a/sickbeard/subtitles.py
+++ b/sickbeard/subtitles.py
@@ -140,14 +140,15 @@ def downloadSubtitles(subtitles_info):
                         'opensubtitles': {'username': sickbeard.OPENSUBTITLES_USER, 'password': sickbeard.OPENSUBTITLES_PASS}}
 
     pool = subliminal.api.ProviderPool(providers=providers, provider_configs=provider_configs)
-    subtitles_list = pool.list_subtitles(video, languages)
 
     try:
-        found_subtitles = pool.download_best_subtitles(subtitles_list, video, languages=languages, hearing_impaired=sickbeard.SUBTITLES_HEARING_IMPAIRED, only_one=not sickbeard.SUBTITLES_MULTI)
-        if not found_subtitles:
+        subtitles_list = pool.list_subtitles(video, languages)
+        if not subtitles_list:
             logger.log(u'%s: No subtitles found for S%02dE%02d on any provider' % (subtitles_info['show.indexerid'], subtitles_info['season'], subtitles_info['episode']), logger.DEBUG)
             return (existing_subtitles, None)
 
+        found_subtitles = pool.download_best_subtitles(subtitles_list, video, languages=languages, hearing_impaired=sickbeard.SUBTITLES_HEARING_IMPAIRED, only_one=not sickbeard.SUBTITLES_MULTI)
+
         save_subtitles(video, found_subtitles, directory=subtitles_path, single=not sickbeard.SUBTITLES_MULTI)
 
         if not sickbeard.EMBEDDED_SUBTITLES_ALL and sickbeard.SUBTITLES_EXTRA_SCRIPTS and video_path.endswith(('.mkv', '.mp4')):
@@ -312,7 +313,7 @@ def getEmbeddedLanguages(video_path):
             else:
                 logger.log('MKV has no subtitle track', logger.DEBUG)
     except MalformedMKVError:
-        logger.log('MKV seems to be malformed, ignoring embedded subtitles', logger.INFO)
+        logger.log('MKV seems to be malformed ( %s ), ignoring embedded subtitles' % video_path, logger.INFO)
 
     return embedded_subtitle_languages
 
diff --git a/sickbeard/tv.py b/sickbeard/tv.py
index 9e10b4c32f3db7942ba65f3f45135562c11af91f..ee7229b95b3b9965613789e41af6250223cad603 100644
--- a/sickbeard/tv.py
+++ b/sickbeard/tv.py
@@ -2112,6 +2112,35 @@ class TVEpisode(object):
                 name = helpers.remove_non_release_groups(helpers.remove_extension(name))
             return name
 
+        def release_codec(name):
+            if hasattr(self, 'location') and self.location:
+                codecList = ['xvid', 'x264', 'x265', 'h264', 'x 264', 'x 265', 'h 264', 'x.264', 'x.265', 'h.264', 'divx']
+                found_codec = None
+
+                for codec in codecList:
+                    if codec in name.lower():
+                        found_codec = codec
+
+                if found_codec:
+                    if codecList[0] in found_codec:
+                        found_codec = 'XviD'
+                    elif found_codec in (codecList[1], codecList[4], codecList[7]):
+                        found_codec = codecList[1]
+                    elif found_codec in (codecList[2], codecList[5], codecList[8]):
+                        found_codec = codecList[2]
+                    elif found_codec in (codecList[3], codecList[6], codecList[9]):
+                        found_codec = codecList[3]
+                    elif codecList[10] in found_codec:
+                        found_codec = 'DivX'
+
+                    logger.log(u"Found the following codec for " + name + ": " + found_codec, logger.DEBUG)
+                    return " " + found_codec
+                else:
+                    logger.log(u"Couldn't find any codec for " + name + ". Codec information won't be added.", logger.DEBUG)
+                    return ""
+            else:
+                return ""
+
         def release_group(show, name):
             if name:
                 name = helpers.remove_non_release_groups(helpers.remove_extension(name))
@@ -2136,6 +2165,9 @@ class TVEpisode(object):
         else:
             show_name = self.show.name
 
+        # try to get the release encoder to comply with scene naming standards
+        encoder = release_codec(self.release_name)
+
         #try to get the release group
         rel_grp = {};
         rel_grp["SiCKRAGE"] = 'SiCKRAGE';
@@ -2166,6 +2198,9 @@ class TVEpisode(object):
             '%QN': Quality.qualityStrings[epQual],
             '%Q.N': dot(Quality.qualityStrings[epQual]),
             '%Q_N': us(Quality.qualityStrings[epQual]),
+            '%SQN': Quality.sceneQualityStrings[epQual] + encoder,
+            '%SQ.N': dot(Quality.sceneQualityStrings[epQual] + encoder),
+            '%SQ_N': us(Quality.sceneQualityStrings[epQual] + encoder),
             '%S': str(self.season),
             '%0S': '%02d' % self.season,
             '%E': str(self.episode),
@@ -2178,6 +2213,7 @@ class TVEpisode(object):
             '%XAB': '%(#)03d' % {'#': self.scene_absolute_number},
             '%RN': release_name(self.release_name),
             '%RG': rel_grp[relgrp],
+            '%CRG': rel_grp[relgrp].upper(),
             '%AD': str(self.airdate).replace('-', ' '),
             '%A.D': str(self.airdate).replace('-', '.'),
             '%A_D': us(str(self.airdate)),
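
Taken together, the new tokens expand as sketched below for a 720p HDTV x264 release; dot() and us() here are stand-ins for the module's existing separator helpers, and the release group is illustrative:

    def dot(name):
        return name.replace(' ', '.')   # stand-in for the module's dot() helper

    def us(name):
        return name.replace(' ', '_')   # stand-in for the module's us() helper

    scene_quality = '720p HDTV' + ' x264'   # sceneQualityStrings[epQual] + encoder
    print(scene_quality)                    # %SQN  -> 720p HDTV x264
    print(dot(scene_quality))               # %SQ.N -> 720p.HDTV.x264
    print(us(scene_quality))                # %SQ_N -> 720p_HDTV_x264
    print('SiCKRAGE'.upper())               # %CRG  -> SICKRAGE
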