diff --git a/gui/slick/images/subtitles/legendastv.png b/gui/slick/images/subtitles/legendastv.png
new file mode 100644
index 0000000000000000000000000000000000000000..47cd638b7edf0ae637863a87374c46896d8aa50b
Binary files /dev/null and b/gui/slick/images/subtitles/legendastv.png differ
diff --git a/gui/slick/views/config_subtitles.mako b/gui/slick/views/config_subtitles.mako
index 8711862e4d7852290051a9770d950fb378b64b78..28cf9a36921a08105d4daba25a3cfe1353abfc4f 100644
--- a/gui/slick/views/config_subtitles.mako
+++ b/gui/slick/views/config_subtitles.mako
@@ -34,11 +34,12 @@ $('#subtitles_dir').fileBrowser({ title: 'Select Subtitles Download Directory' }
 
             <div id="config-components">
                 <ul>
-                    <li><a href="#core-component-group4">Subtitles Search</a></li>
+                    <li><a href="#core-component-group1">Subtitles Search</a></li>
                     <li><a href="#core-component-group2">Subtitles Plugin</a></li>
+                    <li><a href="#core-component-group3">Plugin Settings</a></li>
                 </ul>
 
-                <div id="core-component-group4" class="component-group">
+                <div id="core-component-group1" class="component-group">
 
                     <div class="component-group-desc">
                         <h3>Subtitles Search</h3>
@@ -110,6 +111,15 @@ $('#subtitles_dir').fileBrowser({ title: 'Select Subtitles Download Directory' }
                                         </span>
                                     </label>
                                 </div>
+                                <div class="field-pair">
+                                    <label class="clearfix" for="subtitles_hearing_impaired">
+                                        <span class="component-title">Hearing Impaired Subtitles</span>
+                                        <span class="component-desc">
+                                            <input type="checkbox" name="subtitles_hearing_impaired" id="subtitles_hearing_impaired" ${('', 'checked="checked"')[bool(sickbeard.SUBTITLES_HEARING_IMPAIRED)]}/>
+                                            <p>Download hearing impaired style subtitles?</p>
+                                        </span>
+                                    </label>
+                                </div>
                                 <div class="field-pair">
                                     <label class="nocheck">
                                         <span class="component-title">Extra Scripts</span>
@@ -168,9 +178,42 @@ $('#subtitles_dir').fileBrowser({ title: 'Select Subtitles Download Directory' }
                         <br/><input type="submit" class="btn config_submitter" value="Save Changes" /><br/>
                     </fieldset>
                 </div><!-- /component-group2 //-->
+                <div id="core-component-group3" class="component-group">
+                    <div class="component-group-desc">
+                        <h3>Plugin Settings</h3>
+                        <p>Set the username and password for each provider</p>
+                    </div><!-- /component-group-desc //-->
 
-                <br/><input type="submit" class="btn config_submitter" value="Save Changes" /><br/>
-
+                    <fieldset class="component-group-list" style="margin-left: 50px; margin-top:36px">
+                        <%
+                            providerLoginDict = {
+                                'legendastv': {'user': sickbeard.LEGENDASTV_USER, 'pass': sickbeard.LEGENDASTV_PASS},
+                                'addic7ed': {'user': sickbeard.ADDIC7ED_USER, 'pass': sickbeard.ADDIC7ED_PASS},
+                                'opensubtitles': {'user': sickbeard.OPENSUBTITLES_USER, 'pass': sickbeard.OPENSUBTITLES_PASS}}
+                        %>
+                        % for curService in sickbeard.subtitles.sortedServiceList():
+                            % if curService['name'] not in providerLoginDict.keys():
+                                <% continue %>
+                            % endif
+                            ##<div class="field-pair${(' hidden', '')[curService['enabled']]}"> ## Need js to show/hide on save
+                            <div class="field-pair">
+                                <label class="nocheck" for="${curService['name']}_user">
+                                    <span class="component-title">${curService['name'].capitalize()} User Name</span>
+                                    <span class="component-desc">
+                                        <input type="text" name="${curService['name']}_user" id="${curService['name']}_user" value="${providerLoginDict[curService['name']]['user']}" class="form-control input-sm input300" />
+                                    </span>
+                                </label>
+                                <label class="nocheck" for="${curService['name']}_pass">
+                                    <span class="component-title">${curService['name'].capitalize()} Password</span>
+                                    <span class="component-desc">
+                                        <input type="password" name="${curService['name']}_pass" id="${curService['name']}_pass" value="${providerLoginDict[curService['name']]['pass']}" class="form-control input-sm input300" />
+                                    </span>
+                                </label>
+                            </div>
+                        % endfor
+                        <br/><input type="submit" class="btn config_submitter" value="Save Changes" /><br/>
+                    </fieldset>
+                </div><!-- /component-group3 //-->
             </div><!-- /config-components //-->
 
 </form>
diff --git a/gui/slick/views/schedule.mako b/gui/slick/views/schedule.mako
index c1b03a6e10ed2ba170e837b2bdfd3473c4745a05..6f6e69472c5d8f926f8d42951d3be3e21af67356 100644
--- a/gui/slick/views/schedule.mako
+++ b/gui/slick/views/schedule.mako
@@ -207,6 +207,12 @@
         continue
 
     run_time = cur_result['runtime']
+    cur_ep_airdate = cur_result['localtime'].date()
+
+    if run_time:
+        cur_ep_enddate = cur_result['localtime'] + datetime.timedelta(minutes = run_time)
+    else:
+        cur_ep_enddate = cur_result['localtime']
 %>
     % if 'network' == sort:
         <% show_network = ('no network', cur_result['network'])[bool(cur_result['network'])] %>
@@ -216,44 +222,36 @@
 
             <% cur_segment = cur_result['network'] %>
         % endif
-        <% cur_ep_airdate = cur_result['localtime'].date() %>
-
-        % if run_time:
-            <% cur_ep_enddate = cur_result['localtime'] + datetime.timedelta(minutes = run_time) %>
-            % if cur_ep_enddate < today:
-                <% show_div = 'ep_listing listing-overdue' %>
-            % elif cur_ep_airdate >= next_week.date():
-                <% show_div = 'ep_listing listing-toofar' %>
-            % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
-                % if cur_ep_airdate == today.date():
-                    <% show_div = 'ep_listing listing-current' %>
-                % else:
-                    <% show_div = 'ep_listing listing-default' %>
-                % endif
+
+        % if cur_ep_enddate < today:
+            <% show_div = 'ep_listing listing-overdue' %>
+        % elif cur_ep_airdate >= next_week.date():
+            <% show_div = 'ep_listing listing-toofar' %>
+        % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
+            % if cur_ep_airdate == today.date():
+                <% show_div = 'ep_listing listing-current' %>
+            % else:
+                <% show_div = 'ep_listing listing-default' %>
             % endif
         % endif
-    % elif 'date' == sort:
-        <% cur_ep_airdate = cur_result['localtime'].date() %>
 
+    % elif 'date' == sort:
         % if cur_segment != cur_ep_airdate:
-            %if run_time:
-                <% cur_ep_enddate = cur_result['localtime'] + datetime.timedelta(minutes = run_time) %>
-                % if cur_ep_enddate < today and cur_ep_airdate != today.date() and not missed_header:
-                        <br /><h2 class="day">Missed</h2>
+            % if cur_ep_enddate < today and cur_ep_airdate != today.date() and not missed_header:
+                <br /><h2 class="day">Missed</h2>
                 <% missed_header = True %>
-                % elif cur_ep_airdate >= next_week.date() and not too_late_header:
-                        <br /><h2 class="day">Later</h2>
+            % elif cur_ep_airdate >= next_week.date() and not too_late_header:
+                <br /><h2 class="day">Later</h2>
                 <% too_late_header = True %>
-                % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
-                    % if cur_ep_airdate == today.date():
-                        <br /><h2 class="day">${datetime.date.fromordinal(cur_ep_airdate.toordinal()).strftime('%A').decode(sickbeard.SYS_ENCODING).capitalize()}<span style="font-size: 14px; vertical-align: top;">[Today]</span></h2>
-                        <% today_header = True %>
-                    % else:
-                        <br /><h2 class="day">${datetime.date.fromordinal(cur_ep_airdate.toordinal()).strftime('%A').decode(sickbeard.SYS_ENCODING).capitalize()}</h2>
-                    % endif
+            % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
+                % if cur_ep_airdate == today.date():
+                    <br /><h2 class="day">${datetime.date.fromordinal(cur_ep_airdate.toordinal()).strftime('%A').decode(sickbeard.SYS_ENCODING).capitalize()}<span style="font-size: 14px; vertical-align: top;">[Today]</span></h2>
+                    <% today_header = True %>
+                % else:
+                    <br /><h2 class="day">${datetime.date.fromordinal(cur_ep_airdate.toordinal()).strftime('%A').decode(sickbeard.SYS_ENCODING).capitalize()}</h2>
                 % endif
             % endif
-                <% cur_segment = cur_ep_airdate %>
+            <% cur_segment = cur_ep_airdate %>
         % endif
 
         % if cur_ep_airdate == today.date() and not today_header:
@@ -261,38 +259,32 @@
             <br /><h2 class="day">${datetime.date.fromordinal(cur_ep_airdate.toordinal()).strftime('%A').decode(sickbeard.SYS_ENCODING).capitalize()} <span style="font-size: 14px; vertical-align: top;">[Today]</span></h2>
             <% today_header = True %>
         % endif
-        % if run_time:
-            % if cur_ep_enddate < today:
-                <% show_div = 'ep_listing listing-overdue' %>
-            % elif cur_ep_airdate >= next_week.date():
-                <% show_div = 'ep_listing listing-toofar' %>
-            % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
-                % if cur_ep_airdate == today.date():
-                    <% show_div = 'ep_listing listing-current' %>
-                % else:
-                    <% show_div = 'ep_listing listing-default'%>
-                % endif
+
+        % if cur_ep_enddate < today:
+            <% show_div = 'ep_listing listing-overdue' %>
+        % elif cur_ep_airdate >= next_week.date():
+            <% show_div = 'ep_listing listing-toofar' %>
+        % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
+            % if cur_ep_airdate == today.date():
+                <% show_div = 'ep_listing listing-current' %>
+            % else:
+                <% show_div = 'ep_listing listing-default'%>
             % endif
         % endif
 
-        % elif 'show' == sort:
-            <% cur_ep_airdate = cur_result['localtime'].date() %>
-
-            % if run_time:
-                <% cur_ep_enddate = cur_result['localtime'] + datetime.timedelta(minutes = run_time) %>
-                % if cur_ep_enddate < today:
-                    <% show_div = 'ep_listing listing-overdue listingradius' %>
-                % elif cur_ep_airdate >= next_week.date():
-                    <% show_div = 'ep_listing listing-toofar listingradius' %>
-                % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
-                    % if cur_ep_airdate == today.date():
-                        <% show_div = 'ep_listing listing-current listingradius' %>
-                    % else:
-                        <% show_div = 'ep_listing listing-default listingradius' %>
-                    % endif
-                % endif
+    % elif 'show' == sort:
+        % if cur_ep_enddate < today:
+            <% show_div = 'ep_listing listing-overdue listingradius' %>
+        % elif cur_ep_airdate >= next_week.date():
+            <% show_div = 'ep_listing listing-toofar listingradius' %>
+        % elif cur_ep_enddate >= today and cur_ep_airdate < next_week.date():
+            % if cur_ep_airdate == today.date():
+                <% show_div = 'ep_listing listing-current listingradius' %>
+            % else:
+                <% show_div = 'ep_listing listing-default listingradius' %>
             % endif
         % endif
+    % endif
 
 <div class="${show_div}" id="listing-${cur_result['showid']}">
     <div class="tvshowDiv">
diff --git a/lib/rarfile/__init__.py b/lib/rarfile/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3db5840ba69eb2a266c58705e4db6ca03a2ccabb
--- /dev/null
+++ b/lib/rarfile/__init__.py
@@ -0,0 +1,1985 @@
+# rarfile.py
+#
+# Copyright (c) 2005-2014  Marko Kreen <markokr@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+r"""RAR archive reader.
+
+This is a Python module for Rar archive reading.  The interface
+is made as :mod:`zipfile`-like as possible.
+
+Basic logic:
+ - Parse archive structure with Python.
+ - Extract non-compressed files with Python
+ - Extract compressed files with unrar.
+ - Optionally write compressed data to temp file to speed up unrar,
+   otherwise it needs to scan whole archive on each execution.
+
+Example::
+
+    import rarfile
+
+    rf = rarfile.RarFile('myarchive.rar')
+    for f in rf.infolist():
+        print f.filename, f.file_size
+        if f.filename == 'README':
+            print(rf.read(f))
+
+Archive files can also be accessed via file-like object returned
+by :meth:`RarFile.open`::
+
+    import rarfile
+
+    with rarfile.RarFile('archive.rar') as rf:
+        with rf.open('README') as f:
+            for ln in f:
+                print(ln.strip())
+
+There are few module-level parameters to tune behaviour,
+here they are with defaults, and reason to change it::
+
+    import rarfile
+
+    # Set to full path of unrar.exe if it is not in PATH
+    rarfile.UNRAR_TOOL = "unrar"
+
+    # Set to 0 if you don't look at comments and want to
+    # avoid wasting time for parsing them
+    rarfile.NEED_COMMENTS = 1
+
+    # Set up to 1 if you don't want to deal with decoding comments
+    # from unknown encoding.  rarfile will try couple of common
+    # encodings in sequence.
+    rarfile.UNICODE_COMMENTS = 0
+
+    # Set to 1 if you prefer timestamps to be datetime objects
+    # instead tuples
+    rarfile.USE_DATETIME = 0
+
+    # Set to '/' to be more compatible with zipfile
+    rarfile.PATH_SEP = '\\'
+
+For more details, refer to source.
+
+"""
+
+__version__ = '2.7'
+
+# export only interesting items
+__all__ = ['is_rarfile', 'RarInfo', 'RarFile', 'RarExtFile']
+
+##
+## Imports and compat - support both Python 2.x and 3.x
+##
+
+import sys, os, struct, errno
+from struct import pack, unpack
+from binascii import crc32
+from tempfile import mkstemp
+from subprocess import Popen, PIPE, STDOUT
+from datetime import datetime
+
+# only needed for encrypted headers
+try:
+    from Crypto.Cipher import AES
+    try:
+        from hashlib import sha1
+    except ImportError:
+        from sha import new as sha1
+    _have_crypto = 1
+except ImportError:
+    _have_crypto = 0
+
+# compat with 2.x
+if sys.hexversion < 0x3000000:
+    # prefer 3.x behaviour
+    range = xrange
+    # py2.6 has broken bytes()
+    def bytes(s, enc):
+        return str(s)
+else:
+    unicode = str
+
+# see if compat bytearray() is needed
+try:
+    bytearray
+except NameError:
+    import array
+    class bytearray:
+        def __init__(self, val = ''):
+            self.arr = array.array('B', val)
+            self.append = self.arr.append
+            self.__getitem__ = self.arr.__getitem__
+            self.__len__ = self.arr.__len__
+        def decode(self, *args):
+            return self.arr.tostring().decode(*args)
+
+# Optimized .readinto() requires memoryview
+try:
+    memoryview
+    have_memoryview = 1
+except NameError:
+    have_memoryview = 0
+
+# Struct() for older python
+try:
+    from struct import Struct
+except ImportError:
+    class Struct:
+        def __init__(self, fmt):
+            self.format = fmt
+            self.size = struct.calcsize(fmt)
+        def unpack(self, buf):
+            return unpack(self.format, buf)
+        def unpack_from(self, buf, ofs = 0):
+            return unpack(self.format, buf[ofs : ofs + self.size])
+        def pack(self, *args):
+            return pack(self.format, *args)
+
+# file object superclass
+try:
+    from io import RawIOBase
+except ImportError:
+    class RawIOBase(object):
+        def close(self):
+            pass
+
+
+##
+## Module configuration.  Can be tuned after importing.
+##
+
+#: default fallback charset
+DEFAULT_CHARSET = "windows-1252"
+
+#: list of encodings to try, with fallback to DEFAULT_CHARSET if none succeed
+TRY_ENCODINGS = ('utf8', 'utf-16le')
+
+#: 'unrar', 'rar' or full path to either one
+UNRAR_TOOL = "unrar"
+
+#: Command line args to use for opening file for reading.
+OPEN_ARGS = ('p', '-inul')
+
+#: Command line args to use for extracting file to disk.
+EXTRACT_ARGS = ('x', '-y', '-idq')
+
+#: args for testrar()
+TEST_ARGS = ('t', '-idq')
+
+#
+# Allow use of tool that is not compatible with unrar.
+#
+# By default use 'bsdtar' which is 'tar' program that
+# sits on top of libarchive.
+#
+# Problems with libarchive RAR backend:
+# - Does not support solid archives.
+# - Does not support password-protected archives.
+#
+
+ALT_TOOL = 'bsdtar'
+ALT_OPEN_ARGS = ('-x', '--to-stdout', '-f')
+ALT_EXTRACT_ARGS = ('-x', '-f')
+ALT_TEST_ARGS = ('-t', '-f')
+ALT_CHECK_ARGS = ('--help',)
+
+#: whether to speed up decompression by using tmp archive
+USE_EXTRACT_HACK = 1
+
+#: limit the filesize for tmp archive usage
+HACK_SIZE_LIMIT = 20*1024*1024
+
+#: whether to parse file/archive comments.
+NEED_COMMENTS = 1
+
+#: whether to convert comments to unicode strings
+UNICODE_COMMENTS = 0
+
+#: Convert RAR time tuple into datetime() object
+USE_DATETIME = 0
+
+#: Separator for path name components.  RAR internally uses '\\'.
+#: Use '/' to be similar with zipfile.
+PATH_SEP = '\\'
+
+##
+## rar constants
+##
+
+# block types
+RAR_BLOCK_MARK          = 0x72 # r
+RAR_BLOCK_MAIN          = 0x73 # s
+RAR_BLOCK_FILE          = 0x74 # t
+RAR_BLOCK_OLD_COMMENT   = 0x75 # u
+RAR_BLOCK_OLD_EXTRA     = 0x76 # v
+RAR_BLOCK_OLD_SUB       = 0x77 # w
+RAR_BLOCK_OLD_RECOVERY  = 0x78 # x
+RAR_BLOCK_OLD_AUTH      = 0x79 # y
+RAR_BLOCK_SUB           = 0x7a # z
+RAR_BLOCK_ENDARC        = 0x7b # {
+
+# flags for RAR_BLOCK_MAIN
+RAR_MAIN_VOLUME         = 0x0001
+RAR_MAIN_COMMENT        = 0x0002
+RAR_MAIN_LOCK           = 0x0004
+RAR_MAIN_SOLID          = 0x0008
+RAR_MAIN_NEWNUMBERING   = 0x0010
+RAR_MAIN_AUTH           = 0x0020
+RAR_MAIN_RECOVERY       = 0x0040
+RAR_MAIN_PASSWORD       = 0x0080
+RAR_MAIN_FIRSTVOLUME    = 0x0100
+RAR_MAIN_ENCRYPTVER     = 0x0200
+
+# flags for RAR_BLOCK_FILE
+RAR_FILE_SPLIT_BEFORE   = 0x0001
+RAR_FILE_SPLIT_AFTER    = 0x0002
+RAR_FILE_PASSWORD       = 0x0004
+RAR_FILE_COMMENT        = 0x0008
+RAR_FILE_SOLID          = 0x0010
+RAR_FILE_DICTMASK       = 0x00e0
+RAR_FILE_DICT64         = 0x0000
+RAR_FILE_DICT128        = 0x0020
+RAR_FILE_DICT256        = 0x0040
+RAR_FILE_DICT512        = 0x0060
+RAR_FILE_DICT1024       = 0x0080
+RAR_FILE_DICT2048       = 0x00a0
+RAR_FILE_DICT4096       = 0x00c0
+RAR_FILE_DIRECTORY      = 0x00e0
+RAR_FILE_LARGE          = 0x0100
+RAR_FILE_UNICODE        = 0x0200
+RAR_FILE_SALT           = 0x0400
+RAR_FILE_VERSION        = 0x0800
+RAR_FILE_EXTTIME        = 0x1000
+RAR_FILE_EXTFLAGS       = 0x2000
+
+# flags for RAR_BLOCK_ENDARC
+RAR_ENDARC_NEXT_VOLUME  = 0x0001
+RAR_ENDARC_DATACRC      = 0x0002
+RAR_ENDARC_REVSPACE     = 0x0004
+RAR_ENDARC_VOLNR        = 0x0008
+
+# flags common to all blocks
+RAR_SKIP_IF_UNKNOWN     = 0x4000
+RAR_LONG_BLOCK          = 0x8000
+
+# Host OS types
+RAR_OS_MSDOS = 0
+RAR_OS_OS2   = 1
+RAR_OS_WIN32 = 2
+RAR_OS_UNIX  = 3
+RAR_OS_MACOS = 4
+RAR_OS_BEOS  = 5
+
+# Compression methods - '0'..'5'
+RAR_M0 = 0x30
+RAR_M1 = 0x31
+RAR_M2 = 0x32
+RAR_M3 = 0x33
+RAR_M4 = 0x34
+RAR_M5 = 0x35
+
+##
+## internal constants
+##
+
+RAR_ID = bytes("Rar!\x1a\x07\x00", 'ascii')
+ZERO = bytes("\0", 'ascii')
+EMPTY = bytes("", 'ascii')
+
+S_BLK_HDR = Struct('<HBHH')
+S_FILE_HDR = Struct('<LLBLLBBHL')
+S_LONG = Struct('<L')
+S_SHORT = Struct('<H')
+S_BYTE = Struct('<B')
+S_COMMENT_HDR = Struct('<HBBH')
+
+##
+## Public interface
+##
+
+class Error(Exception):
+    """Base class for rarfile errors."""
+class BadRarFile(Error):
+    """Incorrect data in archive."""
+class NotRarFile(Error):
+    """The file is not RAR archive."""
+class BadRarName(Error):
+    """Cannot guess multipart name components."""
+class NoRarEntry(Error):
+    """File not found in RAR"""
+class PasswordRequired(Error):
+    """File requires password"""
+class NeedFirstVolume(Error):
+    """Need to start from first volume."""
+class NoCrypto(Error):
+    """Cannot parse encrypted headers - no crypto available."""
+class RarExecError(Error):
+    """Problem reported by unrar/rar."""
+class RarWarning(RarExecError):
+    """Non-fatal error"""
+class RarFatalError(RarExecError):
+    """Fatal error"""
+class RarCRCError(RarExecError):
+    """CRC error during unpacking"""
+class RarLockedArchiveError(RarExecError):
+    """Must not modify locked archive"""
+class RarWriteError(RarExecError):
+    """Write error"""
+class RarOpenError(RarExecError):
+    """Open error"""
+class RarUserError(RarExecError):
+    """User error"""
+class RarMemoryError(RarExecError):
+    """Memory error"""
+class RarCreateError(RarExecError):
+    """Create error"""
+class RarNoFilesError(RarExecError):
+    """No files that match pattern were found"""
+class RarUserBreak(RarExecError):
+    """User stop"""
+class RarUnknownError(RarExecError):
+    """Unknown exit code"""
+class RarSignalExit(RarExecError):
+    """Unrar exited with signal"""
+class RarCannotExec(RarExecError):
+    """Executable not found."""
+
+
+def is_rarfile(xfile):
+    '''Check quickly whether file is rar archive.'''
+    fd = XFile(xfile)
+    buf = fd.read(len(RAR_ID))
+    fd.close()
+    return buf == RAR_ID
+
+
+class RarInfo(object):
+    r'''An entry in rar archive.
+
+    :mod:`zipfile`-compatible fields:
+    
+        filename
+            File name with relative path.
+            Default path separator is '\\', to change set rarfile.PATH_SEP.
+            Always unicode string.
+        date_time
+            Modification time, tuple of (year, month, day, hour, minute, second).
+            Or datetime() object if USE_DATETIME is set.
+        file_size
+            Uncompressed size.
+        compress_size
+            Compressed size.
+        CRC
+            CRC-32 of uncompressed file, unsigned int.
+        comment
+            File comment.  Byte string or None.  Use UNICODE_COMMENTS
+            to get automatic decoding to unicode.
+        volume
+            Volume nr, starting from 0.
+
+    RAR-specific fields:
+
+        compress_type
+            Compression method: 0x30 - 0x35.
+        extract_version
+            Minimal Rar version needed for decompressing.
+        host_os
+            Host OS type, one of RAR_OS_* constants.
+        mode
+            File attributes. May be either dos-style or unix-style, depending on host_os.
+        volume_file
+            Volume file name, where file starts.
+        mtime
+            Optional time field: Modification time, with float seconds.
+            Same as .date_time but with more precision.
+        ctime
+            Optional time field: creation time, with float seconds.
+        atime
+            Optional time field: last access time, with float seconds.
+        arctime
+            Optional time field: archival time, with float seconds.
+
+    Internal fields:
+
+        type
+            One of RAR_BLOCK_* types.  Only entries with type==RAR_BLOCK_FILE are shown in .infolist().
+        flags
+            For files, RAR_FILE_* bits.
+    '''
+
+    __slots__ = (
+        # zipfile-compatible fields
+        'filename',
+        'file_size',
+        'compress_size',
+        'date_time',
+        'comment',
+        'CRC',
+        'volume',
+        'orig_filename', # bytes in unknown encoding
+
+        # rar-specific fields
+        'extract_version',
+        'compress_type',
+        'host_os',
+        'mode',
+        'type',
+        'flags',
+
+        # optional extended time fields
+        # tuple where the sec is float, or datetime().
+        'mtime', # same as .date_time
+        'ctime',
+        'atime',
+        'arctime',
+
+        # RAR internals
+        'name_size',
+        'header_size',
+        'header_crc',
+        'file_offset',
+        'add_size',
+        'header_data',
+        'header_base',
+        'header_offset',
+        'salt',
+        'volume_file',
+    )
+
+    def isdir(self):
+        '''Returns True if the entry is a directory.'''
+        if self.type == RAR_BLOCK_FILE:
+            return (self.flags & RAR_FILE_DIRECTORY) == RAR_FILE_DIRECTORY
+        return False
+
+    def needs_password(self):
+        return self.flags & RAR_FILE_PASSWORD
+
+
+class RarFile(object):
+    '''Parse RAR structure, provide access to files in archive.
+    '''
+
+    #: Archive comment.  Byte string or None.  Use :data:`UNICODE_COMMENTS`
+    #: to get automatic decoding to unicode.
+    comment = None
+
+    def __init__(self, rarfile, mode="r", charset=None, info_callback=None,
+                 crc_check = True, errors = "stop"):
+        """Open and parse a RAR archive.
+        
+        Parameters:
+
+            rarfile
+                archive file name
+            mode
+                only 'r' is supported.
+            charset
+                fallback charset to use, if filenames are not already Unicode-enabled.
+            info_callback
+                debug callback, gets to see all archive entries.
+            crc_check
+                set to False to disable CRC checks
+            errors
+                Either "stop" to quietly stop parsing on errors,
+                or "strict" to raise errors.  Default is "stop".
+        """
+        self.rarfile = rarfile
+        self.comment = None
+        self._charset = charset or DEFAULT_CHARSET
+        self._info_callback = info_callback
+
+        self._info_list = []
+        self._info_map = {}
+        self._needs_password = False
+        self._password = None
+        self._crc_check = crc_check
+        self._vol_list = []
+
+        if errors == "stop":
+            self._strict = False
+        elif errors == "strict":
+            self._strict = True
+        else:
+            raise ValueError("Invalid value for 'errors' parameter.")
+
+        self._main = None
+
+        if mode != "r":
+            raise NotImplementedError("RarFile supports only mode=r")
+
+        self._parse()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def setpassword(self, password):
+        '''Sets the password to use when extracting.'''
+        self._password = password
+        if not self._main:
+            self._parse()
+
+    def needs_password(self):
+        '''Returns True if any archive entries require password for extraction.'''
+        return self._needs_password
+
+    def namelist(self):
+        '''Return list of filenames in archive.'''
+        return [f.filename for f in self._info_list]
+
+    def infolist(self):
+        '''Return RarInfo objects for all files/directories in archive.'''
+        return self._info_list
+
+    def volumelist(self):
+        '''Returns filenames of archive volumes.
+
+        In case of single-volume archive, the list contains
+        just the name of main archive file.
+        '''
+        return self._vol_list
+
+    def getinfo(self, fname):
+        '''Return RarInfo for file.'''
+
+        if isinstance(fname, RarInfo):
+            return fname
+
+        # accept both ways here
+        if PATH_SEP == '/':
+            fname2 = fname.replace("\\", "/")
+        else:
+            fname2 = fname.replace("/", "\\")
+
+        try:
+            return self._info_map[fname]
+        except KeyError:
+            try:
+                return self._info_map[fname2]
+            except KeyError:
+                raise NoRarEntry("No such file: "+fname)
+
+    def open(self, fname, mode = 'r', psw = None):
+        '''Returns file-like object (:class:`RarExtFile`),
+        from where the data can be read.
+        
+        The object implements :class:`io.RawIOBase` interface, so it can
+        be further wrapped with :class:`io.BufferedReader`
+        and :class:`io.TextIOWrapper`.
+
+        On older Python where io module is not available, it implements
+        only .read(), .seek(), .tell() and .close() methods.
+
+        The object is seekable, although the seeking is fast only on
+        uncompressed files, on compressed files the seeking is implemented
+        by reading ahead and/or restarting the decompression.
+
+        Parameters:
+
+            fname
+                file name or RarInfo instance.
+            mode
+                must be 'r'
+            psw
+                password to use for extracting.
+        '''
+
+        if mode != 'r':
+            raise NotImplementedError("RarFile.open() supports only mode=r")
+
+        # entry lookup
+        inf = self.getinfo(fname)
+        if inf.isdir():
+            raise TypeError("Directory does not have any data: " + inf.filename)
+
+        if inf.flags & RAR_FILE_SPLIT_BEFORE:
+            raise NeedFirstVolume("Partial file, please start from first volume: " + inf.filename)
+
+        # check password
+        if inf.needs_password():
+            psw = psw or self._password
+            if psw is None:
+                raise PasswordRequired("File %s requires password" % inf.filename)
+        else:
+            psw = None
+
+        # is temp write usable?
+        use_hack = 1
+        if not self._main:
+            use_hack = 0
+        elif self._main.flags & (RAR_MAIN_SOLID | RAR_MAIN_PASSWORD):
+            use_hack = 0
+        elif inf.flags & (RAR_FILE_SPLIT_BEFORE | RAR_FILE_SPLIT_AFTER):
+            use_hack = 0
+        elif is_filelike(self.rarfile):
+            pass
+        elif inf.file_size > HACK_SIZE_LIMIT:
+            use_hack = 0
+        elif not USE_EXTRACT_HACK:
+            use_hack = 0
+
+        # now extract
+        if inf.compress_type == RAR_M0 and (inf.flags & RAR_FILE_PASSWORD) == 0:
+            return self._open_clear(inf)
+        elif use_hack:
+            return self._open_hack(inf, psw)
+        else:
+            return self._open_unrar(self.rarfile, inf, psw)
+
+    def read(self, fname, psw = None):
+        """Return uncompressed data for archive entry.
+        
+        For longer files using :meth:`RarFile.open` may be better idea.
+
+        Parameters:
+
+            fname
+                filename or RarInfo instance
+            psw
+                password to use for extracting.
+        """
+
+        f = self.open(fname, 'r', psw)
+        try:
+            return f.read()
+        finally:
+            f.close()
+
+    def close(self):
+        """Release open resources."""
+        pass
+
+    def printdir(self):
+        """Print archive file list to stdout."""
+        for f in self._info_list:
+            print(f.filename)
+
+    def extract(self, member, path=None, pwd=None):
+        """Extract single file into current directory.
+        
+        Parameters:
+
+            member
+                filename or :class:`RarInfo` instance
+            path
+                optional destination path
+            pwd
+                optional password to use
+        """
+        if isinstance(member, RarInfo):
+            fname = member.filename
+        else:
+            fname = member
+        self._extract([fname], path, pwd)
+
+    def extractall(self, path=None, members=None, pwd=None):
+        """Extract all files into current directory.
+        
+        Parameters:
+
+            path
+                optional destination path
+            members
+                optional filename or :class:`RarInfo` instance list to extract
+            pwd
+                optional password to use
+        """
+        fnlist = []
+        if members is not None:
+            for m in members:
+                if isinstance(m, RarInfo):
+                    fnlist.append(m.filename)
+                else:
+                    fnlist.append(m)
+        self._extract(fnlist, path, pwd)
+
    def testrar(self):
        """Let 'unrar' test the archive.

        Runs the external unrar tool in test mode over the whole archive;
        raises an error if it reports failure.
        """
        # build command: unrar + test args + optional password + archive path
        cmd = [UNRAR_TOOL] + list(TEST_ARGS)
        add_password_arg(cmd, self._password)
        cmd.append(self.rarfile)
        p = custom_popen(cmd)
        output = p.communicate()[0]
        # raises when unrar exited with non-zero code
        check_returncode(p, output)
+
    def strerror(self):
        """Return error string if parsing failed,
        or None if no problems.

        Only set in non-strict mode, where parse errors are recorded
        via _set_error() instead of being raised.
        """
        return self._parse_error
+
+    ##
+    ## private methods
+    ##
+
+    def _set_error(self, msg, *args):
+        if args:
+            msg = msg % args
+        self._parse_error = msg
+        if self._strict:
+            raise BadRarFile(msg)
+
    # store entry
    def _process_entry(self, item):
        """Register one parsed block: collect file entries and comments.

        Called for every header block; only FILE blocks and 'CMT' SUB
        blocks change state, everything still reaches the info callback.
        """
        if item.type == RAR_BLOCK_FILE:
            # use only first part
            if (item.flags & RAR_FILE_SPLIT_BEFORE) == 0:
                self._info_map[item.filename] = item
                self._info_list.append(item)
                # remember if any items require password
                if item.needs_password():
                    self._needs_password = True
            elif len(self._info_list) > 0:
                # continuation of a split file: final crc is in last block,
                # compressed size accumulates over the parts
                old = self._info_list[-1]
                old.CRC = item.CRC
                old.compress_size += item.compress_size

        # parse new-style comment
        if item.type == RAR_BLOCK_SUB and item.filename == 'CMT':
            if not NEED_COMMENTS:
                pass
            elif item.flags & (RAR_FILE_SPLIT_BEFORE | RAR_FILE_SPLIT_AFTER):
                # split comment blocks are not supported
                pass
            elif item.flags & RAR_FILE_SOLID:
                # file comment - attach to preceding file entry
                cmt = self._read_comment_v3(item, self._password)
                if len(self._info_list) > 0:
                    old = self._info_list[-1]
                    old.comment = cmt
            else:
                # archive comment
                cmt = self._read_comment_v3(item, self._password)
                self.comment = cmt

        if self._info_callback:
            self._info_callback(item)
+
+    # read rar
+    def _parse(self):
+        self._fd = None
+        try:
+            self._parse_real()
+        finally:
+            if self._fd:
+                self._fd.close()
+                self._fd = None
+
    def _parse_real(self):
        """Walk all headers in the archive, following continuation volumes."""
        fd = XFile(self.rarfile)
        self._fd = fd
        # archive must start with the RAR marker bytes
        id = fd.read(len(RAR_ID))
        if id != RAR_ID:
            raise NotRarFile("Not a Rar archive: "+self.rarfile)

        volume = 0  # first vol (.rar) is 0
        more_vols = 0   # set when ENDARC/file flags promise another volume
        endarc = 0      # set when ENDARC seen - stop reading this volume
        volfile = self.rarfile
        self._vol_list = [self.rarfile]
        while 1:
            if endarc:
                h = None    # don't read past ENDARC
            else:
                h = self._parse_header(fd)
            if not h:
                # end of this volume: switch to next one when promised
                if more_vols:
                    volume += 1
                    fd.close()
                    try:
                        volfile = self._next_volname(volfile)
                        fd = XFile(volfile)
                    except IOError:
                        self._set_error("Cannot open next volume: %s", volfile)
                        break
                    self._fd = fd
                    more_vols = 0
                    endarc = 0
                    self._vol_list.append(volfile)
                    continue
                break
            h.volume = volume
            h.volume_file = volfile

            if h.type == RAR_BLOCK_MAIN and not self._main:
                self._main = h
                if h.flags & RAR_MAIN_NEWNUMBERING:
                    # RAR 2.x does not set FIRSTVOLUME,
                    # so check it only if NEWNUMBERING is used
                    if (h.flags & RAR_MAIN_FIRSTVOLUME) == 0:
                        raise NeedFirstVolume("Need to start from first volume")
                if h.flags & RAR_MAIN_PASSWORD:
                    self._needs_password = True
                    if not self._password:
                        # cannot parse encrypted headers without a password
                        self._main = None
                        break
            elif h.type == RAR_BLOCK_ENDARC:
                more_vols = h.flags & RAR_ENDARC_NEXT_VOLUME
                endarc = 1
            elif h.type == RAR_BLOCK_FILE:
                # RAR 2.x does not write RAR_BLOCK_ENDARC
                if h.flags & RAR_FILE_SPLIT_AFTER:
                    more_vols = 1
                # RAR 2.x does not set RAR_MAIN_FIRSTVOLUME
                if volume == 0 and h.flags & RAR_FILE_SPLIT_BEFORE:
                    raise NeedFirstVolume("Need to start from first volume")

            # store it
            self._process_entry(h)

            # go to next header, skipping the block's attached data area
            if h.add_size > 0:
                fd.seek(h.file_offset + h.add_size, 0)
+
    # AES encrypted headers
    _last_aes_key = (None, None, None) # (salt, key, iv) - cache of last derivation
    def _decrypt_header(self, fd):
        """Wrap *fd* so that further header reads are AES-decrypted."""
        if not _have_crypto:
            raise NoCrypto('Cannot parse encrypted headers - no crypto')
        salt = fd.read(8)
        # key derivation (rar3_s2k) is expensive; reuse result for same salt
        if self._last_aes_key[0] == salt:
            key, iv = self._last_aes_key[1:]
        else:
            key, iv = rar3_s2k(self._password, salt)
            self._last_aes_key = (salt, key, iv)
        return HeaderDecrypt(fd, key, iv)
+
+    # read single header
+    def _parse_header(self, fd):
+        try:
+            # handle encrypted headers
+            if self._main and self._main.flags & RAR_MAIN_PASSWORD:
+                if not self._password:
+                    return
+                fd = self._decrypt_header(fd)
+
+            # now read actual header
+            return self._parse_block_header(fd)
+        except struct.error:
+            self._set_error('Broken header in RAR file')
+            return None
+
    # common header
    def _parse_block_header(self, fd):
        """Parse one common block header at the current *fd* position.

        Returns a filled RarInfo, or None on EOF / truncation / CRC error.
        """
        h = RarInfo()
        h.header_offset = fd.tell()
        h.comment = None

        # read and parse base header (crc, type, flags, size)
        buf = fd.read(S_BLK_HDR.size)
        if not buf:
            return None
        t = S_BLK_HDR.unpack_from(buf)
        h.header_crc, h.type, h.flags, h.header_size = t
        h.header_base = S_BLK_HDR.size
        pos = S_BLK_HDR.size

        # read full header
        if h.header_size > S_BLK_HDR.size:
            h.header_data = buf + fd.read(h.header_size - S_BLK_HDR.size)
        else:
            h.header_data = buf
        h.file_offset = fd.tell()

        # unexpected EOF?
        if len(h.header_data) != h.header_size:
            self._set_error('Unexpected EOF when reading header')
            return None

        # block has data associated with it?
        if h.flags & RAR_LONG_BLOCK:
            h.add_size = S_LONG.unpack_from(h.header_data, pos)[0]
        else:
            h.add_size = 0

        # parse interesting ones, decide header boundaries for crc
        if h.type == RAR_BLOCK_MARK:
            return h
        elif h.type == RAR_BLOCK_MAIN:
            h.header_base += 6
            if h.flags & RAR_MAIN_ENCRYPTVER:
                h.header_base += 1
            if h.flags & RAR_MAIN_COMMENT:
                self._parse_subblocks(h, h.header_base)
                self.comment = h.comment
        elif h.type == RAR_BLOCK_FILE:
            self._parse_file_header(h, pos)
        elif h.type == RAR_BLOCK_SUB:
            self._parse_file_header(h, pos)
            h.header_base = h.header_size
        elif h.type == RAR_BLOCK_OLD_AUTH:
            h.header_base += 8
        elif h.type == RAR_BLOCK_OLD_EXTRA:
            h.header_base += 7
        else:
            h.header_base = h.header_size

        # check crc
        if h.type == RAR_BLOCK_OLD_SUB:
            # old-style subblock: crc covers the attached data too
            crcdat = h.header_data[2:] + fd.read(h.add_size)
        else:
            crcdat = h.header_data[2:h.header_base]

        # stored header crc is only the low 16 bits of crc32
        calc_crc = crc32(crcdat) & 0xFFFF

        # return good header
        if h.header_crc == calc_crc:
            return h

        # header parsing failed.
        self._set_error('Header CRC error (%02x): exp=%x got=%x (xlen = %d)',
                h.type, h.header_crc, calc_crc, len(crcdat))

        # instead of panicking, signal EOF
        return None
+
    # read file-specific header
    def _parse_file_header(self, h, pos):
        """Parse FILE/SUB specific fields from h.header_data at *pos*.

        Fills sizes, timestamps and (unicode-decoded) filename on *h*;
        returns the position just past the parsed fields.
        """
        fld = S_FILE_HDR.unpack_from(h.header_data, pos)
        h.compress_size = fld[0]
        h.file_size = fld[1]
        h.host_os = fld[2]
        h.CRC = fld[3]
        h.date_time = parse_dos_time(fld[4])
        h.extract_version = fld[5]
        h.compress_type = fld[6]
        h.name_size = fld[7]
        h.mode = fld[8]
        pos += S_FILE_HDR.size

        if h.flags & RAR_FILE_LARGE:
            # 64-bit sizes: high halves stored after the fixed fields
            h1 = S_LONG.unpack_from(h.header_data, pos)[0]
            h2 = S_LONG.unpack_from(h.header_data, pos + 4)[0]
            h.compress_size |= h1 << 32
            h.file_size |= h2 << 32
            pos += 8
            h.add_size = h.compress_size

        name = h.header_data[pos : pos + h.name_size ]
        pos += h.name_size
        if h.flags & RAR_FILE_UNICODE:
            # name field layout: 8-bit name, NUL, encoded unicode stream
            nul = name.find(ZERO)
            h.orig_filename = name[:nul]
            u = UnicodeFilename(h.orig_filename, name[nul + 1 : ])
            h.filename = u.decode()

            # if parsing failed fall back to simple name
            if u.failed:
                h.filename = self._decode(h.orig_filename)
        else:
            h.orig_filename = name
            h.filename = self._decode(name)

        # change separator, if requested
        if PATH_SEP != '\\':
            h.filename = h.filename.replace('\\', PATH_SEP)

        if h.flags & RAR_FILE_SALT:
            h.salt = h.header_data[pos : pos + 8]
            pos += 8
        else:
            h.salt = None

        # optional extended time stamps
        if h.flags & RAR_FILE_EXTTIME:
            pos = self._parse_ext_time(h, pos)
        else:
            h.mtime = h.atime = h.ctime = h.arctime = None

        # base header end
        h.header_base = pos

        if h.flags & RAR_FILE_COMMENT:
            self._parse_subblocks(h, pos)

        # convert timestamps
        if USE_DATETIME:
            h.date_time = to_datetime(h.date_time)
            h.mtime = to_datetime(h.mtime)
            h.atime = to_datetime(h.atime)
            h.ctime = to_datetime(h.ctime)
            h.arctime = to_datetime(h.arctime)

        # .mtime is .date_time with more precision
        if h.mtime:
            if USE_DATETIME:
                h.date_time = h.mtime
            else:
                # keep seconds int
                h.date_time = h.mtime[:5] + (int(h.mtime[5]),)

        return pos
+
    # find old-style comment subblock
    def _parse_subblocks(self, h, pos):
        """Scan trailing subblocks in header data for an old-style comment."""
        hdata = h.header_data
        while pos < len(hdata):
            # ordinary block header
            t = S_BLK_HDR.unpack_from(hdata, pos)
            scrc, stype, sflags, slen = t
            pos_next = pos + slen
            pos += S_BLK_HDR.size

            # corrupt header
            if pos_next < pos:
                break

            # followed by block-specific header
            if stype == RAR_BLOCK_OLD_COMMENT and pos + S_COMMENT_HDR.size <= pos_next:
                declen, ver, meth, crc = S_COMMENT_HDR.unpack_from(hdata, pos)
                pos += S_COMMENT_HDR.size
                data = hdata[pos : pos_next]
                cmt = rar_decompress(ver, meth, data, declen, sflags,
                                     crc, self._password)
                # accept only when crc matches, or crc checking is disabled
                if not self._crc_check:
                    h.comment = self._decode_comment(cmt)
                elif crc32(cmt) & 0xFFFF == crc:
                    h.comment = self._decode_comment(cmt)

            pos = pos_next
+
    def _parse_ext_time(self, h, pos):
        """Parse optional high-precision timestamps; returns new position."""
        data = h.header_data

        # flags and rest of data can be missing
        flags = 0
        if pos + 2 <= len(data):
            flags = S_SHORT.unpack_from(data, pos)[0]
            pos += 2

        # one 4-bit flag nibble per timestamp, highest nibble first
        h.mtime, pos = self._parse_xtime(flags >> 3*4, data, pos, h.date_time)
        h.ctime, pos = self._parse_xtime(flags >> 2*4, data, pos)
        h.atime, pos = self._parse_xtime(flags >> 1*4, data, pos)
        h.arctime, pos = self._parse_xtime(flags >> 0*4, data, pos)
        return pos
+
    def _parse_xtime(self, flag, data, pos, dostime = None):
        """Parse a single extended timestamp; returns (time tuple, new pos).

        Low nibble of *flag*: bit 3 = timestamp present, bit 2 = add one
        second, bits 0-1 = count of extra sub-second precision bytes.
        """
        unit = 10000000.0 # 100 ns units
        if flag & 8:
            if not dostime:
                # timestamps other than mtime carry their own dos time here
                t = S_LONG.unpack_from(data, pos)[0]
                dostime = parse_dos_time(t)
                pos += 4
            # accumulate sub-second remainder bytes (low bytes first)
            rem = 0
            cnt = flag & 3
            for i in range(cnt):
                b = S_BYTE.unpack_from(data, pos)[0]
                rem = (b << 16) | (rem >> 8)
                pos += 1
            sec = dostime[5] + rem / unit
            if flag & 4:
                sec += 1
            dostime = dostime[:5] + (sec,)
        return dostime, pos
+
+    # given current vol name, construct next one
+    def _next_volname(self, volfile):
+        if is_filelike(volfile):
+            raise IOError("Working on single FD")
+        if self._main.flags & RAR_MAIN_NEWNUMBERING:
+            return self._next_newvol(volfile)
+        return self._next_oldvol(volfile)
+
+    # new-style next volume
+    def _next_newvol(self, volfile):
+        i = len(volfile) - 1
+        while i >= 0:
+            if volfile[i] >= '0' and volfile[i] <= '9':
+                return self._inc_volname(volfile, i)
+            i -= 1
+        raise BadRarName("Cannot construct volume name: "+volfile)
+
+    # old-style next volume
+    def _next_oldvol(self, volfile):
+        # rar -> r00
+        if volfile[-4:].lower() == '.rar':
+            return volfile[:-2] + '00'
+        return self._inc_volname(volfile, len(volfile) - 1)
+
+    # increase digits with carry, otherwise just increment char
+    def _inc_volname(self, volfile, i):
+        fn = list(volfile)
+        while i >= 0:
+            if fn[i] != '9':
+                fn[i] = chr(ord(fn[i]) + 1)
+                break
+            fn[i] = '0'
+            i -= 1
+        return ''.join(fn)
+
    def _open_clear(self, inf):
        # stored (uncompressed, unencrypted) entries can be read
        # straight from the archive file
        return DirectReader(self, inf)
+
+    # put file compressed data into temporary .rar archive, and run
+    # unrar on that, thus avoiding unrar going over whole archive
+    def _open_hack(self, inf, psw = None):
+        BSIZE = 32*1024
+
+        size = inf.compress_size + inf.header_size
+        rf = XFile(inf.volume_file, 0)
+        rf.seek(inf.header_offset)
+
+        tmpfd, tmpname = mkstemp(suffix='.rar')
+        tmpf = os.fdopen(tmpfd, "wb")
+
+        try:
+            # create main header: crc, type, flags, size, res1, res2
+            mh = S_BLK_HDR.pack(0x90CF, 0x73, 0, 13) + ZERO * (2+4)
+            tmpf.write(RAR_ID + mh)
+            while size > 0:
+                if size > BSIZE:
+                    buf = rf.read(BSIZE)
+                else:
+                    buf = rf.read(size)
+                if not buf:
+                    raise BadRarFile('read failed: ' + inf.filename)
+                tmpf.write(buf)
+                size -= len(buf)
+            tmpf.close()
+            rf.close()
+        except:
+            rf.close()
+            tmpf.close()
+            os.unlink(tmpname)
+            raise
+
+        return self._open_unrar(tmpname, inf, psw, tmpname)
+
    def _read_comment_v3(self, inf, psw=None):
        """Read and decompress a new-style (v3) comment block.

        Returns decoded comment, or None when the crc check fails.
        """

        # read data
        rf = XFile(inf.volume_file)
        rf.seek(inf.file_offset)
        data = rf.read(inf.compress_size)
        rf.close()

        # decompress
        cmt = rar_decompress(inf.extract_version, inf.compress_type, data,
                             inf.file_size, inf.flags, inf.CRC, psw, inf.salt)

        # check crc
        if self._crc_check:
            crc = crc32(cmt)
            if crc < 0:
                # py2 zlib.crc32 may return a signed value; normalize
                crc += (long(1) << 32)
            if crc != inf.CRC:
                # bad comment - drop it silently
                return None

        return self._decode_comment(cmt)
+
+    # extract using unrar
+    def _open_unrar(self, rarfile, inf, psw = None, tmpfile = None):
+        if is_filelike(rarfile):
+            raise ValueError("Cannot use unrar directly on memory buffer")
+        cmd = [UNRAR_TOOL] + list(OPEN_ARGS)
+        add_password_arg(cmd, psw)
+        cmd.append(rarfile)
+
+        # not giving filename avoids encoding related problems
+        if not tmpfile:
+            fn = inf.filename
+            if PATH_SEP != os.sep:
+                fn = fn.replace(PATH_SEP, os.sep)
+            cmd.append(fn)
+
+        # read from unrar pipe
+        return PipeReader(self, inf, cmd, tmpfile)
+
+    def _decode(self, val):
+        for c in TRY_ENCODINGS:
+            try:
+                return val.decode(c)
+            except UnicodeError:
+                pass
+        return val.decode(self._charset, 'replace')
+
+    def _decode_comment(self, val):
+        if UNICODE_COMMENTS:
+            return self._decode(val)
+        return val
+
+    # call unrar to extract a file
+    def _extract(self, fnlist, path=None, psw=None):
+        cmd = [UNRAR_TOOL] + list(EXTRACT_ARGS)
+
+        # pasoword
+        psw = psw or self._password
+        add_password_arg(cmd, psw)
+
+        # rar file
+        cmd.append(self.rarfile)
+
+        # file list
+        for fn in fnlist:
+            if os.sep != PATH_SEP:
+                fn = fn.replace(PATH_SEP, os.sep)
+            cmd.append(fn)
+
+        # destination path
+        if path is not None:
+            cmd.append(path + os.sep)
+
+        # call
+        p = custom_popen(cmd)
+        output = p.communicate()[0]
+        check_returncode(p, output)
+
+##
+## Utility classes
+##
+
class UnicodeFilename:
    """Handle unicode filename decompression.

    RAR3 stores an 8-bit fallback name plus an encoded opcode stream
    that reconstructs the UTF-16LE name, using the fallback bytes as a
    dictionary.  :meth:`decode` runs the small state machine.
    """

    def __init__(self, name, encdata):
        # name: 8-bit fallback filename; encdata: encoded opcode stream
        self.std_name = bytearray(name)
        self.encdata = bytearray(encdata)
        self.pos = self.encpos = 0
        self.buf = bytearray()      # output: UTF-16LE bytes
        self.failed = 0             # set when input runs out early

    def enc_byte(self):
        """Next byte from encoded stream; 0 + failure flag on overrun."""
        try:
            c = self.encdata[self.encpos]
            self.encpos += 1
            return c
        except IndexError:
            self.failed = 1
            return 0

    def std_byte(self):
        """Current byte from fallback name; '?' + failure flag on overrun."""
        try:
            return self.std_name[self.pos]
        except IndexError:
            self.failed = 1
            return ord('?')

    def put(self, lo, hi):
        """Append one UTF-16LE code unit and advance fallback position."""
        self.buf.append(lo)
        self.buf.append(hi)
        self.pos += 1

    def decode(self):
        """Run the decoder; check .failed afterwards for truncation."""
        hi = self.enc_byte()        # high byte shared by 1-byte opcodes
        flagbits = 0
        while self.encpos < len(self.encdata):
            if flagbits == 0:
                # each flag byte holds four 2-bit opcodes, high bits first
                flags = self.enc_byte()
                flagbits = 8
            flagbits -= 2
            t = (flags >> flagbits) & 3
            if t == 0:
                # literal low byte, zero high byte
                self.put(self.enc_byte(), 0)
            elif t == 1:
                # literal low byte with shared high byte
                self.put(self.enc_byte(), hi)
            elif t == 2:
                # full two-byte literal
                self.put(self.enc_byte(), self.enc_byte())
            else:
                # run-length opcodes based on the fallback name
                n = self.enc_byte()
                if n & 0x80:
                    # copy run with per-byte offset c and shared high byte
                    c = self.enc_byte()
                    for i in range((n & 0x7f) + 2):
                        lo = (self.std_byte() + c) & 0xFF
                        self.put(lo, hi)
                else:
                    # plain copy run from fallback name
                    for i in range(n + 2):
                        self.put(self.std_byte(), 0)
        return self.buf.decode("utf-16le", "replace")
+
+
class RarExtFile(RawIOBase):
    """Base class for file-like object that :meth:`RarFile.open` returns.

    Provides public methods and common crc checking.

    Behaviour:
     - no short reads - .read() and .readinto() read as much as requested.
     - no internal buffer, use io.BufferedReader for that.

    If :mod:`io` module is available (Python 2.6+, 3.x), then this class
    will inherit from :class:`io.RawIOBase` class.  This makes line-based
    access available: :meth:`RarExtFile.readline` and ``for ln in f``.
    """

    #: Filename of the archive entry
    name = None

    def __init__(self, rf, inf):
        """Open entry *inf* belonging to RarFile *rf*."""
        RawIOBase.__init__(self)

        # standard io.* properties
        self.name = inf.filename
        self.mode = 'rb'

        self.rf = rf
        self.inf = inf
        self.crc_check = rf._crc_check
        self.fd = None          # underlying data source, set by _open()
        self.CRC = 0            # running crc of data read so far
        self.remain = 0         # uncompressed bytes still unread
        self.returncode = 0     # exit code of helper process, if any

        self._open()

    def _open(self):
        """(Re)start reading from the beginning of the entry."""
        if self.fd:
            self.fd.close()
        self.fd = None
        self.CRC = 0
        self.remain = self.inf.file_size

    def read(self, cnt = None):
        """Read all or specified amount of data from archive entry."""

        # sanitize cnt: None/negative means rest of file, clamp to remain
        if cnt is None or cnt < 0:
            cnt = self.remain
        elif cnt > self.remain:
            cnt = self.remain
        if cnt == 0:
            return EMPTY

        # actual read
        data = self._read(cnt)
        if data:
            self.CRC = crc32(data, self.CRC)
            self.remain -= len(data)
        if len(data) != cnt:
            raise BadRarFile("Failed the read enough data")

        # done?  verify crc once whole entry is consumed
        if not data or self.remain == 0:
            #self.close()
            self._check()
        return data

    def _check(self):
        """Check final CRC."""
        if not self.crc_check:
            return
        if self.returncode:
            check_returncode(self, '')
        if self.remain != 0:
            raise BadRarFile("Failed the read enough data")
        crc = self.CRC
        if crc < 0:
            # py2 zlib.crc32 may return a signed value; normalize
            crc += (long(1) << 32)
        if crc != self.inf.CRC:
            raise BadRarFile("Corrupt file - CRC check failed: " + self.inf.filename)

    def _read(self, cnt):
        """Actual read that gets sanitized cnt.  Implemented by subclasses."""

    def close(self):
        """Close open resources."""

        RawIOBase.close(self)

        if self.fd:
            self.fd.close()
            self.fd = None

    def __del__(self):
        """Hook delete to make sure tempfile is removed."""
        self.close()

    def readinto(self, buf):
        """Zero-copy read directly into buffer.

        Returns bytes read.
        """

        data = self.read(len(buf))
        n = len(data)
        try:
            buf[:n] = data
        except TypeError:
            # array.array needs the data wrapped into a same-typed array
            import array
            if not isinstance(buf, array.array):
                raise
            buf[:n] = array.array(buf.typecode, data)
        return n

    def tell(self):
        """Return current reading position in uncompressed data."""
        return self.inf.file_size - self.remain

    def seek(self, ofs, whence = 0):
        """Seek in data.

        On uncompressed files, the seeking works by actual
        seeks so it's fast.  On compressed files it's slow
        - forward seeking happens by reading ahead,
        backwards by re-opening and decompressing from the start.
        """

        # disable crc check when seeking, data no longer read linearly
        self.crc_check = 0

        fsize = self.inf.file_size
        cur_ofs = self.tell()

        if whence == 0:     # seek from beginning of file
            new_ofs = ofs
        elif whence == 1:   # seek from current position
            new_ofs = cur_ofs + ofs
        elif whence == 2:   # seek from end of file
            new_ofs = fsize + ofs
        else:
            raise ValueError('Invalid value for whence')

        # sanity check: clamp target into [0, fsize]
        if new_ofs < 0:
            new_ofs = 0
        elif new_ofs > fsize:
            new_ofs = fsize

        # do the actual seek
        if new_ofs >= cur_ofs:
            self._skip(new_ofs - cur_ofs)
        else:
            # process old data ?
            #self._skip(fsize - cur_ofs)
            # reopen and seek
            self._open()
            self._skip(new_ofs)
        return self.tell()

    def _skip(self, cnt):
        """Read and discard data"""
        while cnt > 0:
            if cnt > 8192:
                buf = self.read(8192)
            else:
                buf = self.read(cnt)
            if not buf:
                break
            cnt -= len(buf)

    def readable(self):
        """Returns True"""
        return True

    def writable(self):
        """Returns False.

        Writing is not supported."""
        return False

    def seekable(self):
        """Returns True.

        Seeking is supported, although it's slow on compressed files.
        """
        return True

    def readall(self):
        """Read all remaining data"""
        # avoid RawIOBase default impl
        return self.read()
+
+
class PipeReader(RarExtFile):
    """Read data from unrar pipe, handle tempfile cleanup."""

    def __init__(self, rf, inf, cmd, tempfile=None):
        # cmd: full unrar command line
        # tempfile: optional temporary .rar to delete on close
        self.cmd = cmd
        self.proc = None
        self.tempfile = tempfile
        RarExtFile.__init__(self, rf, inf)

    def _close_proc(self):
        """Shut down the unrar subprocess and record its exit code."""
        if not self.proc:
            return
        # close all pipes before waiting, so the child cannot block on them
        if self.proc.stdout:
            self.proc.stdout.close()
        if self.proc.stdin:
            self.proc.stdin.close()
        if self.proc.stderr:
            self.proc.stderr.close()
        self.proc.wait()
        self.returncode = self.proc.returncode
        self.proc = None

    def _open(self):
        """(Re)launch the unrar process and read from its stdout."""
        RarExtFile._open(self)

        # stop old process
        self._close_proc()

        # launch new process
        self.returncode = 0
        self.proc = custom_popen(self.cmd)
        self.fd = self.proc.stdout

        # avoid situation where unrar waits on stdin
        if self.proc.stdin:
            self.proc.stdin.close()

    def _read(self, cnt):
        """Read from pipe."""

        # normal read is usually enough
        data = self.fd.read(cnt)
        if len(data) == cnt or not data:
            return data

        # short read, try looping until cnt bytes or EOF
        buf = [data]
        cnt -= len(data)
        while cnt > 0:
            data = self.fd.read(cnt)
            if not data:
                break
            cnt -= len(data)
            buf.append(data)
        return EMPTY.join(buf)

    def close(self):
        """Close open resources."""

        self._close_proc()
        RarExtFile.close(self)

        # remove temporary .rar created by the extract hack, if any
        if self.tempfile:
            try:
                os.unlink(self.tempfile)
            except OSError:
                pass
            self.tempfile = None

    if have_memoryview:
        def readinto(self, buf):
            """Zero-copy read directly into buffer."""
            cnt = len(buf)
            if cnt > self.remain:
                cnt = self.remain
            vbuf = memoryview(buf)
            res = got = 0
            while got < cnt:
                res = self.fd.readinto(vbuf[got : cnt])
                if not res:
                    break
                # track crc here as this path bypasses RarExtFile.read()
                if self.crc_check:
                    self.CRC = crc32(vbuf[got : got + res], self.CRC)
                self.remain -= res
                got += res
            return got
+
+
+class DirectReader(RarExtFile):
+    """Read uncompressed data directly from archive."""
+
+    def _open(self):
+        RarExtFile._open(self)
+
+        self.volfile = self.inf.volume_file
+        self.fd = XFile(self.volfile, 0)
+        self.fd.seek(self.inf.header_offset, 0)
+        self.cur = self.rf._parse_header(self.fd)
+        self.cur_avail = self.cur.add_size
+
+    def _skip(self, cnt):
+        """RAR Seek, skipping through rar files to get to correct position
+        """
+
+        while cnt > 0:
+            # next vol needed?
+            if self.cur_avail == 0:
+                if not self._open_next():
+                    break
+
+            # fd is in read pos, do the read
+            if cnt > self.cur_avail:
+                cnt -= self.cur_avail
+                self.remain -= self.cur_avail
+                self.cur_avail = 0
+            else:
+                self.fd.seek(cnt, 1)
+                self.cur_avail -= cnt
+                self.remain -= cnt
+                cnt = 0
+
+    def _read(self, cnt):
+        """Read from potentially multi-volume archive."""
+
+        buf = []
+        while cnt > 0:
+            # next vol needed?
+            if self.cur_avail == 0:
+                if not self._open_next():
+                    break
+
+            # fd is in read pos, do the read
+            if cnt > self.cur_avail:
+                data = self.fd.read(self.cur_avail)
+            else:
+                data = self.fd.read(cnt)
+            if not data:
+                break
+
+            # got some data
+            cnt -= len(data)
+            self.cur_avail -= len(data)
+            buf.append(data)
+
+        if len(buf) == 1:
+            return buf[0]
+        return EMPTY.join(buf)
+
    def _open_next(self):
        """Proceed to next volume.

        Returns True when the continuation entry for the current file
        was found and internal state points at its data, False when the
        file is not split across further volumes.
        """

        # is the file split over archives?
        if (self.cur.flags & RAR_FILE_SPLIT_AFTER) == 0:
            return False

        if self.fd:
            self.fd.close()
            self.fd = None

        # open next part (unbuffered - we do our own positioning)
        self.volfile = self.rf._next_volname(self.volfile)
        fd = open(self.volfile, "rb", 0)
        self.fd = fd

        # loop until first file header
        while 1:
            cur = self.rf._parse_header(fd)
            if not cur:
                raise BadRarFile("Unexpected EOF")
            if cur.type in (RAR_BLOCK_MARK, RAR_BLOCK_MAIN):
                # administrative block - skip over its payload, if any
                if cur.add_size:
                    fd.seek(cur.add_size, 1)
                continue
            # the next volume must continue with the same file entry
            if cur.orig_filename != self.inf.orig_filename:
                raise BadRarFile("Did not found file entry")
            self.cur = cur
            self.cur_avail = cur.add_size
            return True
+
    if have_memoryview:
        def readinto(self, buf):
            """Zero-copy read directly into buffer.

            Returns the number of bytes placed into *buf*, crossing
            volume boundaries as needed.
            """
            got = 0
            vbuf = memoryview(buf)
            while got < len(buf):
                # next vol needed?
                if self.cur_avail == 0:
                    if not self._open_next():
                        break

                # length for next read, capped to current volume's data
                cnt = len(buf) - got
                if cnt > self.cur_avail:
                    cnt = self.cur_avail

                # read straight into a slice of the caller's buffer
                res = self.fd.readinto(vbuf[got : got + cnt])
                if not res:
                    break
                if self.crc_check:
                    # fold freshly read bytes into the running CRC
                    self.CRC = crc32(vbuf[got : got + res], self.CRC)
                self.cur_avail -= res
                self.remain -= res
                got += res
            return got
+
+
class HeaderDecrypt:
    """File-like object that decrypts from another file"""
    def __init__(self, f, key, iv):
        # underlying (encrypted) file object
        self.f = f
        # AES-CBC cipher state carries over between read() calls
        self.ciph = AES.new(key, AES.MODE_CBC, iv)
        # leftover plaintext from the previously decrypted block
        self.buf = EMPTY

    def tell(self):
        # position in the underlying encrypted stream
        return self.f.tell()

    def read(self, cnt=None):
        """Return *cnt* bytes of decrypted data.

        NOTE(review): the cnt=None default looks unusable - the size
        check below would raise TypeError on Python 3; callers appear
        to always pass an explicit count. Confirm before relying on it.
        """
        # sanity cap: headers are small, a huge request means garbage
        if cnt > 8*1024:
            raise BadRarFile('Bad count to header decrypt - wrong password?')

        # consume old data
        if cnt <= len(self.buf):
            res = self.buf[:cnt]
            self.buf = self.buf[cnt:]
            return res
        res = self.buf
        self.buf = EMPTY
        cnt -= len(res)

        # decrypt new data, one cipher block at a time
        BLK = self.ciph.block_size
        while cnt > 0:
            enc = self.f.read(BLK)
            if len(enc) < BLK:
                # short read: source exhausted, return what we have
                break
            dec = self.ciph.decrypt(enc)
            if cnt >= len(dec):
                res += dec
                cnt -= len(dec)
            else:
                # keep the unconsumed tail for the next call
                res += dec[:cnt]
                self.buf = dec[cnt:]
                cnt = 0

        return res
+
+# handle (filename|filelike) object
class XFile(object):
    """Uniform wrapper over either a file name or an open file object."""
    __slots__ = ('_fd', '_need_close')

    def __init__(self, xfile, bufsize = 1024):
        if not is_filelike(xfile):
            # got a path: we own the handle and must close it later
            self._need_close = True
            self._fd = open(xfile, 'rb', bufsize)
        else:
            # got a file object: rewind it, closing stays caller's job
            self._need_close = False
            self._fd = xfile
            self._fd.seek(0)

    def read(self, n=None):
        return self._fd.read(n)

    def tell(self):
        return self._fd.tell()

    def seek(self, ofs, whence=0):
        return self._fd.seek(ofs, whence)

    def readinto(self, dst):
        return self._fd.readinto(dst)

    def close(self):
        # close only handles that were opened by us
        if self._need_close:
            self._fd.close()

    def __enter__(self):
        return self

    def __exit__(self, typ, val, tb):
        self.close()
+
+##
+## Utility functions
+##
+
def is_filelike(obj):
    """Return False for path strings, True for file-like objects.

    Objects that are neither strings nor provide the minimal
    read/tell/seek interface raise ValueError.
    """
    # plain strings (and py2 unicode) are treated as filesystem paths;
    # keep the short-circuit so `unicode` is not evaluated for str input
    if isinstance(obj, str) or isinstance(obj, unicode):
        return False
    # duck-typing: demand the minimal seekable-read API
    for name in ('read', 'tell', 'seek'):
        if not hasattr(obj, name):
            raise ValueError("Invalid object passed as file")
    return True
+
def rar3_s2k(psw, salt):
    """String-to-key hash for RAR3.

    Derives the 16-byte AES key and IV from password and salt with
    RAR3's custom SHA-1 based KDF (16 * 0x4000 update rounds).
    """

    # password is hashed as UTF-16LE, the way WinRAR stores it
    seed = psw.encode('utf-16le') + salt
    iv = EMPTY
    h = sha1()
    for i in range(16):
        for j in range(0x4000):
            # low 3 bytes of the running round counter
            cnt = S_LONG.pack(i*0x4000 + j)
            h.update(seed + cnt[:3])
            if j == 0:
                # one IV byte per outer round, taken from intermediate digest
                iv += h.digest()[19:20]
    key_be = h.digest()[:16]
    # digest words are big-endian; key is used in little-endian layout
    key_le = pack("<LLLL", *unpack(">LLLL", key_be))
    return key_le, iv
+
def rar_decompress(vers, meth, data, declen=0, flags=0, crc=0, psw=None, salt=None):
    """Decompress blob of compressed data.

    Used for data with non-standard header - eg. comments.

    Wraps *data* into a minimal single-file RAR archive in a temp file
    and lets the external unrar tool do the decompression.
    """

    # already uncompressed?
    if meth == RAR_M0 and (flags & RAR_FILE_PASSWORD) == 0:
        return data

    # take only necessary flags
    flags = flags & (RAR_FILE_PASSWORD | RAR_FILE_SALT | RAR_FILE_DICTMASK)
    flags |= RAR_LONG_BLOCK

    # synthesize a file header for the blob
    fname = bytes('data', 'ascii')
    date = 0
    mode = 0x20
    fhdr = S_FILE_HDR.pack(len(data), declen, RAR_OS_MSDOS, crc,
                           date, vers, meth, len(fname), mode)
    fhdr += fname
    if flags & RAR_FILE_SALT:
        if not salt:
            # salted entry cannot be reconstructed without the salt
            return EMPTY
        fhdr += salt

    # full header: pack once to compute the header CRC, then repack with it
    hlen = S_BLK_HDR.size + len(fhdr)
    hdr = S_BLK_HDR.pack(0, RAR_BLOCK_FILE, flags, hlen) + fhdr
    hcrc = crc32(hdr[2:]) & 0xFFFF
    hdr = S_BLK_HDR.pack(hcrc, RAR_BLOCK_FILE, flags, hlen) + fhdr

    # archive main header (0x90CF is its fixed, precomputed CRC)
    mh = S_BLK_HDR.pack(0x90CF, RAR_BLOCK_MAIN, 0, 13) + ZERO * (2+4)

    # decompress via temp rar
    tmpfd, tmpname = mkstemp(suffix='.rar')
    tmpf = os.fdopen(tmpfd, "wb")
    try:
        tmpf.write(RAR_ID + mh + hdr + data)
        tmpf.close()

        cmd = [UNRAR_TOOL] + list(OPEN_ARGS)
        add_password_arg(cmd, psw, (flags & RAR_FILE_PASSWORD))
        cmd.append(tmpname)

        p = custom_popen(cmd)
        return p.communicate()[0]
    finally:
        # second close() is a harmless no-op on the success path
        tmpf.close()
        os.unlink(tmpname)
+
def to_datetime(t):
    """Convert 6-part time tuple into datetime object.

    Out-of-range fields (as found in corrupt archives) are clamped to
    the nearest valid value instead of raising; *t* may be None.
    """
    if t is None:
        return None

    year, mon, day, hh, mm, secs = t
    whole = int(secs)
    micros = int(1000000 * (secs - whole))

    # fast path: values are usually valid
    try:
        return datetime(year, mon, day, hh, mm, whole, micros)
    except ValueError:
        pass

    # clamp each field into its legal range (index 0 unused)
    days_in_month = (0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    mon = min(max(mon, 1), 12)
    day = max(day, 1)
    if day > days_in_month[mon]:
        day = days_in_month[mon]
    hh = min(hh, 23)
    mm = min(mm, 59)
    whole = min(whole, 59)
    if mon == 2 and day == 29:
        # Feb 29 may still be invalid in a non-leap year
        try:
            return datetime(year, mon, day, hh, mm, whole, micros)
        except ValueError:
            day = 28
    return datetime(year, mon, day, hh, mm, whole, micros)
+
def parse_dos_time(stamp):
    """Parse standard 32-bit DOS timestamp.

    Bit layout, LSB first: seconds/2 (5 bits), minutes (6), hours (5),
    day (5), month (4), years-since-1980 (7).
    """
    two_sec = stamp & 0x1F
    minute = (stamp >> 5) & 0x3F
    hour = (stamp >> 11) & 0x1F
    day = (stamp >> 16) & 0x1F
    month = (stamp >> 21) & 0x0F
    year = ((stamp >> 25) & 0x7F) + 1980
    return (year, month, day, hour, minute, two_sec * 2)
+
def custom_popen(cmd):
    """Disconnect cmd from parent fds, read only from stdout."""

    # needed for py2exe: keep Windows from flashing a console window
    creationflags = 0
    if sys.platform == 'win32':
        creationflags = 0x08000000 # CREATE_NO_WINDOW

    # run command: unbuffered pipes, stderr folded into stdout
    try:
        p = Popen(cmd, bufsize = 0,
                  stdout = PIPE, stdin = PIPE, stderr = STDOUT,
                  creationflags = creationflags)
    except OSError:
        # sys.exc_info() form keeps py2/py3 source compatibility
        ex = sys.exc_info()[1]
        if ex.errno == errno.ENOENT:
            raise RarCannotExec("Unrar not installed? (rarfile.UNRAR_TOOL=%r)" % UNRAR_TOOL)
        raise
    return p
+
def custom_check(cmd, ignore_retcode=False):
    """Run command, collect its stdout, raise RarExecError on failure."""
    proc = custom_popen(cmd)
    out = proc.communicate()[0]
    # non-zero exit is fatal unless the caller opted out
    if proc.returncode and not ignore_retcode:
        raise RarExecError("Check-run failed")
    return out
+
def add_password_arg(cmd, psw, required=False):
    """Append password switch to commandline.

    '-p-' means "no password, do not prompt". The alternative tool
    does not understand -p switches at all, so skip it there.
    """
    if UNRAR_TOOL == ALT_TOOL:
        return
    cmd.append('-p-' if psw is None else '-p' + psw)
+
def check_returncode(p, out):
    """Raise exception according to unrar exit code"""

    code = p.returncode
    if code == 0:
        return

    # map return code to exception class
    errmap = [None,
        RarWarning, RarFatalError, RarCRCError, RarLockedArchiveError,
        RarWriteError, RarOpenError, RarUserError, RarMemoryError,
        RarCreateError, RarNoFilesError] # codes from rar.txt
    if UNRAR_TOOL == ALT_TOOL:
        # alternative tool has no documented exit-code table
        errmap = [None]
    if code > 0 and code < len(errmap):
        exc = errmap[code]
    elif code == 255:
        exc = RarUserBreak
    elif code < 0:
        # negative returncode: process died from a signal
        exc = RarSignalExit
    else:
        exc = RarUnknownError

    # format message, appending tool output when available
    if out:
        msg = "%s [%d]: %s" % (exc.__doc__, p.returncode, out)
    else:
        msg = "%s [%d]" % (exc.__doc__, p.returncode)

    raise exc(msg)
+
+#
+# Check if unrar works
+#
+
try:
    # does UNRAR_TOOL work?
    custom_check([UNRAR_TOOL], True)
except RarCannotExec:
    try:
        # does ALT_TOOL work?
        custom_check([ALT_TOOL] + list(ALT_CHECK_ARGS), True)
        # replace config so all later invocations use the alternative tool
        UNRAR_TOOL = ALT_TOOL
        OPEN_ARGS = ALT_OPEN_ARGS
        EXTRACT_ARGS = ALT_EXTRACT_ARGS
        TEST_ARGS = ALT_TEST_ARGS
    except RarCannotExec:
        # no usable tool, only uncompressed archives work
        pass
+
diff --git a/lib/rarfile/dumprar.py b/lib/rarfile/dumprar.py
new file mode 100644
index 0000000000000000000000000000000000000000..c922fe3aaf2198adeef4b097a8d0c33965fb749f
--- /dev/null
+++ b/lib/rarfile/dumprar.py
@@ -0,0 +1,352 @@
+#! /usr/bin/env python
+
+"""Dump archive contents, test extraction."""
+
+import sys
+import rarfile as rf
+from binascii import crc32, hexlify
+from datetime import datetime
+
try:
    bytearray
except NameError:
    # pre-2.6 compat: emulate bytearray with an unsigned-byte array
    import array
    def bytearray(v):
        return array.array('B', v)
+
+rf.UNICODE_COMMENTS = 1
+rf.USE_DATETIME = 1
+
+usage = """
+dumprar [switches] [ARC1 ARC2 ...] [@ARCLIST]
+switches:
+  @file      read archive names from file
+  -pPSW      set password
+  -Ccharset  set fallback charset
+  -v         increase verbosity
+  -t         attemt to read all files
+  -x         write read files out
+  -c         show archive comment
+  -h         show usage
+  --         stop switch parsing
+""".strip()
+
+os_list = ['DOS', 'OS2', 'WIN', 'UNIX', 'MACOS', 'BEOS']
+
+block_strs = ['MARK', 'MAIN', 'FILE', 'OLD_COMMENT', 'OLD_EXTRA',
+              'OLD_SUB', 'OLD_RECOVERY', 'OLD_AUTH', 'SUB', 'ENDARC']
+
def rarType(type):
    """Return the symbolic name for a RAR block type code."""
    if rf.RAR_BLOCK_MARK <= type <= rf.RAR_BLOCK_ENDARC:
        return block_strs[type - rf.RAR_BLOCK_MARK]
    return "*UNKNOWN*"
+                                 
+main_bits = (
+    (rf.RAR_MAIN_VOLUME, "VOL"),
+    (rf.RAR_MAIN_COMMENT, "COMMENT"),
+    (rf.RAR_MAIN_LOCK, "LOCK"),
+    (rf.RAR_MAIN_SOLID, "SOLID"),
+    (rf.RAR_MAIN_NEWNUMBERING, "NEWNR"),
+    (rf.RAR_MAIN_AUTH, "AUTH"),
+    (rf.RAR_MAIN_RECOVERY, "RECOVERY"),
+    (rf.RAR_MAIN_PASSWORD, "PASSWORD"),
+    (rf.RAR_MAIN_FIRSTVOLUME, "FIRSTVOL"),
+    (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
+    (rf.RAR_LONG_BLOCK, "LONG"),
+)
+
+endarc_bits = (
+    (rf.RAR_ENDARC_NEXT_VOLUME, "NEXTVOL"),
+    (rf.RAR_ENDARC_DATACRC, "DATACRC"),
+    (rf.RAR_ENDARC_REVSPACE, "REVSPACE"),
+    (rf.RAR_ENDARC_VOLNR, "VOLNR"),
+    (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
+    (rf.RAR_LONG_BLOCK, "LONG"),
+)
+
+file_bits = (
+    (rf.RAR_FILE_SPLIT_BEFORE, "SPLIT_BEFORE"),
+    (rf.RAR_FILE_SPLIT_AFTER, "SPLIT_AFTER"),
+    (rf.RAR_FILE_PASSWORD, "PASSWORD"),
+    (rf.RAR_FILE_COMMENT, "COMMENT"),
+    (rf.RAR_FILE_SOLID, "SOLID"),
+    (rf.RAR_FILE_LARGE, "LARGE"),
+    (rf.RAR_FILE_UNICODE, "UNICODE"),
+    (rf.RAR_FILE_SALT, "SALT"),
+    (rf.RAR_FILE_VERSION, "VERSION"),
+    (rf.RAR_FILE_EXTTIME, "EXTTIME"),
+    (rf.RAR_FILE_EXTFLAGS, "EXTFLAGS"),
+    (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
+    (rf.RAR_LONG_BLOCK, "LONG"),
+)
+
+generic_bits = (
+    (rf.RAR_SKIP_IF_UNKNOWN, "SKIP"),
+    (rf.RAR_LONG_BLOCK, "LONG"),
+)
+
+file_parms = ("D64", "D128", "D256", "D512",
+              "D1024", "D2048", "D4096", "DIR")
+
def xprint(m, *args):
    """Print *m* (optionally %-formatted with *args*) plus a newline.

    On Python 2 the message is round-tripped through unicode so that
    %-formatting of decoded data works; on Python 3 it is used as-is.
    """
    py2 = sys.hexversion < 0x3000000
    if py2:
        m = m.decode('utf8')
    if args:
        m = m % args
    if py2:
        m = m.encode('utf8')
    sys.stdout.write(m)
    sys.stdout.write('\n')
+
def render_flags(flags, bit_list):
    """Render *flags* as comma-separated names taken from *bit_list*.

    Bits that have no name in *bit_list* are reported as UNK_xxxx
    markers, one per bit, in ascending bit order.
    """
    names = [name for bit, name in bit_list if flags & bit]

    # mask out everything we have a name for
    known = 0
    for bit, _ in bit_list:
        known |= bit
    rest = flags & ~known

    pos = 0
    while rest:
        if rest & 1:
            names.append("UNK_%04x" % (1 << pos))
        rest >>= 1
        pos += 1

    return ",".join(names)
+
def get_file_flags(flags):
    """Render file-block flags, including the dictionary-size field."""
    res = render_flags(flags & ~rf.RAR_FILE_DICTMASK, file_bits)

    # dictionary size is a 3-bit field, not individual flag bits
    xf = (flags & rf.RAR_FILE_DICTMASK) >> 5
    res += "," + file_parms[xf]
    return res
+
def get_main_flags(flags):
    """Render flags of a MAIN block."""
    return render_flags(flags, main_bits)

def get_endarc_flags(flags):
    """Render flags of an ENDARC block."""
    return render_flags(flags, endarc_bits)

def get_generic_flags(flags):
    """Render flags of any other block type."""
    return render_flags(flags, generic_bits)
+
def fmt_time(t):
    """Format a datetime or a 6-tuple as 'YYYY-MM-DD HH:MM:SS'."""
    if not isinstance(t, datetime):
        # raw (y, m, d, hh, mm, ss) tuple from the parser
        return "%04d-%02d-%02d %02d:%02d:%02d" % t
    return t.isoformat(' ')
+
def show_item(h):
    """Print one parsed block header in human-readable form."""
    st = rarType(h.type)
    unknown = h.header_size - h.header_base
    xprint("%s: hdrlen=%d datlen=%d hdr_unknown=%d", st, h.header_size,
                h.add_size, unknown)
    if unknown > 0 and cf_verbose > 1:
        # dump the trailing header bytes that were not parsed
        dat = h.header_data[h.header_base : ]
        xprint("  unknown: %s", hexlify(dat))
    if h.type in (rf.RAR_BLOCK_FILE, rf.RAR_BLOCK_SUB):
        # unix mode is octal, everything else shown as raw hex
        if h.host_os == rf.RAR_OS_UNIX:
            s_mode = "0%o" % h.mode
        else:
            s_mode = "0x%x" % h.mode
        xprint("  flags=0x%04x:%s", h.flags, get_file_flags(h.flags))
        if h.host_os >= 0 and h.host_os < len(os_list):
            s_os = os_list[h.host_os]
        else:
            s_os = "?"
        xprint("  os=%d:%s ver=%d mode=%s meth=%c cmp=%d dec=%d vol=%d",
                h.host_os, s_os,
                h.extract_version, s_mode, h.compress_type,
                h.compress_size, h.file_size, h.volume)
        # normalize CRC to unsigned (py2 zlib may produce signed values)
        ucrc = (h.CRC + (1 << 32)) & ((1 << 32) - 1)
        xprint("  crc=0x%08x (%d) time=%s", ucrc, h.CRC, fmt_time(h.date_time))
        xprint("  name=%s", h.filename)
        if h.mtime:
            xprint("  mtime=%s", fmt_time(h.mtime))
        if h.ctime:
            xprint("  ctime=%s", fmt_time(h.ctime))
        if h.atime:
            xprint("  atime=%s", fmt_time(h.atime))
        if h.arctime:
            xprint("  arctime=%s", fmt_time(h.arctime))
    elif h.type == rf.RAR_BLOCK_MAIN:
        xprint("  flags=0x%04x:%s", h.flags, get_main_flags(h.flags))
    elif h.type == rf.RAR_BLOCK_ENDARC:
        xprint("  flags=0x%04x:%s", h.flags, get_endarc_flags(h.flags))
    elif h.type == rf.RAR_BLOCK_MARK:
        xprint("  flags=0x%04x:", h.flags)
    else:
        xprint("  flags=0x%04x:%s", h.flags, get_generic_flags(h.flags))

    if h.comment is not None:
        # strip the u'' prefix that repr() adds on python 2
        cm = repr(h.comment)
        if cm[0] == 'u':
            cm = cm[1:]
        xprint("  comment=%s", cm)
+
+cf_show_comment = 0
+cf_verbose = 0
+cf_charset = None
+cf_extract = 0
+cf_test_read = 0
+cf_test_unrar = 0
+
def check_crc(f, inf):
    """Compare the CRC computed while reading *f* against the header CRC.

    The computed CRC may be a signed 32-bit value (zlib's crc32 returned
    signed ints on Python 2); normalize it to unsigned before comparing.
    """
    ucrc = f.CRC
    if ucrc < 0:
        # was `long(1) << 32` -- `long` does not exist on Python 3 and
        # raised NameError whenever the CRC came back negative
        ucrc += 1 << 32
    if ucrc != inf.CRC:
        print('crc error')
+
def test_read_long(r, inf):
    """Read one member fully via .read(), checking size and CRC.

    With cf_test_read > 1 the member is re-read via .seek()/.readinto()
    as an extra exercise of those code paths.
    """
    f = r.open(inf.filename)
    total = 0
    while 1:
        data = f.read(8192)
        if not data:
            break
        total += len(data)
    if total != inf.file_size:
        xprint("\n *** %s has corrupt file: %s ***", r.rarfile, inf.filename)
        xprint(" *** short read: got=%d, need=%d ***\n", total, inf.file_size)
    check_crc(f, inf)

    # test .seek() & .readinto()
    if cf_test_read > 1:
        f.seek(0,0)

        # hack: re-enable crc calc - seek does not reset it
        f.crc_check = 1
        f.CRC = 0

        total = 0
        buf = bytearray(rf.ZERO*4096)
        while 1:
            res = f.readinto(buf)
            if not res:
                break
            total += res
        if inf.file_size != total:
            xprint(" *** readinto failed: got=%d, need=%d ***\n", total, inf.file_size)
        check_crc(f, inf)
    f.close()
+
def test_read(r, inf):
    # thin wrapper kept as the single entry point for read-testing
    test_read_long(r, inf)
+
+
def test_real(fn, psw):
    """Dump one archive: comment, entries, optional read/extract tests."""
    xprint("Archive: %s", fn)

    cb = None
    if cf_verbose > 1:
        # very verbose: show each header as it is parsed
        cb = show_item

    # check if rar
    if not rf.is_rarfile(fn):
        xprint(" --- %s is not a RAR file ---", fn)
        return

    # open
    r = rf.RarFile(fn, charset = cf_charset, info_callback = cb)
    # set password
    if r.needs_password():
        if psw:
            r.setpassword(psw)
        else:
            xprint(" --- %s requires password ---", fn)
            return

    # show comment
    if cf_show_comment and r.comment:
        for ln in r.comment.split('\n'):
            xprint("    %s", ln)
    elif cf_verbose == 1 and r.comment:
        # strip the u'' prefix that repr() adds on python 2
        cm = repr(r.comment)
        if cm[0] == 'u':
            cm = cm[1:]
        xprint("  comment=%s", cm)

    # process each member (directories have no data to test)
    for n in r.namelist():
        inf = r.getinfo(n)
        if inf.isdir():
            continue
        if cf_verbose == 1:
            show_item(inf)
        if cf_test_read:
            test_read(r, inf)

    if cf_extract:
        r.extractall()
        for inf in r.infolist():
            r.extract(inf)

    if cf_test_unrar:
        r.testrar()
+
def test(fn, psw):
    """Test one archive, converting expected failures into messages."""
    try:
        test_real(fn, psw)
    except rf.NeedFirstVolume:
        xprint(" --- %s is middle part of multi-vol archive ---", fn)
    except rf.Error:
        # sys.exc_info() form keeps py2/py3 source compatibility
        exc, msg, tb = sys.exc_info()
        xprint("\n *** %s: %s ***\n", exc.__name__, msg)
        del tb
    except IOError:
        exc, msg, tb = sys.exc_info()
        xprint("\n *** %s: %s ***\n", exc.__name__, msg)
        del tb
+
def main():
    """Parse command line switches and dump/test each archive given."""
    global cf_verbose, cf_show_comment, cf_charset
    global cf_extract, cf_test_read, cf_test_unrar

    # parse args
    args = []
    psw = None
    noswitch = False
    for a in sys.argv[1:]:
        if noswitch:
            args.append(a)
        elif a[0] == "@":
            # read archive names, one per line, from the listed file
            for ln in open(a[1:], 'r'):
                fn = ln[:-1]
                args.append(fn)
        elif a[0] != '-':
            args.append(a)
        elif a[1] == 'p':
            psw = a[2:]
        elif a == '--':
            noswitch = True
        elif a == '-h':
            xprint(usage)
            return
        elif a == '-v':
            cf_verbose += 1
        elif a == '-c':
            cf_show_comment = 1
        elif a == '-x':
            cf_extract = 1
        elif a == '-t':
            # may be given twice to also exercise seek()/readinto()
            cf_test_read += 1
        elif a == '-T':
            cf_test_unrar = 1
        elif a[1] == 'C':
            cf_charset = a[2:]
        else:
            raise Exception("unknown switch: "+a)
    if not args:
        xprint(usage)

    for fn in args:
        test(fn, psw)
+
+    
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # exit quietly on Ctrl-C
        pass
+
diff --git a/lib/subliminal/api.py b/lib/subliminal/api.py
index 6fe33686f6016e46179f9aa22cd42bd586f83382..906b897c9596a9ac872a29f7afd4b13a41d5b713 100644
--- a/lib/subliminal/api.py
+++ b/lib/subliminal/api.py
@@ -33,6 +33,7 @@ class InternalExtensionManager(ExtensionManager):
 
 provider_manager = InternalExtensionManager('subliminal.providers', [EntryPoint.parse(ep) for ep in (
     'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
+    'legendastv = subliminal.providers.legendastv:LegendasTvProvider',
     'napiprojekt = subliminal.providers.napiprojekt:NapiProjektProvider',
     'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider',
     'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider',
diff --git a/lib/subliminal/cli.py b/lib/subliminal/cli.py
index 537c21c8a430da21c333ff87d16cead816df8898..c419ef46a6d288fe444f0e8b02a65f545b9a8d3a 100644
--- a/lib/subliminal/cli.py
+++ b/lib/subliminal/cli.py
@@ -203,6 +203,7 @@ config_file = 'config.ini'
 @click.group(context_settings={'max_content_width': 100}, epilog='Suggestions and bug reports are greatly appreciated: '
              'https://github.com/Diaoul/subliminal/')
 @click.option('--addic7ed', type=click.STRING, nargs=2, metavar='USERNAME PASSWORD', help='Addic7ed configuration.')
+@click.option('--legendastv', type=click.STRING, nargs=2, metavar='USERNAME PASSWORD', help='LegendasTV configuration.')
 @click.option('--opensubtitles', type=click.STRING, nargs=2, metavar='USERNAME PASSWORD',
               help='OpenSubtitles configuration.')
 @click.option('--cache-dir', type=click.Path(writable=True, resolve_path=True, file_okay=False), default=app_dir,
@@ -210,7 +211,7 @@ config_file = 'config.ini'
 @click.option('--debug', is_flag=True, help='Print useful information for debugging subliminal and for reporting bugs.')
 @click.version_option(__version__)
 @click.pass_context
-def subliminal(ctx, addic7ed, opensubtitles, cache_dir, debug):
+def subliminal(ctx, addic7ed, legendastv, opensubtitles, cache_dir, debug):
     """Subtitles, faster than your thoughts."""
     # create cache directory
     try:
@@ -234,6 +235,8 @@ def subliminal(ctx, addic7ed, opensubtitles, cache_dir, debug):
     ctx.obj = {'provider_configs': {}}
     if addic7ed:
         ctx.obj['provider_configs']['addic7ed'] = {'username': addic7ed[0], 'password': addic7ed[1]}
+    if legendastv:
+        ctx.obj['provider_configs']['legendastv'] = {'username': legendastv[0], 'password': legendastv[1]}
     if opensubtitles:
         ctx.obj['provider_configs']['opensubtitles'] = {'username': opensubtitles[0], 'password': opensubtitles[1]}
 
diff --git a/lib/subliminal/converters/legendastv.py b/lib/subliminal/converters/legendastv.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4bc5d327f2d25e5552a2cc444ff9ca610d8fb5e
--- /dev/null
+++ b/lib/subliminal/converters/legendastv.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+from babelfish import LanguageReverseConverter
+
+from ..exceptions import ConfigurationError
+
+
class LegendasTvConverter(LanguageReverseConverter):
    """Convert between babelfish languages and LegendasTV numeric codes."""

    def __init__(self):
        # LegendasTV language number -> babelfish (alpha3[, country]) tuple
        self.from_legendastv = {1:  ('por', 'BR'),
                                2:  ('eng',),
                                3:  ('spa',),
                                4:  ('fra',),
                                5:  ('deu',),
                                6:  ('jpn',),
                                7:  ('dan',),
                                8:  ('nor',),
                                9:  ('swe',),
                                10: ('por',),
                                11: ('ara',),
                                12: ('ces',),
                                13: ('zho',),
                                14: ('kor',),
                                15: ('bul',),
                                16: ('ita',),
                                17: ('pol',)}
        # reverse mapping used by convert()
        self.to_legendastv = {v: k for k, v in self.from_legendastv.items()}
        self.codes = set(self.from_legendastv.keys())

    def convert(self, alpha3, country=None, script=None):
        # prefer the country-specific entry, then fall back to bare language
        for key in ((alpha3, country), (alpha3,)):
            if key in self.to_legendastv:
                return self.to_legendastv[key]

        raise ConfigurationError('Unsupported language code for legendastv: %s, %s, %s' % (alpha3, country, script))

    def reverse(self, legendastv):
        if legendastv not in self.from_legendastv:
            raise ConfigurationError('Unsupported language number for legendastv: %s' % legendastv)

        return self.from_legendastv[legendastv]
diff --git a/lib/subliminal/converters/podnapisi.py b/lib/subliminal/converters/podnapisi.py
deleted file mode 100644
index d73cb1c1fb978596ca5fff073c68195409754a1a..0000000000000000000000000000000000000000
--- a/lib/subliminal/converters/podnapisi.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-from babelfish import LanguageReverseConverter, LanguageConvertError, LanguageReverseError
-
-
-class PodnapisiConverter(LanguageReverseConverter):
-    def __init__(self):
-        self.from_podnapisi = {2: ('eng',), 28: ('spa',), 26: ('pol',), 36: ('srp',), 1: ('slv',), 38: ('hrv',),
-                               9: ('ita',), 8: ('fra',), 48: ('por', 'BR'), 23: ('nld',), 12: ('ara',), 13: ('ron',),
-                               33: ('bul',), 32: ('por',), 16: ('ell',), 15: ('hun',), 31: ('fin',), 30: ('tur',),
-                               7: ('ces',), 25: ('swe',), 27: ('rus',), 24: ('dan',), 22: ('heb',), 51: ('vie',),
-                               52: ('fas',), 5: ('deu',), 14: ('spa', 'AR'), 54: ('ind',), 47: ('srp', None, 'Cyrl'),
-                               3: ('nor',), 20: ('est',), 10: ('bos',), 17: ('zho',), 37: ('slk',), 35: ('mkd',),
-                               11: ('jpn',), 4: ('kor',), 29: ('sqi',), 6: ('isl',), 19: ('lit',), 46: ('ukr',),
-                               44: ('tha',), 53: ('cat',), 56: ('sin',), 21: ('lav',), 40: ('cmn',), 55: ('msa',),
-                               42: ('hin',), 50: ('bel',)}
-        self.to_podnapisi = {v: k for k, v in self.from_podnapisi.items()}
-        self.codes = set(self.from_podnapisi.keys())
-
-    def convert(self, alpha3, country=None, script=None):
-        if (alpha3,) in self.to_podnapisi:
-            return self.to_podnapisi[(alpha3,)]
-        if (alpha3, country) in self.to_podnapisi:
-            return self.to_podnapisi[(alpha3, country)]
-        if (alpha3, country, script) in self.to_podnapisi:
-            return self.to_podnapisi[(alpha3, country, script)]
-        raise LanguageConvertError(alpha3, country, script)
-
-    def reverse(self, podnapisi):
-        if podnapisi not in self.from_podnapisi:
-            raise LanguageReverseError(podnapisi)
-        return self.from_podnapisi[podnapisi]
diff --git a/lib/subliminal/converters/thesubdb.py b/lib/subliminal/converters/thesubdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9b879c5c725829cd122b6f57d836f5b9a41581d
--- /dev/null
+++ b/lib/subliminal/converters/thesubdb.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+from babelfish import LanguageReverseConverter
+from subliminal.exceptions import ConfigurationError
+
+
class TheSubDBConverter(LanguageReverseConverter):
    """Convert between babelfish languages and TheSubDB 2-letter codes."""

    def __init__(self):
        # TheSubDB code -> babelfish (alpha3[, country]) tuple
        self.from_thesubdb = {'en': ('eng',), 'es': ('spa',), 'fr': ('fra',), 'it': ('ita',), 'nl': ('nld',),
                              'pl': ('pol',), 'pt': ('por', 'BR'), 'ro': ('ron',), 'sv': ('swe',), 'tr': ('tur',)}
        # reverse mapping used by convert()
        self.to_thesubdb = {v: k for k, v in self.from_thesubdb.items()}
        self.codes = set(self.from_thesubdb.keys())

    def convert(self, alpha3, country=None, script=None):
        # prefer the country-specific entry, then fall back to bare language
        for key in ((alpha3, country), (alpha3,)):
            if key in self.to_thesubdb:
                return self.to_thesubdb[key]

        raise ConfigurationError('Unsupported language for thesubdb: %s, %s, %s' % (alpha3, country, script))

    def reverse(self, thesubdb):
        if thesubdb not in self.from_thesubdb:
            raise ConfigurationError('Unsupported language code for thesubdb: %s' % thesubdb)

        return self.from_thesubdb[thesubdb]
diff --git a/lib/subliminal/providers/legendastv.py b/lib/subliminal/providers/legendastv.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2a66a2d1216c6950b5a8a0cd1ae058a516b7c06
--- /dev/null
+++ b/lib/subliminal/providers/legendastv.py
@@ -0,0 +1,467 @@
+# -*- coding: utf-8 -*-
+import json
+import logging
+import os
+import re
+
+from babelfish import Language, language_converters
+from datetime import datetime
+from guessit import guess_file_info
+from rarfile import RarFile, is_rarfile
+from requests import Session
+from tempfile import NamedTemporaryFile
+from zipfile import ZipFile, is_zipfile
+
+from . import ParserBeautifulSoup, Provider
+from ..cache import region, EPISODE_EXPIRATION_TIME, SHOW_EXPIRATION_TIME
+from ..exceptions import AuthenticationError, ConfigurationError
+from ..subtitle import Subtitle, fix_line_ending, guess_matches
+from ..video import Episode, Movie, SUBTITLE_EXTENSIONS
+
+TIMEOUT = 10
+
+logger = logging.getLogger(__name__)
+language_converters.register('legendastv = subliminal.converters.legendastv:LegendasTvConverter')
+
+
+class LegendasTvSubtitle(Subtitle):
+    provider_name = 'legendastv'
+
+    def __init__(self, language, page_link, subtitle_id, name, imdb_id=None, guess=None, type=None, season=None,
+                 year=None, no_downloads=None, rating=None, featured=False, multiple_episodes=False, timestamp=None):
+        super(LegendasTvSubtitle, self).__init__(language, page_link=page_link)
+        self.subtitle_id = subtitle_id
+        self.name = name
+        self.imdb_id = imdb_id
+        self.type = type
+        self.season = season
+        self.year = year
+        self.no_downloads = no_downloads
+        self.rating = rating
+        self.featured = featured
+        self.multiple_episodes = multiple_episodes
+        self.timestamp = timestamp
+        self.guess = guess  # Do not need to guess it again if it was guessed before
+
+    @property
+    def id(self):
+        return '%s (%s)' % (self.name, self.subtitle_id)
+
+    def get_matches(self, video, hearing_impaired=False):
+        matches = super(LegendasTvSubtitle, self).get_matches(video, hearing_impaired=hearing_impaired)
+
+        # The best available information about a subtitle is its name. Using guessit to parse it.
+        guess = self.guess if self.guess else guess_file_info(self.name + '.mkv', type=self.type)
+        matches |= guess_matches(video, guess)
+
+        # imdb_id match used only for movies
+        if self.type == 'movie' and video.imdb_id and self.imdb_id == video.imdb_id:
+            matches.add('imdb_id')
+
+        return matches
+
+
+class LegendasTvProvider(Provider):
+    languages = {Language.fromlegendastv(l) for l in language_converters['legendastv'].codes}
+    video_types = (Episode, Movie)
+    server_url = 'http://legendas.tv'
+    word_split_re = re.compile('(\w+)', re.IGNORECASE)
+
+    def __init__(self, username=None, password=None):
+        if username is not None and password is None or username is None and password is not None:
+            raise ConfigurationError('Username and password must be specified')
+
+        self.username = username
+        self.password = password
+        self.logged_in = False
+
+    def initialize(self):
+        self.session = Session()
+
+        # login
+        if self.username is not None and self.password is not None:
+            logger.info('Logging in')
+            data = {'_method': 'POST', 'data[User][username]': self.username, 'data[User][password]': self.password}
+            r = self.session.post('%s/login' % self.server_url, data, allow_redirects=False, timeout=TIMEOUT)
+            r.raise_for_status()
+
+            soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
+            auth_error = soup.find('div', {'class': 'alert-error'}, text=re.compile(u'.*Usuário ou senha inválidos.*'))
+
+            if auth_error:
+                raise AuthenticationError(self.username)
+
+            logger.debug('Logged in')
+            self.logged_in = True
+
+    def terminate(self):
+        # logout
+        if self.logged_in:
+            logger.info('Logging out')
+            r = self.session.get('%s/users/logout' % self.server_url, timeout=TIMEOUT)
+            r.raise_for_status()
+            logger.debug('Logged out')
+            self.logged_in = False
+
+        self.session.close()
+
+    def matches(self, expected, actual, ignore_episode=False):
+        """
+        Matches two dictionaries (expected and actual). The dictionary keys follow the guessit properties names.
+        If the expected dictionary represents a movie:
+          - ``type`` should match
+          - ``title`` should match
+          - ``year`` should match, unless they're not defined and expected and actual ``title``s are the same
+        If the expected dictionary represents an episode:
+          - ``type`` should match
+          - ``series`` should match
+          - ``season`` should match
+          - ``episodeNumber`` should match, unless ``ignore_episode`` is True
+
+        :param expected: dictionary that contains the expected values
+        :param actual: dictionary that contains the actual values
+        :param ignore_episode: True if should ignore episodeNumber matching. Default: False
+        :return: True if actual matches expected
+        :rtype: bool
+        """
+        if expected.get('type') != actual.get('type'):
+            return False
+
+        if expected.get('type') == 'movie':
+            if not self.name_matches(expected.get('title'), actual.get('title')):
+                return False
+            if expected.get('year') != actual.get('year'):
+                if expected.get('year') and actual.get('year'):
+                    return False
+                if expected.get('title', '').lower() != actual.get('title', '').lower():
+                    return False
+
+        elif expected.get('type') == 'episode':
+            if not self.name_matches(expected.get('series'), actual.get('series')):
+                return False
+            if expected.get('season') != actual.get('season'):
+                return False
+            if not ignore_episode and expected.get('episodeNumber') != actual.get('episodeNumber'):
+                return False
+
+        return True
+
+    def name_matches(self, expected_name, actual_name):
+        """
+        Returns True if expected name and actual name match. To be considered a match, actual_name should contain
+        all words of expected_name in the order of appearance.
+
+        :param expected_name:
+        :param actual_name:
+        :return: True if actual_name matches expected_name
+        :rtype: bool
+        """
+        if not expected_name or not actual_name:
+            return expected_name == actual_name
+
+        words = self.word_split_re.findall(expected_name)
+        name_regex_re = re.compile('(.*' + '\W+'.join(words) + '.*)', re.IGNORECASE)
+
+        return name_regex_re.match(actual_name)
+
+    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
+    def search_titles(self, params):
+        """
+        Returns shows or movies information by querying `/legenda/sugestao` page.
+        Since the result is a list of suggestions (movies, tv shows, etc), additional filtering is required.
+        Type (movies or series), name, year and/or season are used to filter out bad suggestions.
+
+        :param params: dictionary containing the input parameters (title, series, season, episodeNumber, year)
+        :return: shows or movies information
+        :rtype: ``list`` of ``dict``
+        """
+
+        keyword = params.get('title') if params.get('type') == 'movie' else params.get('series')
+        logger.info('Searching titles using the keyword %s', keyword)
+        r = self.session.get('%s/legenda/sugestao/%s' % (self.server_url, keyword), timeout=TIMEOUT)
+        r.raise_for_status()
+
+        # get the shows/movies out of the suggestions.
+        # json sample:
+        # [
+        #    {
+        #        "_index": "filmes",
+        #        "_type": "filme",
+        #        "_id": "24551",
+        #        "_score": null,
+        #        "_source": {
+        #            "id_filme": "24551",
+        #            "id_imdb": "903747",
+        #            "tipo": "S",
+        #            "int_genero": "1036",
+        #            "dsc_imagen": "tt903747.jpg",
+        #            "dsc_nome": "Breaking Bad",
+        #            "dsc_sinopse": "Dos mesmos criadores de Arquivo X, mas n\u00e3o tem nada de sobrenatural nesta
+        #                            s\u00e9rie. A express\u00e3o \"breaking bad\" \u00e9 usada quando uma coisa que
+        #                            j\u00e1 estava ruim, fica ainda pior. E \u00e9 exatamente isso que acontece com
+        #                            Walter White, um professor de qu\u00edmica, que vivia sua vida \"tranquilamente\"
+        #                            quando, boom, um diagn\u00f3stico terminal muda tudo. O liberta. Ele come\u00e7a a
+        #                            usar suas habilidades em qu\u00edmica de outra forma: montando um laborat\u00f3rio
+        #                            de drogas para financiar o futuro de sua fam\u00edlia.",
+        #            "dsc_data_lancamento": "2011",
+        #            "dsc_url_imdb": "http:\/\/www.imdb.com\/title\/tt0903747\/",
+        #            "dsc_nome_br": "Breaking Bad - 4\u00aa Temporada",
+        #            "soundex": null,
+        #            "temporada": "4",
+        #            "id_usuario": "241436",
+        #            "flg_liberado": "0",
+        #            "dsc_data_liberacao": null,
+        #            "dsc_data": "2011-06-12T21:06:42",
+        #            "dsc_metaphone_us": "BRKNKBT0SSN",
+        #            "dsc_metaphone_br": "BRKNKBTTMPRT",
+        #            "episodios": null,
+        #            "flg_seriado": null,
+        #            "last_used": "1372569074",
+        #            "deleted": false
+        #        },
+        #        "sort": [
+        #            "4"
+        #        ]
+        #    }
+        # ]
+        #
+        # Notes:
+        #  tipo: Defines if the entry is a movie or a tv show (or a collection??)
+        #  imdb_id: Sometimes it appears as a number and sometimes as a string prefixed with tt
+        #  temporada: Sometimes is ``null`` and season information should be extracted from dsc_nome_br
+
+        results = json.loads(r.text)
+
+        # type, title, series, season, year follow guessit properties names
+        mapping = dict(
+            id='id_filme',
+            type='tipo',
+            title='dsc_nome',
+            series='dsc_nome',
+            season='temporada',
+            year='dsc_data_lancamento',
+            title_br='dsc_nome_br',
+            imdb_id='id_imdb'
+        )
+
+        # movie and episode values follow guessit type values
+        type_map = {
+            'M': 'movie',
+            'S': 'episode',
+            'C': 'episode'  # Considering C as episode. Probably C stands for Collections
+        }
+
+        # Regex to extract the season number. e.g.: 3\u00aa Temporada, 1a Temporada, 2nd Season
+        season_re = re.compile('.*? - (\d{1,2}).*?((emporada)|(Season))', re.IGNORECASE)
+
+        # Regex to extract the IMDB id. e.g.: tt02342
+        imdb_re = re.compile('t{0,2}(\d+)')
+
+        candidates = []
+        for result in results:
+            entry = result['_source']
+            item = {k: entry.get(v) for k, v in mapping.items()}
+            item['type'] = type_map.get(item.get('type'), 'movie')
+            item['imdb_id'] = (lambda m: m.group(1) if m else None)(imdb_re.search(item.get('imdb_id')))
+
+            # Season information might be missing and it should be extracted from 'title_br'
+            if not item.get('season') and item.get('title_br'):
+                item['season'] = (lambda m: m.group(1) if m else None)(season_re.search(item.get('title_br')))
+
+            # Some string fields are actually integers
+            for field in ['season', 'year', 'imdb_id']:
+                item[field] = (lambda v: int(v) if v and v.isdigit() else None)(item.get(field))
+
+            # ignoring episode match since this first step is only about movie/season information
+            if self.matches(params, item, ignore_episode=True):
+                candidates.append(dict(item))
+
+        logger.debug('Titles found: %s', candidates)
+        return candidates
+
+    def query(self, language, video, params):
+        """
+        Returns a list of subtitles based on the input parameters.
+          - 1st step: initial lookup for the movie/show information (see ``search_titles``)
+          - 2nd step: list all subtitle candidates for each movie/show found in the previous step
+          - 3rd step: reject candidates that don't match the input parameters (wrong season, wrong episode, etc...)
+          - 4th step: download all subtitles to inspect the 'release name',
+           since each candidate might refer to several subtitles
+
+        :param language: the requested language
+        :param video: the input video
+        :param params: the dictionary with the query parameters
+        :return: a list of subtitles that matches the query parameters
+        :rtype: ``list`` of ``LegendasTvSubtitle``
+        """
+        titles = self.search_titles(params)
+
+        # The language code used by legendas.tv
+        language_code = language.legendastv
+
+        # Regex to extract rating information (number of downloads and rate). e.g.: 12345 downloads, nota 10
+        rating_info_re = re.compile('(\d*) downloads, nota (\d{0,2})')
+
+        # Regex to extract the last update timestamp. e.g.: 25/12/2014 - 19:25
+        timestamp_info_re = re.compile('(\d{1,2}/\d{1,2}/\d{2,4} \- \d{1,2}:\d{1,2})')
+
+        # Regex to identify the 'pack' suffix that candidates might have. e.g.: (p)Breaking.Bad.S05.HDTV.x264
+        pack_name_re = re.compile('^\(p\)')
+
+        # Regex to extract the subtitle_id from the 'href'. e.g.: /download/560014472eb4d/foo/bar
+        subtitle_href_re = re.compile('/download/(\w+)/.+')
+
+        subtitles = []
+        # loop over matched movies/shows
+        for title in titles:
+            # page_url: {server_url}/util/carrega_legendas_busca_filme/{title_id}/{language_code}
+            page_url = '%s/util/carrega_legendas_busca_filme/%s/%d' % (self.server_url, title.get('id'), language_code)
+
+            # loop over paginated results
+            while page_url:
+                # query the server
+                r = self.session.get(page_url, timeout=TIMEOUT)
+                r.raise_for_status()
+
+                soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
+                div_tags = soup.find_all('div', {'class': 'f_left'})
+
+                # loop over each div which contains information about a single subtitle
+                for div in div_tags:
+                    link_tag = div.p.a
+
+                    # Removing forward-slashes from the candidate name (common practice in legendas.tv), since it
+                    # misleads guessit to identify the candidate name as a file in a specific folder (which is wrong).
+                    candidate_name = pack_name_re.sub('', link_tag.string).replace('/', '.')
+                    page_link = link_tag['href']
+                    subtitle_id = (lambda m: m.group(1) if m else None)(subtitle_href_re.search(page_link))
+                    multiple_episodes = bool(div.find_parent('div', {'class': 'pack'}) or
+                                             pack_name_re.findall(link_tag.string))
+                    featured = bool(div.find_parent('div', {'class': 'destaque'}))
+                    no_downloads = (lambda v: int(v) if v and v.isdigit() else None)(
+                        (lambda m: m.group(1) if m else None)(rating_info_re.search(div.text)))
+                    rating = (lambda v: int(v) if v and v.isdigit() else None)(
+                        (lambda m: m.group(2) if m else None)(rating_info_re.search(div.text)))
+                    timestamp = (lambda d: datetime.strptime(d, '%d/%m/%Y - %H:%M') if d else None)(
+                        (lambda m: m.group(1) if m else None)(timestamp_info_re.search(div.text)))
+
+                    # Using the candidate name to filter out bad candidates
+                    # (wrong type, wrong episode, wrong season or even wrong title)
+                    guess = guess_file_info(candidate_name + '.mkv', type=title.get('type'))
+                    if not self.matches(params, guess, ignore_episode=multiple_episodes):
+                        continue
+
+                    # Unfortunately, the only possible way to know the release names of a specific candidate is to
+                    # download the compressed file (rar/zip) and list the file names.
+                    subtitle_names = self.get_subtitle_names(subtitle_id, timestamp)
+
+                    if not subtitle_names:
+                        continue
+
+                    for name in subtitle_names:
+                        # Filtering out bad candidates (one archive might contain subtitles for the whole season,
+                        # therefore this filtering is necessary) and some subtitles are in a relative folder inside the
+                        # zip/rar file, therefore \ should be replaced by / in order to guess it properly
+                        base_name = os.path.splitext(name)[0].replace('\\', '/')
+                        guess = guess_file_info(base_name + '.mkv', type=title.get('type'))
+                        if not self.matches(params, guess):
+                            continue
+
+                        subtitle = LegendasTvSubtitle(language, page_link, subtitle_id, name, guess=guess,
+                                                      imdb_id=title.get('imdb_id'), type=title.get('type'),
+                                                      season=title.get('season'), year=title.get('year'),
+                                                      no_downloads=no_downloads, rating=rating, featured=featured,
+                                                      multiple_episodes=multiple_episodes, timestamp=timestamp)
+
+                        logger.debug('Found subtitle %s', subtitle)
+                        subtitles.append(subtitle)
+
+                next_page_link = soup.find('a', attrs={'class': 'load_more'}, text='carregar mais')
+                page_url = self.server_url + next_page_link['href'] if next_page_link else None
+
+        # High quality subtitles should have higher precedence when their scores are equal.
+        subtitles.sort(key=lambda s: (s.featured, s.no_downloads, s.rating, s.multiple_episodes), reverse=True)
+
+        return subtitles
+
+    def list_subtitles(self, video, languages):
+        """
+        Returns a list of subtitles for the defined video and requested languages
+
+        :param video:
+        :param languages: the requested languages
+        :return: a list of subtitles for the requested video and languages
+        :rtype: ``list`` of ``LegendasTvSubtitle``
+        """
+        if isinstance(video, Episode):
+            params = {'type': 'episode', 'series': video.series, 'season': video.season,
+                      'episodeNumber': video.episode, 'year': video.year}
+        elif isinstance(video, Movie):
+            params = {'type': 'movie', 'title': video.title, 'year': video.year}
+
+        return [s for l in languages for s in self.query(l, video, params) if params]
+
+    def get_subtitle_names(self, subtitle_id, timestamp):
+        """
+        Returns all subtitle names for a specific subtitle_id. Only subtitle names are returned.
+
+        :param subtitle_id: the id used to download the compressed file
+        :param timestamp: represents the last update timestamp of the file
+        :return: list of subtitle names
+        :rtype: ``list`` of ``string``
+        """
+        return self._uncompress(
+            subtitle_id, timestamp,
+            lambda cf: [f for f in cf.namelist()
+                        if 'legendas.tv' not in f.lower() and f.lower().endswith(SUBTITLE_EXTENSIONS)])
+
+    def extract_subtitle(self, subtitle_id, subtitle_name, timestamp):
+        """
+        Extract the subtitle content from the compressed file. The file is downloaded, the subtitle_name is uncompressed
+        and its contents is returned.
+
+        :param subtitle_id: the id used to download the compressed file
+        :param subtitle_name: the filename to be extracted
+        :param timestamp: represents the last update timestamp of the file
+        :return: the subtitle content
+        :rtype : ``string``
+        """
+        return self._uncompress(subtitle_id, timestamp,
+                                lambda cf, name: fix_line_ending(cf.read(name)), subtitle_name)
+
+    def _uncompress(self, subtitle_id, timestamp, function, *args, **kwargs):
+        content = self.download_content(subtitle_id, timestamp)
+
+        # Download content might be a rar file (most common) or a zip.
+        # Unfortunately, rarfile module only works with files (no in-memory streams)
+        tmp = NamedTemporaryFile()
+        try:
+            tmp.write(content)
+            tmp.flush()
+
+            cf = RarFile(tmp.name) if is_rarfile(tmp.name) else (ZipFile(tmp.name) if is_zipfile(tmp.name) else None)
+
+            return function(cf, *args, **kwargs) if cf else None
+        finally:
+            tmp.close()
+
+    @region.cache_on_arguments(expiration_time=EPISODE_EXPIRATION_TIME)
+    def download_content(self, subtitle_id, timestamp):
+        """
+        Downloads the compressed file for the specified subtitle_id. The timestamp is required in order to avoid the
+        cache when the compressed file is updated (it's a common practice in legendas.tv to update the archive with new
+        subtitles)
+
+        :param subtitle_id: the id used to download the compressed file
+        :param timestamp: represents the last update timestamp of the file
+        :return: the downloaded file
+        :rtype : ``bytes``
+        """
+        logger.debug('Downloading subtitle_id %s. Last update on %s' % (subtitle_id, timestamp))
+        r = self.session.get('%s/downloadarquivo/%s' % (self.server_url, subtitle_id), timeout=TIMEOUT)
+        r.raise_for_status()
+
+        return r.content
+
+    def download_subtitle(self, subtitle):
+        subtitle.content = self.extract_subtitle(subtitle.subtitle_id, subtitle.name, subtitle.timestamp)
diff --git a/lib/subliminal/providers/thesubdb.py b/lib/subliminal/providers/thesubdb.py
index bb82f8af55d567618c8948e041c34637b2ad2ef0..67d71a504e9dc36ecef15566021f2b9727110822 100644
--- a/lib/subliminal/providers/thesubdb.py
+++ b/lib/subliminal/providers/thesubdb.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 import logging
 
-from babelfish import Language
+from babelfish import Language, language_converters
 from requests import Session
 
 from . import Provider, get_version
@@ -10,7 +10,7 @@ from ..subtitle import Subtitle, fix_line_ending
 
 
 logger = logging.getLogger(__name__)
-
+language_converters.register('thesubdb = subliminal.converters.thesubdb:TheSubDBConverter')
 
 class TheSubDBSubtitle(Subtitle):
     provider_name = 'thesubdb'
@@ -34,7 +34,7 @@ class TheSubDBSubtitle(Subtitle):
 
 
 class TheSubDBProvider(Provider):
-    languages = {Language.fromalpha2(l) for l in ['en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ro', 'sv', 'tr']}
+    languages = {Language.fromthesubdb(l) for l in language_converters['thesubdb'].codes}
     required_hash = 'thesubdb'
     server_url = 'http://api.thesubdb.com/'
 
@@ -61,7 +61,7 @@ class TheSubDBProvider(Provider):
         # loop over languages
         subtitles = []
         for language_code in r.text.split(','):
-            language = Language.fromalpha2(language_code)
+            language = Language.fromthesubdb(language_code)
 
             subtitle = TheSubDBSubtitle(language, hash)
             logger.debug('Found subtitle %r', subtitle)
diff --git a/lib/subliminal/tests/__init__.py b/lib/subliminal/tests/__init__.py
deleted file mode 100644
index 6cef7800c72744f406499ac3769df7cc7e7210c9..0000000000000000000000000000000000000000
--- a/lib/subliminal/tests/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-from unittest import TextTestRunner, TestSuite
-from subliminal import cache_region
-from . import test_providers, test_subliminal
-
-
-cache_region.configure('dogpile.cache.memory', expiration_time=60 * 30)  # @UndefinedVariable
-suite = TestSuite([test_providers.suite(), test_subliminal.suite()])
-
-
-if __name__ == '__main__':
-    TextTestRunner().run(suite)
diff --git a/lib/subliminal/tests/common.py b/lib/subliminal/tests/common.py
deleted file mode 100644
index bd1608d456ec3f2f51c092e24c2ca3cfa21a091c..0000000000000000000000000000000000000000
--- a/lib/subliminal/tests/common.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-from subliminal import Movie, Episode
-
-
-MOVIES = [Movie('Man of Steel (2013)/man.of.steel.2013.720p.bluray.x264-felony.mkv', 'Man of Steel',
-                format='BluRay', release_group='felony', resolution='720p', video_codec='h264', audio_codec='DTS',
-                imdb_id=770828, size=7033732714, year=2013,
-                hashes={'opensubtitles': '5b8f8f4e41ccb21e', 'thesubdb': 'ad32876133355929d814457537e12dc2'})]
-
-EPISODES = [Episode('The Big Bang Theory/Season 07/The.Big.Bang.Theory.S07E05.720p.HDTV.X264-DIMENSION.mkv',
-                    'The Big Bang Theory', 7, 5, format='HDTV', release_group='DIMENSION', resolution='720p',
-                    video_codec='h264', audio_codec='AC3', imdb_id=3229392, size=501910737,
-                    title='The Workplace Proximity', year=2007, tvdb_id=80379,
-                    hashes={'opensubtitles': '6878b3ef7c1bd19e', 'thesubdb': '9dbbfb7ba81c9a6237237dae8589fccc'}),
-            Episode('Game of Thrones/Season 03/Game.of.Thrones.S03E10.Mhysa.720p.WEB-DL.DD5.1.H.264-NTb.mkv',
-                    'Game of Thrones', 3, 10, format='WEB-DL', release_group='NTb', resolution='720p',
-                    video_codec='h264', audio_codec='AC3', imdb_id=2178796, size=2142810931, title='Mhysa',
-                    tvdb_id=121361,
-                    hashes={'opensubtitles': 'b850baa096976c22', 'thesubdb': 'b1f899c77f4c960b84b8dbf840d4e42d'}),
-            Episode('Dallas.S01E03.mkv', 'Dallas', 1, 3),
-            Episode('Dallas.2012.S01E03.mkv', 'Dallas', 1, 3, year=2012)]
diff --git a/lib/subliminal/tests/test_providers.py b/lib/subliminal/tests/test_providers.py
deleted file mode 100644
index e98d9ad321c1dae3bd0dbf3975f41dab01ba0eda..0000000000000000000000000000000000000000
--- a/lib/subliminal/tests/test_providers.py
+++ /dev/null
@@ -1,475 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-import os
-from unittest import TestCase, TestSuite, TestLoader, TextTestRunner
-from babelfish import Language
-from subliminal import provider_manager
-from subliminal.tests.common import MOVIES, EPISODES
-
-
-class ProviderTestCase(TestCase):
-    provider_name = ''
-
-    def setUp(self):
-        self.Provider = provider_manager[self.provider_name]
-
-
-class Addic7edProviderTestCase(ProviderTestCase):
-    provider_name = 'addic7ed'
-
-    def test_find_show_id(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('the big bang')
-        self.assertEqual(show_id, 126)
-
-    def test_find_show_id_no_year(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('dallas')
-        self.assertEqual(show_id, 802)
-
-    def test_find_show_id_year(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('dallas', 2012)
-        self.assertEqual(show_id, 2559)
-
-    def test_find_show_id_error(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('the big how i met your mother')
-        self.assertIsNone(show_id)
-
-    def test_get_show_ids(self):
-        with self.Provider() as provider:
-            show_ids = provider.get_show_ids()
-        self.assertIn('the big bang theory', show_ids)
-        self.assertEqual(show_ids['the big bang theory'], 126)
-
-    def test_get_show_ids_no_year(self):
-        with self.Provider() as provider:
-            show_ids = provider.get_show_ids()
-        self.assertIn('dallas', show_ids)
-        self.assertEqual(show_ids['dallas'], 802)
-
-    def test_get_show_ids_year(self):
-        with self.Provider() as provider:
-            show_ids = provider.get_show_ids()
-        self.assertIn('dallas (2012)', show_ids)
-        self.assertEqual(show_ids['dallas (2012)'], 2559)
-
-    def test_query_episode_0(self):
-        video = EPISODES[0]
-        languages = {Language('tur'), Language('rus'), Language('heb'), Language('ita'), Language('fra'),
-                     Language('ron'), Language('nld'), Language('eng'), Language('deu'), Language('ell'),
-                     Language('por', 'BR'), Language('bul'), Language('por'), Language('msa')}
-        matches = {frozenset(['series', 'resolution', 'season']),
-                   frozenset(['series', 'episode', 'season', 'title']),
-                   frozenset(['series', 'release_group', 'season']),
-                   frozenset(['series', 'episode', 'season', 'release_group', 'title']),
-                   frozenset(['series', 'season']),
-                   frozenset(['series', 'season', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(video.series, video.season, video.year)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_1(self):
-        video = EPISODES[1]
-        languages = {Language('ind'), Language('spa'), Language('hrv'), Language('ita'), Language('fra'),
-                     Language('cat'), Language('ell'), Language('nld'), Language('eng'), Language('fas'),
-                     Language('por'), Language('nor'), Language('deu'), Language('ron'), Language('por', 'BR'),
-                     Language('bul')}
-        matches = {frozenset(['series', 'episode', 'resolution', 'season', 'title', 'year']),
-                   frozenset(['series', 'resolution', 'season', 'year']),
-                   frozenset(['series', 'resolution', 'season', 'year', 'format']),
-                   frozenset(['series', 'episode', 'season', 'title', 'year']),
-                   frozenset(['series', 'episode', 'season', 'title', 'year', 'format']),
-                   frozenset(['series', 'release_group', 'season', 'year']),
-                   frozenset(['series', 'release_group', 'season', 'year', 'format']),
-                   frozenset(['series', 'resolution', 'release_group', 'season', 'year']),
-                   frozenset(['series', 'resolution', 'release_group', 'season', 'year', 'format']),
-                   frozenset(['series', 'episode', 'season', 'release_group', 'title', 'year', 'format']),
-                   frozenset(['series', 'season', 'year']),
-                   frozenset(['series', 'season', 'year', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(video.series, video.season, video.year)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_year(self):
-        video_no_year = EPISODES[2]
-        video_year = EPISODES[3]
-        with self.Provider() as provider:
-            subtitles_no_year = provider.query(video_no_year.series, video_no_year.season, video_no_year.year)
-            subtitles_year = provider.query(video_year.series, video_year.season, video_year.year)
-        self.assertNotEqual(subtitles_no_year, subtitles_year)
-
-    def test_list_subtitles(self):
-        video = EPISODES[0]
-        languages = {Language('eng'), Language('fra')}
-        matches = {frozenset(['series', 'episode', 'season', 'release_group', 'title']),
-                   frozenset(['series', 'episode', 'season', 'title'])}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_download_subtitle(self):
-        video = EPISODES[0]
-        languages = {Language('eng'), Language('fra')}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-            provider.download_subtitle(subtitles[0])
-        self.assertIsNotNone(subtitles[0].content)
-        self.assertTrue(subtitles[0].is_valid)
-
-
-class OpenSubtitlesProviderTestCase(ProviderTestCase):
-    provider_name = 'opensubtitles'
-
-    def test_query_movie_0_query(self):
-        video = MOVIES[0]
-        languages = {Language('eng')}
-        matches = {frozenset([]),
-                   frozenset(['imdb_id', 'resolution', 'title', 'year']),
-                   frozenset(['imdb_id', 'resolution', 'title', 'year', 'format']),
-                   frozenset(['imdb_id', 'title', 'year']),
-                   frozenset(['imdb_id', 'title', 'year', 'format']),
-                   frozenset(['imdb_id', 'video_codec', 'title', 'year', 'format']),
-                   frozenset(['imdb_id', 'resolution', 'title', 'video_codec', 'year', 'format']),
-                   frozenset(['imdb_id', 'title', 'year', 'video_codec', 'resolution', 'release_group', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(languages, query=video.title)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_0_query(self):
-        video = EPISODES[0]
-        languages = {Language('eng')}
-        matches = {frozenset(['series', 'episode', 'season', 'imdb_id', 'format']),
-                   frozenset(['series', 'imdb_id', 'video_codec', 'episode', 'season', 'format']),
-                   frozenset(['episode', 'title', 'series', 'imdb_id', 'video_codec', 'season'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(languages, query=os.path.split(video.name)[1])
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_year(self):
-        video_no_year = EPISODES[2]
-        video_year = EPISODES[3]
-        languages = {Language('eng')}
-        with self.Provider() as provider:
-            subtitles_no_year = provider.query(languages, query=os.path.split(video_no_year.name)[1])
-            subtitles_year = provider.query(languages, query=os.path.split(video_year.name)[1])
-        self.assertNotEqual(subtitles_no_year, subtitles_year)
-
-    def test_query_episode_1_query(self):
-        video = EPISODES[1]
-        languages = {Language('eng'), Language('fra')}
-        matches = {frozenset(['episode', 'title', 'series', 'imdb_id', 'video_codec', 'season', 'year', 'format']),
-                   frozenset(['series', 'imdb_id', 'video_codec', 'episode', 'season', 'year']),
-                   frozenset(['episode', 'video_codec', 'series', 'imdb_id', 'resolution', 'season', 'year']),
-                   frozenset(['series', 'imdb_id', 'resolution', 'episode', 'season', 'year']),
-                   frozenset(['series', 'episode', 'season', 'imdb_id', 'year']),
-                   frozenset(['series', 'episode', 'season', 'imdb_id', 'year', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(languages, query=os.path.split(video.name)[1])
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_movie_0_imdb_id(self):
-        video = MOVIES[0]
-        languages = {Language('eng'), Language('fra')}
-        matches = {frozenset(['imdb_id', 'video_codec', 'title', 'year', 'format']),
-                   frozenset(['imdb_id', 'resolution', 'title', 'video_codec', 'year']),
-                   frozenset(['imdb_id', 'resolution', 'title', 'video_codec', 'year', 'format']),
-                   frozenset(['imdb_id', 'title', 'year', 'video_codec', 'resolution', 'release_group', 'format']),
-                   frozenset(['imdb_id', 'title', 'year']),
-                   frozenset(['imdb_id', 'title', 'year', 'format']),
-                   frozenset(['imdb_id', 'resolution', 'title', 'year']),
-                   frozenset(['imdb_id', 'resolution', 'title', 'year', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(languages, imdb_id=video.imdb_id)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_0_imdb_id(self):
-        video = EPISODES[0]
-        languages = {Language('eng'), Language('fra')}
-        matches = {frozenset(['series', 'episode', 'season', 'imdb_id', 'format']),
-                   frozenset(['episode', 'release_group', 'video_codec', 'series', 'imdb_id', 'resolution', 'season', 'format']),
-                   frozenset(['series', 'imdb_id', 'video_codec', 'episode', 'season', 'format']),
-                   frozenset(['episode', 'title', 'series', 'imdb_id', 'video_codec', 'season'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(languages, imdb_id=video.imdb_id)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_movie_0_hash(self):
-        video = MOVIES[0]
-        languages = {Language('eng')}
-        matches = {frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'imdb_id', 'format']),
-                   frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'release_group', 'imdb_id', 'format']),
-                   frozenset(['year', 'video_codec', 'imdb_id', 'hash', 'title', 'format']),
-                   frozenset([]),
-                   frozenset(['year', 'resolution', 'imdb_id', 'hash', 'title', 'format']),
-                   frozenset(['year', 'imdb_id', 'hash', 'title']),
-                   frozenset(['year', 'imdb_id', 'hash', 'title', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(languages, hash=video.hashes['opensubtitles'], size=video.size)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_0_hash(self):
-        video = EPISODES[0]
-        languages = {Language('eng')}
-        matches = {frozenset(['series', 'hash', 'format']),
-                   frozenset(['episode', 'season', 'series', 'imdb_id', 'video_codec', 'hash', 'format']),
-                   frozenset(['series', 'episode', 'season', 'hash', 'imdb_id', 'format']),
-                   frozenset(['series', 'resolution', 'hash', 'video_codec', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(languages, hash=video.hashes['opensubtitles'], size=video.size)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_list_subtitles(self):
-        video = MOVIES[0]
-        languages = {Language('eng'), Language('fra')}
-        matches = {frozenset(['title', 'video_codec', 'year', 'resolution', 'release_group', 'imdb_id', 'format']),
-                   frozenset(['imdb_id', 'year', 'title']),
-                   frozenset(['imdb_id', 'year', 'title', 'format']),
-                   frozenset(['year', 'video_codec', 'imdb_id', 'resolution', 'title']),
-                   frozenset(['year', 'video_codec', 'imdb_id', 'resolution', 'title', 'format']),
-                   frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'release_group', 'imdb_id', 'format']),
-                   frozenset(['year', 'video_codec', 'imdb_id', 'hash', 'title', 'format']),
-                   frozenset([]),
-                   frozenset(['year', 'resolution', 'imdb_id', 'hash', 'title', 'format']),
-                   frozenset(['hash', 'title', 'video_codec', 'year', 'resolution', 'imdb_id', 'format']),
-                   frozenset(['year', 'imdb_id', 'hash', 'title']),
-                   frozenset(['year', 'imdb_id', 'hash', 'title', 'format']),
-                   frozenset(['video_codec', 'imdb_id', 'year', 'title', 'format']),
-                   frozenset(['year', 'imdb_id', 'resolution', 'title']),
-                   frozenset(['year', 'imdb_id', 'resolution', 'title', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_download_subtitle(self):
-        video = MOVIES[0]
-        languages = {Language('eng'), Language('fra')}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-            provider.download_subtitle(subtitles[0])
-        self.assertIsNotNone(subtitles[0].content)
-        self.assertTrue(subtitles[0].is_valid)
-
-
-class PodnapisiProviderTestCase(ProviderTestCase):
-    provider_name = 'podnapisi'
-
-    def test_query_movie_0(self):
-        video = MOVIES[0]
-        language = Language('eng')
-        matches = {frozenset(['video_codec', 'title', 'resolution', 'year']),
-                   frozenset(['title', 'resolution', 'year']),
-                   frozenset(['video_codec', 'title', 'year']),
-                   frozenset(['title', 'year']),
-                   frozenset(['title']),
-                   frozenset(['video_codec', 'title', 'resolution', 'release_group', 'year', 'format']),
-                   frozenset(['video_codec', 'title', 'resolution', 'audio_codec', 'year', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(language, title=video.title, year=video.year)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, {language})
-
-    def test_query_episode_0(self):
-        video = EPISODES[0]
-        language = Language('eng')
-        matches = {frozenset(['episode', 'series', 'season', 'video_codec', 'resolution', 'release_group', 'format']),
-                   frozenset(['season', 'video_codec', 'episode', 'resolution', 'series'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(language, series=video.series, season=video.season, episode=video.episode,
-                                       year=video.year)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, {language})
-
-    def test_query_episode_1(self):
-        video = EPISODES[1]
-        language = Language('eng')
-        matches = {frozenset(['episode', 'release_group', 'series', 'video_codec', 'resolution', 'season', 'year', 'format']),
-                   frozenset(['episode', 'series', 'video_codec', 'resolution', 'season', 'year']),
-                   frozenset(['season', 'video_codec', 'episode', 'series', 'year'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(language, series=video.series, season=video.season, episode=video.episode,
-                                       year=video.year)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, {language})
-
-    def test_list_subtitles(self):
-        video = MOVIES[0]
-        languages = {Language('eng'), Language('fra')}
-        matches = {frozenset(['video_codec', 'title', 'resolution', 'year']),
-                   frozenset(['title', 'resolution', 'year']),
-                   frozenset(['video_codec', 'title', 'year']),
-                   frozenset(['video_codec', 'title', 'year', 'format']),
-                   frozenset(['title', 'year']),
-                   frozenset(['title']),
-                   frozenset(['video_codec', 'title', 'resolution', 'release_group', 'year', 'format']),
-                   frozenset(['video_codec', 'title', 'resolution', 'audio_codec', 'year', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_download_subtitle(self):
-        video = MOVIES[0]
-        languages = {Language('eng'), Language('fra')}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-            provider.download_subtitle(subtitles[0])
-        self.assertIsNotNone(subtitles[0].content)
-        self.assertTrue(subtitles[0].is_valid)
-
-
-class TheSubDBProviderTestCase(ProviderTestCase):
-    provider_name = 'thesubdb'
-
-    def test_query_episode_0(self):
-        video = EPISODES[0]
-        languages = {Language('eng'), Language('spa'), Language('por')}
-        matches = {frozenset(['hash'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(video.hashes['thesubdb'])
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_1(self):
-        video = EPISODES[1]
-        languages = {Language('eng'), Language('por')}
-        matches = {frozenset(['hash'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(video.hashes['thesubdb'])
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_list_subtitles(self):
-        video = MOVIES[0]
-        languages = {Language('eng'), Language('por')}
-        matches = {frozenset(['hash'])}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_download_subtitle(self):
-        video = MOVIES[0]
-        languages = {Language('eng'), Language('por')}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-            provider.download_subtitle(subtitles[0])
-            provider.download_subtitle(subtitles[1])
-        self.assertIsNotNone(subtitles[0].content)
-        self.assertTrue(subtitles[0].is_valid)
-
-
-class TVsubtitlesProviderTestCase(ProviderTestCase):
-    provider_name = 'tvsubtitles'
-
-    def test_find_show_id(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('the big bang')
-        self.assertEqual(show_id, 154)
-
-    def test_find_show_id_ambiguous(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('new girl')
-        self.assertEqual(show_id, 977)
-
-    def test_find_show_id_no_dots(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('marvel\'s agents of s h i e l d')
-        self.assertEqual(show_id, 1340)
-
-    def test_find_show_id_no_year_dallas(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('dallas')
-        self.assertEqual(show_id, 646)
-
-    def test_find_show_id_no_year_house_of_cards(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('house of cards')
-        self.assertEqual(show_id, 352)
-
-    def test_find_show_id_year_dallas(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('dallas', 2012)
-        self.assertEqual(show_id, 1127)
-
-    def test_find_show_id_year_house_of_cards(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('house of cards', 2013)
-        self.assertEqual(show_id, 1246)
-
-    def test_find_show_id_error(self):
-        with self.Provider() as provider:
-            show_id = provider.find_show_id('the big gaming')
-        self.assertIsNone(show_id)
-
-    def test_find_episode_ids(self):
-        with self.Provider() as provider:
-            episode_ids = provider.find_episode_ids(154, 5)
-        self.assertEqual(set(episode_ids.keys()), set(range(1, 25)))
-
-    def test_query_episode_0(self):
-        video = EPISODES[0]
-        languages = {Language('fra'), Language('por'), Language('hun'), Language('ron'), Language('eng')}
-        matches = {frozenset(['series', 'episode', 'season', 'video_codec', 'format']),
-                   frozenset(['series', 'episode', 'season', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(video.series, video.season, video.episode, video.year)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_query_episode_1(self):
-        video = EPISODES[1]
-        languages = {Language('fra'), Language('ell'), Language('ron'), Language('eng'), Language('hun'),
-                     Language('por'), Language('por', 'BR'), Language('jpn')}
-        matches = {frozenset(['series', 'episode', 'resolution', 'season', 'year']),
-                   frozenset(['series', 'episode', 'season', 'video_codec', 'year']),
-                   frozenset(['series', 'episode', 'season', 'year'])}
-        with self.Provider() as provider:
-            subtitles = provider.query(video.series, video.season, video.episode, video.year)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_list_subtitles(self):
-        video = EPISODES[0]
-        languages = {Language('eng'), Language('fra')}
-        matches = {frozenset(['series', 'episode', 'season', 'format'])}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-        self.assertEqual({frozenset(subtitle.compute_matches(video)) for subtitle in subtitles}, matches)
-        self.assertEqual({subtitle.language for subtitle in subtitles}, languages)
-
-    def test_download_subtitle(self):
-        video = EPISODES[0]
-        languages = {Language('hun')}
-        with self.Provider() as provider:
-            subtitles = provider.list_subtitles(video, languages)
-            provider.download_subtitle(subtitles[0])
-        self.assertIsNotNone(subtitles[0].content)
-        self.assertTrue(subtitles[0].is_valid)
-
-
-def suite():
-    suite = TestSuite()
-    suite.addTest(TestLoader().loadTestsFromTestCase(Addic7edProviderTestCase))
-    suite.addTest(TestLoader().loadTestsFromTestCase(OpenSubtitlesProviderTestCase))
-    suite.addTest(TestLoader().loadTestsFromTestCase(PodnapisiProviderTestCase))
-    suite.addTest(TestLoader().loadTestsFromTestCase(TheSubDBProviderTestCase))
-    suite.addTest(TestLoader().loadTestsFromTestCase(TVsubtitlesProviderTestCase))
-    return suite
-
-
-if __name__ == '__main__':
-    TextTestRunner().run(suite())
diff --git a/lib/subliminal/tests/test_subliminal.py b/lib/subliminal/tests/test_subliminal.py
deleted file mode 100644
index a991d81fc50180c5fefa9614612fefa4b3e31eba..0000000000000000000000000000000000000000
--- a/lib/subliminal/tests/test_subliminal.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-import os
-import shutil
-from unittest import TestCase, TestSuite, TestLoader, TextTestRunner
-from babelfish import Language
-from subliminal import list_subtitles, download_subtitles, save_subtitles, download_best_subtitles, scan_video
-from subliminal.tests.common import MOVIES, EPISODES
-
-
-TEST_DIR = 'test_data'
-
-
-class ApiTestCase(TestCase):
-    def setUp(self):
-        os.mkdir(TEST_DIR)
-
-    def tearDown(self):
-        shutil.rmtree(TEST_DIR)
-
-    def test_list_subtitles_movie_0(self):
-        videos = [MOVIES[0]]
-        languages = {Language('eng')}
-        subtitles = list_subtitles(videos, languages)
-        self.assertEqual(len(subtitles), len(videos))
-        self.assertGreater(len(subtitles[videos[0]]), 0)
-
-    def test_list_subtitles_movie_0_por_br(self):
-        videos = [MOVIES[0]]
-        languages = {Language('por', 'BR')}
-        subtitles = list_subtitles(videos, languages)
-        self.assertEqual(len(subtitles), len(videos))
-        self.assertGreater(len(subtitles[videos[0]]), 0)
-
-    def test_list_subtitles_episodes(self):
-        videos = [EPISODES[0], EPISODES[1]]
-        languages = {Language('eng'), Language('fra')}
-        subtitles = list_subtitles(videos, languages)
-        self.assertEqual(len(subtitles), len(videos))
-        self.assertGreater(len(subtitles[videos[0]]), 0)
-
-    def test_download_subtitles(self):
-        videos = [EPISODES[0]]
-        for video in videos:
-            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
-        languages = {Language('eng')}
-        subtitles = list_subtitles(videos, languages)
-        download_subtitles(subtitles[videos[0]][:5])
-        self.assertGreaterEqual(len([s for s in subtitles[videos[0]] if s.content is not None]), 4)
-
-    def test_download_best_subtitles(self):
-        videos = [EPISODES[0], EPISODES[1]]
-        for video in videos:
-            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
-        languages = {Language('eng'), Language('fra')}
-        subtitles = download_best_subtitles(videos, languages)
-        for video in videos:
-            self.assertIn(video, subtitles)
-            self.assertEqual(len(subtitles[video]), 2)
-
-    def test_save_subtitles(self):
-        videos = [EPISODES[0], EPISODES[1]]
-        for video in videos:
-            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
-        languages = {Language('eng'), Language('fra')}
-        subtitles = list_subtitles(videos, languages)
-
-        # make a list of subtitles to download (one per language per video)
-        subtitles_to_download = []
-        for video, video_subtitles in subtitles.items():
-            video_subtitle_languages = set()
-            for video_subtitle in video_subtitles:
-                if video_subtitle.language in video_subtitle_languages:
-                    continue
-                subtitles_to_download.append(video_subtitle)
-                video_subtitle_languages.add(video_subtitle.language)
-                if video_subtitle_languages == languages:
-                    break
-        self.assertEqual(len(subtitles_to_download), 4)
-
-        # download
-        download_subtitles(subtitles_to_download)
-        save_subtitles(subtitles)
-        for video in videos:
-            self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.en.srt'))
-            self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.fr.srt'))
-
-    def test_save_subtitles_single(self):
-        videos = [EPISODES[0], EPISODES[1]]
-        for video in videos:
-            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
-        languages = {Language('eng'), Language('fra')}
-        subtitles = download_best_subtitles(videos, languages)
-        save_subtitles(subtitles, single=True)
-        for video in videos:
-            self.assertIn(video, subtitles)
-            self.assertEqual(len(subtitles[video]), 2)
-            self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.srt'))
-
-    def test_download_best_subtitles_min_score(self):
-        videos = [MOVIES[0]]
-        for video in videos:
-            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
-        languages = {Language('eng'), Language('fra')}
-        subtitles = download_best_subtitles(videos, languages, min_score=1000)
-        self.assertEqual(len(subtitles), 0)
-
-    def test_download_best_subtitles_hearing_impaired(self):
-        videos = [MOVIES[0]]
-        for video in videos:
-            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
-        languages = {Language('eng')}
-        subtitles = download_best_subtitles(videos, languages, hearing_impaired=True)
-        self.assertTrue(subtitles[videos[0]][0].hearing_impaired)
-
-
-class VideoTestCase(TestCase):
-    def setUp(self):
-        os.mkdir(TEST_DIR)
-        for video in MOVIES + EPISODES:
-            open(os.path.join(TEST_DIR, os.path.split(video.name)[1]), 'w').close()
-
-    def tearDown(self):
-        shutil.rmtree(TEST_DIR)
-
-    def test_scan_video_movie(self):
-        video = MOVIES[0]
-        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
-        self.assertEqual(scanned_video.name, os.path.join(TEST_DIR, os.path.split(video.name)[1]))
-        self.assertEqual(scanned_video.title.lower(), video.title.lower())
-        self.assertEqual(scanned_video.year, video.year)
-        self.assertEqual(scanned_video.video_codec, video.video_codec)
-        self.assertEqual(scanned_video.format, video.format)
-        self.assertEqual(scanned_video.resolution, video.resolution)
-        self.assertEqual(scanned_video.release_group, video.release_group)
-        self.assertEqual(scanned_video.subtitle_languages, set())
-        self.assertEqual(scanned_video.hashes, {})
-        self.assertIsNone(scanned_video.audio_codec)
-        self.assertIsNone(scanned_video.imdb_id)
-        self.assertEqual(scanned_video.size, 0)
-
-    def test_scan_video_episode(self):
-        video = EPISODES[0]
-        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
-        self.assertEqual(scanned_video.name, os.path.join(TEST_DIR, os.path.split(video.name)[1]))
-        self.assertEqual(scanned_video.series, video.series)
-        self.assertEqual(scanned_video.season, video.season)
-        self.assertEqual(scanned_video.episode, video.episode)
-        self.assertEqual(scanned_video.video_codec, video.video_codec)
-        self.assertEqual(scanned_video.format, video.format)
-        self.assertEqual(scanned_video.resolution, video.resolution)
-        self.assertEqual(scanned_video.release_group, video.release_group)
-        self.assertEqual(scanned_video.subtitle_languages, set())
-        self.assertEqual(scanned_video.hashes, {})
-        self.assertIsNone(scanned_video.title)
-        self.assertIsNone(scanned_video.tvdb_id)
-        self.assertIsNone(scanned_video.imdb_id)
-        self.assertIsNone(scanned_video.audio_codec)
-        self.assertEqual(scanned_video.size, 0)
-
-    def test_scan_video_subtitle_language_und(self):
-        video = EPISODES[0]
-        open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.srt', 'w').close()
-        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
-        self.assertEqual(scanned_video.subtitle_languages, {Language('und')})
-
-    def test_scan_video_subtitles_language_eng(self):
-        video = EPISODES[0]
-        open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.en.srt', 'w').close()
-        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
-        self.assertEqual(scanned_video.subtitle_languages, {Language('eng')})
-
-    def test_scan_video_subtitles_languages(self):
-        video = EPISODES[0]
-        open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.en.srt', 'w').close()
-        open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.fr.srt', 'w').close()
-        open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.srt', 'w').close()
-        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
-        self.assertEqual(scanned_video.subtitle_languages, {Language('eng'), Language('fra'), Language('und')})
-
-
-def suite():
-    suite = TestSuite()
-    suite.addTest(TestLoader().loadTestsFromTestCase(ApiTestCase))
-    suite.addTest(TestLoader().loadTestsFromTestCase(VideoTestCase))
-    return suite
-
-
-if __name__ == '__main__':
-    TextTestRunner().run(suite())
diff --git a/readme.md b/readme.md
index e1531177ef1c2a32881acc1563153b00a7b84cdf..1028d1a027f64c363395112386c88981164f68b5 100644
--- a/readme.md
+++ b/readme.md
@@ -43,3 +43,16 @@ Automatic Video Library Manager for TV Shows. It watches for new episodes of you
 #### Important
 Before using this with your existing database (sickbeard.db) please make a backup copy of it and delete any other database files such as cache.db and failed.db if present<br/>
 We HIGHLY recommend starting out with no database files at all to make this a fresh start but the choice is at your own risk
+
+### Supported providers
+
+A full list can be found here: [Link](https://github.com/SiCKRAGETV/sickrage-issues/wiki/SickRage-Search-Providers)
+
+### Special Thanks to:
+![image](https://rarbg.com/favicon.ico)[RARBG](https://rarbg.to)
+![image](https://torrentproject.se/favicon.ico)[TorrentProject](https://torrentproject.se/about)
+![image](https://thepiratebay.la/favicon.ico)[ThePirateBay](https://thepiratebay.la/)
+![image](http://kat.cr/favicon.ico)[KickAssTorrents](https://kat.cr)
+![image](https://nzb.cat/favicon.ico)[NZB.cat](https://nzb.cat/)
+![image](https://nzbgeek.info/favicon.ico)[NZBGeek](https://nzbgeek.info)
+![image](https://raw.githubusercontent.com/SiCKRAGETV/SickRage/master/gui/slick/images/providers/dognzb.png)[DOGnzb](https://dognzb.cr)
diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py
index 790bf8c97fc631918c08c96bad54b4bfabca096d..5a41da479d5e1a48570ccb9e2e51967ab12c8b0c 100644
--- a/sickbeard/__init__.py
+++ b/sickbeard/__init__.py
@@ -526,10 +526,20 @@ SUBTITLES_SERVICES_LIST = []
 SUBTITLES_SERVICES_ENABLED = []
 SUBTITLES_HISTORY = False
 EMBEDDED_SUBTITLES_ALL = False
+SUBTITLES_HEARING_IMPAIRED = False
 SUBTITLES_FINDER_FREQUENCY = 1
 SUBTITLES_MULTI = False
 SUBTITLES_EXTRA_SCRIPTS = []
 
+ADDIC7ED_USER = None
+ADDIC7ED_PASS = None
+
+OPENSUBTITLES_USER = None
+OPENSUBTITLES_PASS = None
+
+LEGENDASTV_USER = None
+LEGENDASTV_PASS = None
+
 USE_FAILED_DOWNLOADS = False
 DELETE_FAILED = False
 
@@ -604,9 +614,10 @@ def initialize(consoleLogging=True):
             USE_LISTVIEW, METADATA_KODI, METADATA_KODI_12PLUS, METADATA_MEDIABROWSER, METADATA_PS3, metadata_provider_dict, \
             NEWZBIN, NEWZBIN_USERNAME, NEWZBIN_PASSWORD, GIT_PATH, MOVE_ASSOCIATED_FILES, SYNC_FILES, POSTPONE_IF_SYNC_FILES, dailySearchScheduler, NFO_RENAME, \
             GUI_NAME, HOME_LAYOUT, HISTORY_LAYOUT, DISPLAY_SHOW_SPECIALS, COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, COMING_EPS_MISSED_RANGE, DISPLAY_FILESIZE, FUZZY_DATING, TRIM_ZERO, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, THEME_NAME, FILTER_ROW, \
-            POSTER_SORTBY, POSTER_SORTDIR, HISTORY_LIMIT, \
-            METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, IGNORED_SUBS_LIST, REQUIRE_WORDS, CALENDAR_UNPROTECTED, NO_RESTART, CREATE_MISSING_SHOW_DIRS, \
-            ADD_SHOWS_WO_DIR, USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, SUBTITLES_MULTI, EMBEDDED_SUBTITLES_ALL, SUBTITLES_EXTRA_SCRIPTS, subtitlesFinderScheduler, \
+            POSTER_SORTBY, POSTER_SORTDIR, HISTORY_LIMIT, CREATE_MISSING_SHOW_DIRS, ADD_SHOWS_WO_DIR, \
+            METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, IGNORED_SUBS_LIST, REQUIRE_WORDS, CALENDAR_UNPROTECTED, NO_RESTART, \
+            USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, SUBTITLES_MULTI, EMBEDDED_SUBTITLES_ALL, SUBTITLES_EXTRA_SCRIPTS, subtitlesFinderScheduler, \
+            SUBTITLES_HEARING_IMPAIRED, ADDIC7ED_USER, ADDIC7ED_PASS, LEGENDASTV_USER, LEGENDASTV_PASS, OPENSUBTITLES_USER, OPENSUBTITLES_PASS, \
             USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, TMDB_API_KEY, DEBUG, DEFAULT_PAGE, PROXY_SETTING, PROXY_INDEXERS, \
             AUTOPOSTPROCESSER_FREQUENCY, SHOWUPDATE_HOUR, DEFAULT_AUTOPOSTPROCESSER_FREQUENCY, MIN_AUTOPOSTPROCESSER_FREQUENCY, \
             ANIME_DEFAULT, NAMING_ANIME, ANIMESUPPORT, USE_ANIDB, ANIDB_USERNAME, ANIDB_PASSWORD, ANIDB_USE_MYLIST, \
@@ -1133,12 +1144,22 @@ def initialize(consoleLogging=True):
         SUBTITLES_DEFAULT = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_default', 0))
         SUBTITLES_HISTORY = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_history', 0))
         EMBEDDED_SUBTITLES_ALL = bool(check_setting_int(CFG, 'Subtitles', 'embedded_subtitles_all', 0))
+        SUBTITLES_HEARING_IMPAIRED = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_hearing_impaired', 0))
         SUBTITLES_FINDER_FREQUENCY = check_setting_int(CFG, 'Subtitles', 'subtitles_finder_frequency', 1)
         SUBTITLES_MULTI = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_multi', 1))
 
         SUBTITLES_EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'Subtitles', 'subtitles_extra_scripts', '').split('|') if
                          x.strip()]
 
+        ADDIC7ED_USER = check_setting_str(CFG, 'Subtitles', 'addic7ed_username', '', censor_log=True)
+        ADDIC7ED_PASS = check_setting_str(CFG, 'Subtitles', 'addic7ed_password', '', censor_log=True)
+
+        LEGENDASTV_USER = check_setting_str(CFG, 'Subtitles', 'legendastv_username', '', censor_log=True)
+        LEGENDASTV_PASS = check_setting_str(CFG, 'Subtitles', 'legendastv_password', '', censor_log=True)
+
+        OPENSUBTITLES_USER = check_setting_str(CFG, 'Subtitles', 'opensubtitles_username', '', censor_log=True)
+        OPENSUBTITLES_PASS = check_setting_str(CFG, 'Subtitles', 'opensubtitles_password', '', censor_log=True)
+
         USE_FAILED_DOWNLOADS = bool(check_setting_int(CFG, 'FailedDownloads', 'use_failed_downloads', 0))
         DELETE_FAILED = bool(check_setting_int(CFG, 'FailedDownloads', 'delete_failed', 0))
 
@@ -2124,10 +2145,20 @@ def save_config():
     new_config['Subtitles']['subtitles_default'] = int(SUBTITLES_DEFAULT)
     new_config['Subtitles']['subtitles_history'] = int(SUBTITLES_HISTORY)
     new_config['Subtitles']['embedded_subtitles_all'] = int(EMBEDDED_SUBTITLES_ALL)
+    new_config['Subtitles']['subtitles_hearing_impaired'] = int(SUBTITLES_HEARING_IMPAIRED)
     new_config['Subtitles']['subtitles_finder_frequency'] = int(SUBTITLES_FINDER_FREQUENCY)
     new_config['Subtitles']['subtitles_multi'] = int(SUBTITLES_MULTI)
     new_config['Subtitles']['subtitles_extra_scripts'] = '|'.join(SUBTITLES_EXTRA_SCRIPTS)
 
+    new_config['Subtitles']['addic7ed_username'] = ADDIC7ED_USER
+    new_config['Subtitles']['addic7ed_password'] = helpers.encrypt(ADDIC7ED_PASS, ENCRYPTION_VERSION)
+
+    new_config['Subtitles']['legendastv_username'] = LEGENDASTV_USER
+    new_config['Subtitles']['legendastv_password'] = helpers.encrypt(LEGENDASTV_PASS, ENCRYPTION_VERSION)
+
+    new_config['Subtitles']['opensubtitles_username'] = OPENSUBTITLES_USER
+    new_config['Subtitles']['opensubtitles_password'] = helpers.encrypt(OPENSUBTITLES_PASS, ENCRYPTION_VERSION)
+
     new_config['FailedDownloads'] = {}
     new_config['FailedDownloads']['use_failed_downloads'] = int(USE_FAILED_DOWNLOADS)
     new_config['FailedDownloads']['delete_failed'] = int(DELETE_FAILED)
diff --git a/sickbeard/common.py b/sickbeard/common.py
index 0c53785ff4fb77a8cda0dca49714085026ba4127..f8b67ffdb6125c3e483f764969a18617c6ccd225 100644
--- a/sickbeard/common.py
+++ b/sickbeard/common.py
@@ -265,15 +265,14 @@ class Quality:
 
             return ret
 
-        if checkName([r"([sp]d.?tv|hd.?tv|dsr|tv(rip|mux)|satrip).(xvid|x26[45]|h.?26[45])"], all) and not checkName([r"(720|1080)[pi]"], all) and\
-                not checkName([r"hr.ws.pdtv.x26[45]"], any):
+        if (checkName([r"480p|web.?dl|web(rip|mux|hd)|[sph]d.?tv|dsr|tv(rip|mux)|satrip", r"xvid|divx|[xh].?26[45]"], all)
+                and not checkName([r"(720|1080)[pi]"], all) and not checkName([r"hr.ws.pdtv.[xh].?26[45]"], any)):
             ret = Quality.SDTV
-        elif checkName([r"web.?dl|web(rip|mux|hd)", r"xvid|x26[45]|h.?26[45]"], all) and not checkName([r"(720|1080)[pi]"], all):
-            ret = Quality.SDTV
-        elif checkName([r"(dvd(rip|mux)|b[rd](rip|mux)|blue?-?ray)(.ws)?.(xvid|divx|[xh].?26[45])"], any) and not checkName([r"(720|1080)[pi]"], all):
+        elif (checkName([r"dvd(rip|mux)|b[rd](rip|mux)|blue?-?ray", r"xvid|divx|[xh].?26[45]"], any)
+              and not checkName([r"(720|1080)[pi]"], all) and not checkName([r"hr.ws.pdtv.[xh].?26[45]"], any)):
             ret = Quality.SDDVD
-        elif checkName([r"720p", r"hd.?tv", r"[xh].?26[45]"], all) or checkName([r"hr.ws.pdtv.[xh].?26[45]"], any) and not checkName(
-                [r"1080[pi]"], all):
+        elif (checkName([r"720p", r"hd.?tv", r"[xh].?26[45]"], all) or checkName([r"hr.ws.pdtv.[xh].?26[45]"], any)
+              and not checkName([r"1080[pi]"], all)):
             ret = Quality.HDTV
         elif checkName([r"720p|1080i", r"hd.?tv", r"mpeg-?2"], all) or checkName([r"1080[pi].hdtv", r"h.?26[45]"], all):
             ret = Quality.RAWHDTV
diff --git a/sickbeard/helpers.py b/sickbeard/helpers.py
index adb65d150d3ef6f78715fb0b9c50f8ef84adde70..82492a048837c48f41097f1c303c0cbff6a80fb5 100644
--- a/sickbeard/helpers.py
+++ b/sickbeard/helpers.py
@@ -541,7 +541,8 @@ def hardlinkFile(srcFile, destFile):
         ek(link, srcFile, destFile)
         fixSetGroupID(destFile)
     except Exception as e:
-        logger.log(u"Failed to create hardlink of %s at %s. Error: %r. Copying instead" % (srcFile, destFile, ex(e)),logger.ERROR)
+        logger.log(u"Failed to create hardlink of %s at %s. Error: %r. Copying instead" 
+        % (srcFile, destFile, ex(e)), logger.ERROR)
         copyFile(srcFile, destFile)
 
 
@@ -574,7 +575,8 @@ def moveAndSymlinkFile(srcFile, destFile):
         fixSetGroupID(destFile)
         ek(symlink, destFile, srcFile)
     except Exception as e:
-        logger.log(u"Failed to create symlink of %s at %s. Error: %r. Copying instead" % (srcFile, destFile, ex(e)),logger.ERROR)
+        logger.log(u"Failed to create symlink of %s at %s. Error: %r. Copying instead" 
+        % (srcFile, destFile, ex(e)), logger.ERROR)
         copyFile(srcFile, destFile)
 
 
@@ -1021,13 +1023,13 @@ def restoreVersionedFile(backup_file, version):
         return False
 
     try:
-        logger.log(u"Trying to backup %s to %s.r%s before restoring backup" % (new_file, new_file, version),
-            logger.DEBUG)
+        logger.log(u"Trying to backup %s to %s.r%s before restoring backup" 
+        % (new_file, new_file, version), logger.DEBUG)
 
         shutil.move(new_file, new_file + '.' + 'r' + str(version))
     except Exception as e:
-        logger.log(u"Error while trying to backup DB file %s before proceeding with restore: %r" % (restore_file, ex(e)),
-            logger.WARNING)
+        logger.log(u"Error while trying to backup DB file %s before proceeding with restore: %r" 
+        % (restore_file, ex(e)), logger.WARNING)
         return False
 
     while not ek(os.path.isfile, new_file):
@@ -1591,8 +1593,8 @@ def getURL(url, post_data=None, params={}, headers={}, timeout=30, session=None,
             resp = session.get(url, timeout=timeout, allow_redirects=True, verify=session.verify)
 
         if not resp.ok:
-            logger.log(u"Requested getURL %s returned status code is %s: %s" % (url, resp.status_code, codeDescription(resp.status_code)),
-            logger.DEBUG)
+            logger.log(u"Requested getURL %s returned status code is %s: %s" 
+            % (url, resp.status_code, codeDescription(resp.status_code)), logger.DEBUG)
             return None
 
         if proxyGlypeProxySSLwarning is not None:
@@ -1600,8 +1602,8 @@ def getURL(url, post_data=None, params={}, headers={}, timeout=30, session=None,
                 resp = session.get(proxyGlypeProxySSLwarning, timeout=timeout, allow_redirects=True, verify=session.verify)
 
                 if not resp.ok:
-                    logger.log(u"GlypeProxySSLwarning: Requested getURL %s returned status code is %s: %s" % (url, resp.status_code, codeDescription(resp.status_code)),
-                    logger.DEBUG)
+                    logger.log(u"GlypeProxySSLwarning: Requested getURL %s returned status code is %s: %s" 
+                    % (url, resp.status_code, codeDescription(resp.status_code)), logger.DEBUG)
                     return None
 
     except (SocketTimeout, TypeError) as e:
@@ -1650,7 +1652,8 @@ def download_file(url, filename, session=None, headers={}):
     try:
         with closing(session.get(url, allow_redirects=True, verify=session.verify)) as resp:
             if not resp.ok:
-                logger.log(u"Requested download url %s returned status code is %s: %s" % ( url, resp.status_code, codeDescription(resp.status_code) ) , logger.DEBUG)
+                logger.log(u"Requested download url %s returned status code is %s: %s" 
+                % (url, resp.status_code, codeDescription(resp.status_code)), logger.DEBUG)
                 return False
 
             try:
@@ -1665,11 +1668,12 @@ def download_file(url, filename, session=None, headers={}):
                 logger.log(u"Problem setting permissions or writing file to: %s" % filename, logger.WARNING)
 
     except (SocketTimeout, TypeError) as e:
+        _remove_file_failed(filename)
         logger.log(u"Connection timed out (sockets) while loading download URL %s Error: %r" % (url, ex(e)), logger.WARNING)
         return None
     except requests.exceptions.HTTPError as e:
         _remove_file_failed(filename)
-        logger.log(u"HTTP error %r while loading download URL %s " % (ex(e), url ), logger.WARNING)
+        logger.log(u"HTTP error %r while loading download URL %s " % (ex(e), url), logger.WARNING)
         return False
     except requests.exceptions.ConnectionError as e:
         _remove_file_failed(filename)
@@ -1677,7 +1681,7 @@ def download_file(url, filename, session=None, headers={}):
         return False
     except requests.exceptions.Timeout as e:
         _remove_file_failed(filename)
-        logger.log(u"Connection timed out %r while loading download URL %s " % (ex(e), url ), logger.WARNING)
+        logger.log(u"Connection timed out %r while loading download URL %s " % (ex(e), url), logger.WARNING)
         return False
     except EnvironmentError as e:
         _remove_file_failed(filename)
@@ -1685,7 +1689,7 @@ def download_file(url, filename, session=None, headers={}):
         return False
     except Exception:
         _remove_file_failed(filename)
-        logger.log(u"Unknown exception while loading download URL %s : %r" % ( url, traceback.format_exc() ), logger.WARNING)
+        logger.log(u"Unknown exception while loading download URL %s : %r" % (url, traceback.format_exc()), logger.WARNING)
         return False
 
     return True
@@ -1817,8 +1821,8 @@ def verify_freespace(src, dest, oldfile=None):
     if diskfree > neededspace:
         return True
     else:
-        logger.log("Not enough free space: Needed: %s bytes ( %s ), found: %s bytes ( %s )" % ( neededspace, pretty_filesize(neededspace), diskfree, pretty_filesize(diskfree) ) ,
-        logger.WARNING)
+        logger.log("Not enough free space: Needed: %s bytes ( %s ), found: %s bytes ( %s )" 
+        % (neededspace, pretty_filesize(neededspace), diskfree, pretty_filesize(diskfree)), logger.WARNING)
         return False
 
 # https://gist.github.com/thatalextaylor/7408395
diff --git a/sickbeard/providers/rarbg.py b/sickbeard/providers/rarbg.py
index 76db48395987edf307393e8e9fea95c2f43bb20d..149f23519e29e87878e526757f78e19b158d0ed0 100644
--- a/sickbeard/providers/rarbg.py
+++ b/sickbeard/providers/rarbg.py
@@ -260,8 +260,8 @@ class RarbgCache(tvcache.TVCache):
 
         tvcache.TVCache.__init__(self, provider_obj)
 
-        # only poll RARbg every 15 minutes max
-        self.minTime = 5
+        # only poll RARBG every 10 minutes max
+        self.minTime = 10
 
     def _getRSSData(self):
         search_params = {'RSS': ['']}
diff --git a/sickbeard/providers/scc.py b/sickbeard/providers/scc.py
index fd865d4721aaae342695d3fa4b66ac2adf514938..f59d9fc710a385a625a8652506920b8020bcf4de 100644
--- a/sickbeard/providers/scc.py
+++ b/sickbeard/providers/scc.py
@@ -48,15 +48,13 @@ class SCCProvider(generic.TorrentProvider):
         self.urls = {'base_url': 'https://sceneaccess.eu',
                      'login': 'https://sceneaccess.eu/login',
                      'detail': 'https://www.sceneaccess.eu/details?id=%s',
-                     'search': 'https://sceneaccess.eu/browse?search=%s&method=1&%s',
-                     'nonscene': 'https://sceneaccess.eu/nonscene?search=%s&method=1&c44=44&c45=44',
-                     'foreign': 'https://sceneaccess.eu/foreign?search=%s&method=1&c34=34&c33=33',
-                     'archive': 'https://sceneaccess.eu/archive?search=%s&method=1&c26=26',
+                     'search': 'https://sceneaccess.eu/all?search=%s&method=1&%s',
                      'download': 'https://www.sceneaccess.eu/%s'}
 
         self.url = self.urls['base_url']
 
-        self.categories = "c27=27&c17=17&c11=11"
+        self.categories = { 'sponly': 'c26=26&c44=44&c45=45', # Archive, non-scene HD, non-scene SD; need to include non-scene because WEB-DL packs get added to those categories
+                            'eponly': 'c27=27&c17=17&c44=44&c45=45&c33=33&c34=34'} # TV HD, TV SD, non-scene HD, non-scene SD, foreign XviD, foreign x264
 
     def isEnabled(self):
         return self.enabled
@@ -100,73 +98,60 @@ class SCCProvider(generic.TorrentProvider):
                 if mode != 'RSS':
                     logger.log(u"Search string: %s " % search_string, logger.DEBUG)
 
-                searchURLS = []
-                if search_mode == 'sponly':
-                    searchURLS += [self.urls['archive'] % (urllib.quote(search_string))]
-                else:
-                    searchURLS += [self.urls['search'] % (urllib.quote(search_string), self.categories)]
-                    searchURLS += [self.urls['nonscene'] % (urllib.quote(search_string))]
-                    searchURLS += [self.urls['foreign'] % (urllib.quote(search_string))]
-
-                for searchURL in searchURLS:
-                    try:
-                        logger.log(u"Search URL: %s" %  searchURL, logger.DEBUG)
-                        data = self.getURL(searchURL)
-                        time.sleep(cpu_presets[sickbeard.CPU_PRESET])
-                    except Exception as e:
-                        logger.log(u"Unable to fetch data. Error: %s" % repr(e), logger.WARNING)
-
-                    if not data:
+                searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories[search_mode])
+
+                try:
+                    logger.log(u"Search URL: %s" %  searchURL, logger.DEBUG)
+                    data = self.getURL(searchURL)
+                    time.sleep(cpu_presets[sickbeard.CPU_PRESET])
+                except Exception as e:
+                    logger.log(u"Unable to fetch data. Error: %s" % repr(e), logger.WARNING)
+
+                if not data:
+                    continue
+
+                with BS4Parser(data, features=["html5lib", "permissive"]) as html:
+                    torrent_table = html.find('table', attrs={'id': 'torrents-table'})
+                    torrent_rows = torrent_table.find_all('tr') if torrent_table else []
+
+                    #Continue only if at least one Release is found
+                    if len(torrent_rows) < 2:
+                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                         continue
 
-                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
-                        torrent_table = html.find('table', attrs={'id': 'torrents-table'})
-                        torrent_rows = torrent_table.find_all('tr') if torrent_table else []
+                    for result in torrent_table.find_all('tr')[1:]:
+
+                        try:
+                            link = result.find('td', attrs={'class': 'ttr_name'}).find('a')
+                            url  = result.find('td', attrs={'class': 'td_dl'}).find('a')
+
+                            title = link.string
+                            if re.search(r'\.\.\.', title):
+                                data = self.getURL(self.url + "/" + link['href'])
+                                if data:
+                                    with BS4Parser(data) as details_html:
+                                        title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
+                            download_url = self.urls['download'] % url['href']
+                            seeders = int(result.find('td', attrs={'class': 'ttr_seeders'}).string)
+                            leechers = int(result.find('td', attrs={'class': 'ttr_leechers'}).string)
+                            size = self._convertSize(result.find('td', attrs={'class': 'ttr_size'}).contents[0])
+                        except (AttributeError, TypeError):
+                            continue
 
-                        #Continue only if at least one Release is found
-                        if len(torrent_rows) < 2:
-                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
+                        if not all([title, download_url]):
                             continue
 
-                        for result in torrent_table.find_all('tr')[1:]:
-
-                            try:
-                                link = result.find('td', attrs={'class': 'ttr_name'}).find('a')
-                                all_urls = result.find('td', attrs={'class': 'td_dl'}).find_all('a', limit=2)
-                                # Foreign section contain two links, the others one
-                                if self._isSection('Foreign', data):
-                                    url = all_urls[1]
-                                else:
-                                    url = all_urls[0]
-
-                                title = link.string
-                                if re.search(r'\.\.\.', title):
-                                    data = self.getURL(self.url + "/" + link['href'])
-                                    if data:
-                                        with BS4Parser(data) as details_html:
-                                            title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
-                                download_url = self.urls['download'] % url['href']
-                                seeders = int(result.find('td', attrs={'class': 'ttr_seeders'}).string)
-                                leechers = int(result.find('td', attrs={'class': 'ttr_leechers'}).string)
-                                #FIXME
-                                size = -1
-                            except (AttributeError, TypeError):
-                                continue
-
-                            if not all([title, download_url]):
-                                continue
-
-                            #Filter unseeded torrent
-                            if seeders < self.minseed or leechers < self.minleech:
-                                if mode != 'RSS':
-                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
-                                continue
-
-                            item = title, download_url, size, seeders, leechers
+                        #Filter unseeded torrent
+                        if seeders < self.minseed or leechers < self.minleech:
                             if mode != 'RSS':
-                                logger.log(u"Found result: %s " % title, logger.DEBUG)
+                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
+                            continue
 
-                            items[mode].append(item)
+                        item = title, download_url, size, seeders, leechers
+                        if mode != 'RSS':
+                            logger.log(u"Found result: %s " % title, logger.DEBUG)
+
+                        items[mode].append(item)
 
             #For each search mode sort all the items by seeders if available
             items[mode].sort(key=lambda tup: tup[3], reverse=True)
@@ -178,6 +163,19 @@ class SCCProvider(generic.TorrentProvider):
     def seedRatio(self):
         return self.ratio
 
+    def _convertSize(self, size):
+        size, base = size.split()
+        size = float(size)
+        if base in 'KB':
+            size = size * 1024
+        elif base in 'MB':
+            size = size * 1024**2
+        elif base in 'GB':
+            size = size * 1024**3
+        elif base in 'TB':
+            size = size * 1024**4
+        return int(size)
+
 
 class SCCCache(tvcache.TVCache):
     def __init__(self, provider_obj):
diff --git a/sickbeard/providers/torrentproject.py b/sickbeard/providers/torrentproject.py
index bf3361d4cb1341baff3224f096c1e5e482ac66ba..228ac0109249063d5ca97e29b5a91624faa448d1 100644
--- a/sickbeard/providers/torrentproject.py
+++ b/sickbeard/providers/torrentproject.py
@@ -20,6 +20,7 @@ from urllib import quote_plus
 
 from sickbeard import logger
 from sickbeard import tvcache
+from sickbeard import helpers
 from sickbeard.providers import generic
 from sickbeard.common import USER_AGENT
 
@@ -52,7 +53,9 @@ class TORRENTPROJECTProvider(generic.TorrentProvider):
                 if mode != 'RSS':
                     logger.log(u"Search string: %s " % search_string, logger.DEBUG)
 
-                searchURL = self.urls['api'] + "?s=%s&out=json&filter=2101" % quote_plus(search_string.encode('utf-8'))
+
+                searchURL = self.urls['api'] + "?s=%s&out=json&filter=2101&num=150" % quote_plus(search_string.encode('utf-8'))
+
                 logger.log(u"Search URL: %s" %  searchURL, logger.DEBUG)
                 torrents = self.getURL(searchURL, json=True)
                 if not (torrents and "total_found" in torrents and int(torrents["total_found"]) > 0):
@@ -64,9 +67,8 @@ class TORRENTPROJECTProvider(generic.TorrentProvider):
                 results = []
                 for i in torrents:
                     title = torrents[i]["title"]
-                    seeders = int(torrents[i]["seeds"])
-                    leechers = int(torrents[i]["leechs"])
-
+                    seeders = helpers.tryInt(torrents[i]["seeds"], 1)
+                    leechers = helpers.tryInt(torrents[i]["leechs"], 0)
                     if seeders < self.minseed or leechers < self.minleech:
                         if mode != 'RSS':
                             logger.log("Torrent doesn't meet minimum seeds & leechers not selecting : %s" % title, logger.DEBUG)
@@ -76,11 +78,15 @@ class TORRENTPROJECTProvider(generic.TorrentProvider):
                     size = int(torrents[i]["torrent_size"])
 
                     if seeders < 10:
-                        logger.log("Torrent has less than 10 seeds getting dyn trackers: " + title, logger.DEBUG)
-                        trackerUrl = self.urls['api'] + "" + t_hash + "/trackers_json"
-                        jdata = self.getURL(trackerUrl, json=True)
-                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "".join(["&tr=" + s for s in jdata])
-                        logger.log("Dyn Magnet: " + download_url, logger.DEBUG)
+                        if mode != 'RSS':
+                            logger.log("Torrent has less than 10 seeds getting dyn trackers: " + title, logger.DEBUG)
+                            trackerUrl = self.urls['api'] + "" + t_hash + "/trackers_json"
+                            jdata = self.getURL(trackerUrl, json=True)
+                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "".join(["&tr=" + s for s in jdata])
+                            logger.log("Dyn Magnet: " + download_url, logger.DEBUG)
+                        else:
+                            download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.coppersurfer.tk:6969&tr=udp://open.demonii.com:1337&tr=udp://tracker.leechers-paradise.org:6969&tr=udp://exodus.desync.com:6969"
+                            logger.log("Result has less than 10 seeds but not using Dyn Magnet becouse its from RSS" + title, logger.DEBUG)
                     else:
                         #logger.log("Torrent has more than 10 seeds using hard coded trackers", logger.DEBUG)
                         download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.coppersurfer.tk:6969&tr=udp://open.demonii.com:1337&tr=udp://tracker.leechers-paradise.org:6969&tr=udp://exodus.desync.com:6969"
@@ -108,13 +114,11 @@ class TORRENTPROJECTCache(tvcache.TVCache):
 
         tvcache.TVCache.__init__(self, provider_obj)
 
-        # set this 0 to suppress log line, since we aren't updating it anyways
-        self.minTime = 0
+        self.minTime = 20
 
     def _getRSSData(self):
-        # no rss for torrentproject afaik,& can't search with empty string
-        # newest results are always > 1 day since added anyways
-        # search_strings = {'RSS': ['']}
-        return {'entries': {}}
+        
+        search_params = {'RSS': ['0day']}
+        return {'entries': self.provider._doSearch(search_params)}
 
 provider = TORRENTPROJECTProvider()
diff --git a/sickbeard/search.py b/sickbeard/search.py
index 15112ebd7f6c67d805315a55666174e010415427..a3e22878d5f3a5bb34c8f1f6d538b7da558a2b7f 100644
--- a/sickbeard/search.py
+++ b/sickbeard/search.py
@@ -168,7 +168,12 @@ def snatchEpisode(result, endStatus=SNATCHED):
             sql_l.append(curEpObj.get_sql())
 
         if curEpObj.status not in Quality.DOWNLOADED:
-            notifiers.notify_snatch(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
+            try:
+                notifiers.notify_snatch(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
+            except:
+                # Without this, when notification fail, it crashes the snatch thread and SR will
+                # keep snatching until notification is sent
+                logger.log(u"Failed to send snatch notification", logger.DEBUG)
 
             trakt_data.append((curEpObj.season, curEpObj.episode))
 
diff --git a/sickbeard/subtitles.py b/sickbeard/subtitles.py
index 822b61a2057546003a9d2f4ff1960e058b8427ff..5e6959caa16df430c0e3919ecc89f1c265700d9f 100644
--- a/sickbeard/subtitles.py
+++ b/sickbeard/subtitles.py
@@ -16,29 +16,33 @@
 # You should have received a copy of the GNU General Public License
 # along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
 
+import io
+import os
+import re
 import datetime
-import sickbeard
 import traceback
-import pkg_resources
 import subliminal
 import subprocess
-from sickbeard.common import *
+import pkg_resources
+from enzyme import MKV, MalformedMKVError
+from subliminal.api import provider_manager
+from babelfish import Error as BabelfishError, Language, language_converters
+
+import sickbeard
 from sickbeard import logger
 from sickbeard import history
 from sickbeard import db
 from sickrage.helper.common import dateTimeFormat
 from sickrage.helper.encoding import ek
 from sickrage.helper.exceptions import ex
-from subliminal.api import provider_manager
-from enzyme import MKV, MalformedMKVError
-from babelfish import Error as BabelfishError, Language, language_converters
 
-distribution = pkg_resources.Distribution(location=os.path.dirname(os.path.dirname(__file__)), 
+distribution = pkg_resources.Distribution(location=os.path.dirname(os.path.dirname(__file__)),
                                           project_name='fake_entry_points', version='1.0.0')
 
 entry_points = {
     'subliminal.providers': [
         'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
+        'legendastv = subliminal.providers.legendastv:LegendasTvProvider',
         'napiprojekt = subliminal.providers.napiprojekt:NapiProjektProvider',
         'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider',
         'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider',
@@ -47,9 +51,12 @@ entry_points = {
     ],
     'babelfish.language_converters': [
         'addic7ed = subliminal.converters.addic7ed:Addic7edConverter',
+        'legendastv = subliminal.converters.legendastv:LegendasTvConverter',
+        'thesubdb = subliminal.converters.thesubdb:TheSubDBConverter',
         'tvsubtitles = subliminal.converters.tvsubtitles:TVsubtitlesConverter'
     ]
 }
+
 distribution._ep_map = pkg_resources.EntryPoint.parse_map(entry_points, distribution)
 pkg_resources.working_set.add(distribution)
 
@@ -59,6 +66,7 @@ subliminal.region.configure('dogpile.cache.memory')
 
 provider_urls = {
     'addic7ed': 'http://www.addic7ed.com',
+    'legendastv': 'http://www.legendas.tv',
     'napiprojekt': 'http://www.napiprojekt.pl',
     'opensubtitles': 'http://www.opensubtitles.org',
     'podnapisi': 'http://www.podnapisi.net',
@@ -101,8 +109,8 @@ def fromietf(language):
 
 def isValidLanguage(language):
     try:
-        langObj = fromietf(language)
-    except:
+        fromietf(language)
+    except Exception:
         return False
     return True
 
@@ -125,48 +133,86 @@ def downloadSubtitles(subtitles_info):
         video = subliminal.scan_video(video_path, subtitles=False, embedded_subtitles=False)
     except Exception:
         logger.log(u'%s: Exception caught in subliminal.scan_video for S%02dE%02d' %
-        (subtitles_info['show.indexerid'], subtitles_info['season'], subtitles_info['episode']), logger.DEBUG)
+                   (subtitles_info['show.indexerid'], subtitles_info['season'], subtitles_info['episode']), logger.DEBUG)
         return (existing_subtitles, None)
 
+    provider_configs = {'addic7ed': {'username': sickbeard.ADDIC7ED_USER, 'password': sickbeard.ADDIC7ED_PASS},
+                        'legendastv': {'username': sickbeard.LEGENDASTV_USER, 'password': sickbeard.LEGENDASTV_PASS},
+                        'opensubtitles': {'username': sickbeard.OPENSUBTITLES_USER, 'password': sickbeard.OPENSUBTITLES_PASS}}
+
+    pool = subliminal.api.ProviderPool(providers=providers, provider_configs=provider_configs)
+    subtitles_list = pool.list_subtitles(video, languages)
+
     try:
-        # TODO: Add gui option for hearing_impaired parameter ?
-        found_subtitles = subliminal.download_best_subtitles([video], languages=languages, hearing_impaired=False, only_one=not sickbeard.SUBTITLES_MULTI, providers=providers)
+        found_subtitles = pool.download_best_subtitles(subtitles_list, video, languages=languages, hearing_impaired=sickbeard.SUBTITLES_HEARING_IMPAIRED, only_one=not sickbeard.SUBTITLES_MULTI)
         if not found_subtitles:
             logger.log(u'%s: No subtitles found for S%02dE%02d on any provider' % (subtitles_info['show.indexerid'], subtitles_info['season'], subtitles_info['episode']), logger.DEBUG)
             return (existing_subtitles, None)
 
-        for index, subtitle in enumerate(found_subtitles[video]):
-            encoding = subliminal.subtitle.Subtitle.guess_encoding(subtitle)
-            found_subtitles[video][index].encoding = encoding
-
-        subliminal.save_subtitles(video, found_subtitles[video], directory=subtitles_path, single=not sickbeard.SUBTITLES_MULTI)
+        for subtitle in found_subtitles:
+            # Force subtitle encoding to utf-8 for some languages
+            if subtitle.language.alpha3 == 'pol':
+                setattr(subtitle, 'encoding', 'utf-8')
+            elif subtitle.language.alpha3 == 'bul':
+                setattr(subtitle, 'encoding', 'utf-8')
 
-        for video, subtitles in found_subtitles.iteritems():
-            for subtitle in subtitles:
-                new_video_path = subtitles_path + "/" + video.name.rsplit("/", 1)[-1]
-                new_subtitles_path = subliminal.subtitle.get_subtitle_path(new_video_path, subtitle.language if sickbeard.SUBTITLES_MULTI else None)
-                sickbeard.helpers.chmodAsParent(new_subtitles_path)
-                sickbeard.helpers.fixSetGroupID(new_subtitles_path)
+        save_subtitles(video, found_subtitles, directory=subtitles_path, single=not sickbeard.SUBTITLES_MULTI)
 
-        if not sickbeard.EMBEDDED_SUBTITLES_ALL and sickbeard.SUBTITLES_EXTRA_SCRIPTS and video_path.endswith(('.mkv','.mp4')):
+        if not sickbeard.EMBEDDED_SUBTITLES_ALL and sickbeard.SUBTITLES_EXTRA_SCRIPTS and video_path.endswith(('.mkv', '.mp4')):
             run_subs_extra_scripts(subtitles_info, found_subtitles)
 
         current_subtitles = subtitlesLanguages(video_path)[0]
         new_subtitles = frozenset(current_subtitles).difference(existing_subtitles)
 
-    except Exception as e:
-                logger.log("Error occurred when downloading subtitles for: %s" % video_path)
-                logger.log(traceback.format_exc(), logger.ERROR)
-                return (existing_subtitles, None)
+    except Exception:
+        logger.log("Error occurred when downloading subtitles for: %s" % video_path)
+        logger.log(traceback.format_exc(), logger.ERROR)
+        return (existing_subtitles, None)
 
     if sickbeard.SUBTITLES_HISTORY:
-        for video, subtitles in found_subtitles.iteritems():
-            for subtitle in subtitles:
-                logger.log(u'history.logSubtitle %s, %s' % (subtitle.provider_name, subtitle.language.opensubtitles), logger.DEBUG)
-                history.logSubtitle(subtitles_info['show.indexerid'], subtitles_info['season'], subtitles_info['episode'], subtitles_info['status'], subtitle)
+        for subtitle in found_subtitles:
+            logger.log(u'history.logSubtitle %s, %s' % (subtitle.provider_name, subtitle.language.opensubtitles), logger.DEBUG)
+            history.logSubtitle(subtitles_info['show.indexerid'], subtitles_info['season'], subtitles_info['episode'], subtitles_info['status'], subtitle)
 
     return (current_subtitles, new_subtitles)
 
+def save_subtitles(video, subtitles, single=False, directory=None):
+    saved_subtitles = []
+    for subtitle in subtitles:
+        # check content
+        if subtitle.content is None:
+            logger.log("Skipping subtitle %r: no content" % subtitle, logger.DEBUG)
+            continue
+
+        # check language
+        if subtitle.language in set(s.language for s in saved_subtitles):
+            logger.log("Skipping subtitle %r: language already saved" % subtitle, logger.DEBUG)
+            continue
+
+        # create subtitle path
+        subtitle_path = subliminal.subtitle.get_subtitle_path(video.name, None if single else subtitle.language)
+        if directory is not None:
+            subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])
+
+        # save content as is or in the specified encoding
+        logger.log("Saving %r to %r" % (subtitle, subtitle_path), logger.DEBUG)
+        if subtitle.encoding:
+            with io.open(subtitle_path, 'w', encoding=subtitle.encoding) as f:
+                f.write(subtitle.text)
+        else:
+            with io.open(subtitle_path, 'wb') as f:
+                f.write(subtitle.content)
+
+        # chmod and set group for the saved subtitle
+        sickbeard.helpers.chmodAsParent(subtitle_path)
+        sickbeard.helpers.fixSetGroupID(subtitle_path)
+        saved_subtitles.append(subtitle)
+        # check single
+        if single:
+            break
+
+    return saved_subtitles
+
 def getNeededLanguages(current_subtitles):
     languages = set()
     for language in frozenset(wantedLanguages()).difference(current_subtitles):
@@ -175,17 +221,17 @@ def getNeededLanguages(current_subtitles):
     return languages
 
 # TODO: Filter here for non-languages in sickbeard.SUBTITLES_LANGUAGES
-def wantedLanguages(sqlLike = False):
-    wantedLanguages = [x for x in sorted(sickbeard.SUBTITLES_LANGUAGES) if x in subtitleCodeFilter()]
+def wantedLanguages(sqlLike=False):
+    wanted = [x for x in sorted(sickbeard.SUBTITLES_LANGUAGES) if x in subtitleCodeFilter()]
     if sqlLike:
-        return '%' + ','.join(wantedLanguages) + '%'
+        return '%' + ','.join(wanted) + '%'
 
-    return wantedLanguages
+    return wanted
 
 def getSubtitlesPath(video_path):
     if os.path.isabs(sickbeard.SUBTITLES_DIR):
         new_subtitles_path = sickbeard.SUBTITLES_DIR
-    elif sickbeard.SUBTITLES_DIR:    
+    elif sickbeard.SUBTITLES_DIR:
         new_subtitles_path = ek(os.path.join, ek(os.path.dirname, video_path), sickbeard.SUBTITLES_DIR)
         dir_exists = sickbeard.helpers.makeDir(new_subtitles_path)
         if not dir_exists:
@@ -200,7 +246,7 @@ def getSubtitlesPath(video_path):
 def subtitlesLanguages(video_path):
     """Return a list detected subtitles for the given video file"""
     resultList = []
-    save_subtitles = None
+    should_save_subtitles = None
 
     if not sickbeard.EMBEDDED_SUBTITLES_ALL and video_path.endswith('.mkv'):
         embedded_subtitle_languages = getEmbeddedLanguages(video_path.encode(sickbeard.SYS_ENCODING))
@@ -224,20 +270,19 @@ def subtitlesLanguages(video_path):
             currentWantedLanguages = wantedLanguages()
             if len(currentWantedLanguages) == 1 and Language('und') in external_subtitle_languages:
                 if embedded_subtitle_languages not in currentWantedLanguages and Language('und') in embedded_subtitle_languages:
-                    # TODO: Replace with a checkbox
                     subtitle_languages.add(fromietf(currentWantedLanguages[0]))
-                    save_subtitles = True
+                    should_save_subtitles = True
                 elif embedded_subtitle_languages not in currentWantedLanguages and Language('und') not in embedded_subtitle_languages:
                     subtitle_languages.remove(Language('und'))
                     subtitle_languages.add(fromietf(currentWantedLanguages[0]))
-                    save_subtitles = True
+                    should_save_subtitles = True
     else:
         subtitle_languages = scan_subtitle_languages(video_path)
         if not sickbeard.SUBTITLES_MULTI:
             if len(wantedLanguages()) == 1 and Language('und') in subtitle_languages:
                 subtitle_languages.remove(Language('und'))
                 subtitle_languages.add(fromietf(wantedLanguages()[0]))
-                save_subtitles = True
+                should_save_subtitles = True
 
     for language in subtitle_languages:
         if hasattr(language, 'opensubtitles') and language.opensubtitles:
@@ -252,14 +297,14 @@ def subtitlesLanguages(video_path):
     defaultLang = wantedLanguages()
 
     if ('pob' in defaultLang or 'pb' in defaultLang) and ('pt' not in defaultLang and 'por' not in defaultLang):
-            resultList = [x if not x in ['por', 'pt'] else u'pob' for x in resultList]
+        resultList = [x if not x in ['por', 'pt'] else u'pob' for x in resultList]
 
-    return (sorted(resultList), save_subtitles)
+    return (sorted(resultList), should_save_subtitles)
 
 def getEmbeddedLanguages(video_path):
     embedded_subtitle_languages = set()
     try:
-        with open(video_path, 'rb') as f:
+        with io.open(video_path, 'rb') as f:
             mkv = MKV(f)
             if mkv.subtitle_tracks:
                 for st in mkv.subtitle_tracks:
@@ -339,11 +384,11 @@ class SubtitlesFinder():
         myDB = db.DBConnection()
 
         sqlResults = myDB.select('SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.subtitles, ' +
-        'e.subtitles_searchcount AS searchcount, e.subtitles_lastsearch AS lastsearch, e.location, (? - e.airdate) AS airdate_daydiff ' +
-        'FROM tv_episodes AS e INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id) ' +
-        'WHERE s.subtitles = 1 AND e.subtitles NOT LIKE (?) ' +
-        'AND (e.subtitles_searchcount <= 2 OR (e.subtitles_searchcount <= 7 AND airdate_daydiff <= 7)) ' +
-        'AND e.location != ""', [today, wantedLanguages(True)])
+            'e.subtitles_searchcount AS searchcount, e.subtitles_lastsearch AS lastsearch, e.location, (? - e.airdate) AS airdate_daydiff ' +
+            'FROM tv_episodes AS e INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id) ' +
+            'WHERE s.subtitles = 1 AND e.subtitles NOT LIKE (?) ' +
+            'AND (e.subtitles_searchcount <= 2 OR (e.subtitles_searchcount <= 7 AND airdate_daydiff <= 7)) ' +
+            'AND e.location != ""', [today, wantedLanguages(True)])
 
         if len(sqlResults) == 0:
             logger.log('No subtitles to download', logger.INFO)
@@ -361,7 +406,8 @@ class SubtitlesFinder():
             throwaway = datetime.datetime.strptime('20110101', '%Y%m%d')
             if ((epToSub['airdate_daydiff'] > 7 and epToSub['searchcount'] < 2 and now - datetime.datetime.strptime(epToSub['lastsearch'], dateTimeFormat) > datetime.timedelta(hours=rules['old'][epToSub['searchcount']])) or
                 # Recent shows rule
-                (epToSub['airdate_daydiff'] <= 7 and epToSub['searchcount'] < 7 and now - datetime.datetime.strptime(epToSub['lastsearch'], dateTimeFormat) > datetime.timedelta(hours=rules['new'][epToSub['searchcount']]))):
+                    (epToSub['airdate_daydiff'] <= 7 and epToSub['searchcount'] < 7 and now - datetime.datetime.strptime(epToSub['lastsearch'], dateTimeFormat) > datetime.timedelta(hours=rules['new'][epToSub['searchcount']]))):
+
                 logger.log('Downloading subtitles for episode %dx%d of show %s' % (epToSub['season'], epToSub['episode'], epToSub['show_name']), logger.DEBUG)
 
                 showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(epToSub['showid']))
@@ -406,7 +452,6 @@ def run_subs_extra_scripts(epObj, foundSubs):
         logger.log(u"Absolute path to script: " + script_cmd[0], logger.DEBUG)
 
         for video, subs in foundSubs.iteritems():
-            subpaths = []
             for sub in subs:
                 subpath = subliminal.subtitle.get_subtitle_path(video.name, sub.language)
                 if os.path.isabs(sickbeard.SUBTITLES_DIR):
@@ -415,7 +460,7 @@ def run_subs_extra_scripts(epObj, foundSubs):
                     subpath = ek(os.path.join, ek(os.path.dirname, subpath), sickbeard.SUBTITLES_DIR, ek(os.path.basename, subpath))
 
                 inner_cmd = script_cmd + [video.name, subpath, sub.language.opensubtitles, epObj['show.name'],
-                                         str(epObj['season']), str(epObj['episode']), epObj['name'], str(epObj['show.indexerid'])]
+                                str(epObj['season']), str(epObj['episode']), epObj['name'], str(epObj['show.indexerid'])]
 
                 # use subprocess to run the command and capture output
                 logger.log(u"Executing command: %s" % inner_cmd)
diff --git a/sickbeard/tv.py b/sickbeard/tv.py
index b7d7798330fa43ff5f8f266d9e44ad6b3ea8bec1..9e10b4c32f3db7942ba65f3f45135562c11af91f 100644
--- a/sickbeard/tv.py
+++ b/sickbeard/tv.py
@@ -509,7 +509,7 @@ class TVShow(object):
                 try:
                     cachedSeasons[curSeason] = cachedShow[curSeason]
                 except sickbeard.indexer_seasonnotfound, e:
-                    logger.log(u"%s: Error when trying to load the episode from %. Message: %s " %
+                    logger.log(u"%s: Error when trying to load the episode from %s. Message: %s " %
                     (curShowid, sickbeard.indexerApi(self.indexer).name, e.message), logger.WARNING)
                     deleteEp = True
 
diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py
index 64eec882cbb26120a5b1ff985d2281eee335c43f..88b9098439bbc32838b69bf9829b1ef4c6d1c5b6 100644
--- a/sickbeard/webserve.py
+++ b/sickbeard/webserve.py
@@ -4801,7 +4801,8 @@ class ConfigSubtitles(Config):
 
     def saveSubtitles(self, use_subtitles=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None,
                       service_order=None, subtitles_history=None, subtitles_finder_frequency=None,
-                      subtitles_multi=None, embedded_subtitles_all=None, subtitles_extra_scripts=None):
+                      subtitles_multi=None, embedded_subtitles_all=None, subtitles_extra_scripts=None, subtitles_hearing_impaired=None,
+                      addic7ed_user=None, addic7ed_pass=None, legendastv_user=None, legendastv_pass=None, opensubtitles_user=None, opensubtitles_pass=None):
 
         results = []
 
@@ -4812,6 +4813,7 @@ class ConfigSubtitles(Config):
         sickbeard.SUBTITLES_DIR = subtitles_dir
         sickbeard.SUBTITLES_HISTORY = config.checkbox_to_value(subtitles_history)
         sickbeard.EMBEDDED_SUBTITLES_ALL = config.checkbox_to_value(embedded_subtitles_all)
+        sickbeard.SUBTITLES_HEARING_IMPAIRED = config.checkbox_to_value(subtitles_hearing_impaired)
         sickbeard.SUBTITLES_MULTI = config.checkbox_to_value(subtitles_multi)
         sickbeard.SUBTITLES_EXTRA_SCRIPTS = [x.strip() for x in subtitles_extra_scripts.split('|') if x.strip()]
 
@@ -4827,6 +4829,13 @@ class ConfigSubtitles(Config):
         sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list
         sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled
 
+        sickbeard.ADDIC7ED_USER = addic7ed_user or ''
+        sickbeard.ADDIC7ED_PASS = addic7ed_pass or ''
+        sickbeard.LEGENDASTV_USER = legendastv_user or ''
+        sickbeard.LEGENDASTV_PASS = legendastv_pass or ''
+        sickbeard.OPENSUBTITLES_USER = opensubtitles_user or ''
+        sickbeard.OPENSUBTITLES_PASS = opensubtitles_pass or ''
+
         sickbeard.save_config()
 
         if len(results) > 0:
diff --git a/sickrage/show/ComingEpisodes.py b/sickrage/show/ComingEpisodes.py
index cb80c604fc10bc90b704231367f4c7b374a2f73d..2f929ec2fb94f51673d802c0dfc38e0d4978f0ea 100644
--- a/sickrage/show/ComingEpisodes.py
+++ b/sickrage/show/ComingEpisodes.py
@@ -63,7 +63,7 @@ class ComingEpisodes:
         today = date.today().toordinal()
         next_week = (date.today() + timedelta(days=7)).toordinal()
         recently = (date.today() - timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
-        qualities_list = Quality.DOWNLOADED + Quality.SNATCHED + Quality.ARCHIVED + [IGNORED]
+        qualities_list = Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST + Quality.SNATCHED_PROPER + Quality.ARCHIVED + [IGNORED]
 
         db = DBConnection()
         fields_to_select = ', '.join(
@@ -83,7 +83,7 @@ class ComingEpisodes:
 
         done_shows_list = [int(result['showid']) for result in results]
         placeholder = ','.join(['?'] * len(done_shows_list))
-        placeholder2 = ','.join(['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED))
+        placeholder2 = ','.join(['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST + Quality.SNATCHED_PROPER))
 
         results += db.select(
             'SELECT %s ' % fields_to_select +
@@ -98,7 +98,7 @@ class ComingEpisodes:
                                                   'AND inner_e.airdate >= ? '
                                                   'ORDER BY inner_e.airdate ASC LIMIT 1) '
                                                   'AND e.status NOT IN (' + placeholder2 + ')',
-            done_shows_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED
+            done_shows_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST + Quality.SNATCHED_PROPER
         )
 
         results += db.select(