diff --git a/SickBeard.py b/SickBeard.py
index 833ebff5e8703fb41c7ccaaf2687c148fa220630..6db397cc1d672490e93caed4e4efa96df2694cc3 100755
--- a/SickBeard.py
+++ b/SickBeard.py
@@ -516,7 +516,7 @@ class SickRage(object):
                                       sys.executable,
                                       sickbeard.MY_FULLNAME]
 
-                if popen_list:
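+                # With NO_RESTART set, SR only shuts down on restart; an external
+                # tool (e.g. FireDaemon) is expected to relaunch it.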
+                if popen_list and not sickbeard.NO_RESTART:
                     popen_list += sickbeard.MY_ARGS
                     if '--nolaunch' not in popen_list:
                         popen_list += ['--nolaunch']
diff --git a/gui/slick/css/dark.css b/gui/slick/css/dark.css
index 0979becb8c61d70cbd10a6dafc2a1a76ba22ab4f..cc7651e5d12b5cd186456ce6e8b72626e4b1033c 100644
--- a/gui/slick/css/dark.css
+++ b/gui/slick/css/dark.css
@@ -786,6 +786,26 @@ td.tvShow a:hover {
     color: #09A2FF;
 }
 
+#popover-target label {
+  margin: 0 5px;
+  display: block;
+}
+#popover-target input {
+  margin-left: 5px;
+}
+.popover {
+  margin-left: -50px;
+  background-color: #333;
+}
+
+.popover-content {
+  background-color: #333;
+}
+
+.popover.bottom .arrow:after {
+  border-bottom-color: #333;
+}
+
 /* =======================================================================
 home_addShows.tmpl
 ========================================================================== */
@@ -3072,3 +3092,26 @@ jquery.confirm.css
 #confirmBox .red:hover {
 	background-color: #A13331;
 }
+
+/* =======================================================================
+bootstrap modal
+========================================================================== */
+
+.modal-content {
+  background-color: #3D3D3D;
+}
+.modal-body {
+  background-color: #3D3D3D;
+}
+
+.modal-header {
+    padding: 9px 15px;
+    border-bottom: 1px solid #eee;
+    background-color: #15528F;
+    -webkit-border-top-left-radius: 5px;
+    -webkit-border-top-right-radius: 5px;
+    -moz-border-radius-topleft: 5px;
+    -moz-border-radius-topright: 5px;
+    border-top-left-radius: 5px;
+    border-top-right-radius: 5px;
+}
diff --git a/gui/slick/css/light.css b/gui/slick/css/light.css
index b56fb28529990604419134b9a310fced4578c7cf..834bf260da4603c6f2bb14ace0cfcc61259d7996 100644
--- a/gui/slick/css/light.css
+++ b/gui/slick/css/light.css
@@ -768,6 +768,17 @@ td.tvShow a:hover {
     color: #428BCA;
 }
 
+#popover-target label {
+  margin: 0 5px;
+  display: block;
+}
+#popover-target input {
+  margin-left: 5px;
+}
+.popover {
+  margin-left: -50px;
+}
+
 /* =======================================================================
 home_addShows.tmpl
 ========================================================================== */
@@ -3009,3 +3020,19 @@ jquery.confirm.css
 #confirmBox .red:hover {
 	background-color: #A13331;
 }
+
+/* =======================================================================
+bootstrap modal
+========================================================================== */
+
+.modal-header {
+    padding: 9px 15px;
+    border-bottom: 1px solid #eee;
+    background-color: #F5F1E4;
+    -webkit-border-top-left-radius: 5px;
+    -webkit-border-top-right-radius: 5px;
+    -moz-border-radius-topleft: 5px;
+    -moz-border-radius-topright: 5px;
+    border-top-left-radius: 5px;
+    border-top-right-radius: 5px;
+}
diff --git a/gui/slick/css/style.css b/gui/slick/css/style.css
index e53975f4e514977fe75686a46a6d69ad4a59cc39..f4d250dc5c8ffd73d8350ed3549768d7ded9d381 100644
--- a/gui/slick/css/style.css
+++ b/gui/slick/css/style.css
@@ -794,6 +794,17 @@ td.tvShow a:hover {
 	color: #428BCA;
 }
 
+#popover-target label {
+  margin: 0 5px;
+  display: block;
+}
+#popover-target input {
+  margin-left: 5px;
+}
+.popover {
+  margin-left: -50px;
+}
+
 /* =======================================================================
 home_addShows.tmpl
 ========================================================================== */
diff --git a/gui/slick/images/providers/animenzb.gif b/gui/slick/images/providers/animenzb.gif
new file mode 100644
index 0000000000000000000000000000000000000000..9fa65fe495d7885f6f02b96ee3ff8e91511ddba8
Binary files /dev/null and b/gui/slick/images/providers/animenzb.gif differ
diff --git a/gui/slick/images/providers/fanzub.gif b/gui/slick/images/providers/fanzub.gif
deleted file mode 100644
index 1787676a7085bc10a08d939c224916b1b12cb979..0000000000000000000000000000000000000000
Binary files a/gui/slick/images/providers/fanzub.gif and /dev/null differ
diff --git a/gui/slick/images/providers/morethantv.png b/gui/slick/images/providers/morethantv.png
new file mode 100755
index 0000000000000000000000000000000000000000..acdd6fcba4d725f4e6074dfbc19a3fcf37f1018a
Binary files /dev/null and b/gui/slick/images/providers/morethantv.png differ
diff --git a/gui/slick/interfaces/default/comingEpisodes.tmpl b/gui/slick/interfaces/default/comingEpisodes.tmpl
index 66632982dea6d44e6a5d2f63ad3668c13c3ab013..5ce1f51290469f878e1ef87774e22bcb7edd669a 100644
--- a/gui/slick/interfaces/default/comingEpisodes.tmpl
+++ b/gui/slick/interfaces/default/comingEpisodes.tmpl
@@ -124,7 +124,6 @@
         sortList: sortList,
         textExtraction: {
             0: function(node) { return \$(node).find('span').text().toLowerCase() },
-            4: function(node) { return \$(node).find('img').attr('alt') },
             5: function(node) { return \$(node).find('span').text().toLowerCase() }
         },
         headers: {
@@ -249,7 +248,7 @@
 			</td>
 
 			<td align="center">
-				<a href="$sbRoot/home/searchEpisode?show=${cur_result['showid']}&amp;season=$cur_result['season']&amp;episode=$cur_result['episode']" title="Manual Search" id="forceUpdate-${cur_result['showid']}" class="forceUpdate epSearch"><img alt="[search]" height="16" width="16" src="$sbRoot/images/search16.png" id="forceUpdateImage-${cur_result['showid']}" /></a>
+				<a href="$sbRoot/home/searchEpisode?show=${cur_result['showid']}&amp;season=$cur_result['season']&amp;episode=$cur_result['episode']" title="Manual Search" id="forceUpdate-${cur_result['showid']}x${cur_result['season']}x${cur_result['episode']}" class="forceUpdate epSearch"><img alt="[search]" height="16" width="16" src="$sbRoot/images/search16.png" id="forceUpdateImage-${cur_result['showid']}" /></a>
 			</td>
 		</tr>
 		<!-- end $cur_result['show_name'] //-->
diff --git a/gui/slick/interfaces/default/config.tmpl b/gui/slick/interfaces/default/config.tmpl
index 268416899223a44e88554736b30a713acdf2e18e..a697993590ce67f4dd8347b71b4741a71da39c15 100644
--- a/gui/slick/interfaces/default/config.tmpl
+++ b/gui/slick/interfaces/default/config.tmpl
@@ -66,8 +66,11 @@ useLegacyImportMode = False
     <tr><td class="infoTableHeader">SR Config file:</td><td class="infoTableCell">$sickbeard.CONFIG_FILE</td></tr>
     <tr><td class="infoTableHeader">SR Database file:</td><td class="infoTableCell">$db.dbFilename()</td></tr>
     <tr><td class="infoTableHeader">SR Cache Dir:</td><td class="infoTableCell">$sickbeard.CACHE_DIR</td></tr>
+    <tr><td class="infoTableHeader">SR Log Dir:</td><td class="infoTableCell">$sickbeard.LOG_DIR</td></tr>
     <tr><td class="infoTableHeader">SR Arguments:</td><td class="infoTableCell">$sickbeard.MY_ARGS</td></tr>
+#if $sickbeard.WEB_ROOT
     <tr><td class="infoTableHeader">SR Web Root:</td><td class="infoTableCell">$sickbeard.WEB_ROOT</td></tr>
+#end if
     <tr><td class="infoTableHeader">Python Version:</td><td class="infoTableCell">$sys.version[:120]</td></tr>
     <tr class="infoTableSeperator"><td class="infoTableHeader"><i class="icon16-sb"></i> Homepage</td><td class="infoTableCell"><a href="<%= anon_url('http://www.sickrage.tv/') %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;">http://www.sickrage.tv/</a></td></tr>
     <tr><td class="infoTableHeader"><i class="icon16-web"></i> Forums</td><td class="infoTableCell"><a href="<%= anon_url('http://sickrage.tv/forums/') %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;">http://sickrage.tv/forums/</a></td></tr>
diff --git a/gui/slick/interfaces/default/config_general.tmpl b/gui/slick/interfaces/default/config_general.tmpl
index 3a45fbe84ba014f784fc0b2dce5ce1bce3096a78..1f27d446a9e5cdde53351c89e901d8f70ece05e6 100644
--- a/gui/slick/interfaces/default/config_general.tmpl
+++ b/gui/slick/interfaces/default/config_general.tmpl
@@ -518,7 +518,19 @@
 								</span>
 							</label>
 						</div>
+						
+						<div class="field-pair">
+							<label for="no_restart">
+								<span class="component-title">No Restart</span>
+								<span class="component-desc">
+									<input type="checkbox" name="no_restart" id="no_restart" #if $sickbeard.NO_RESTART then 'checked="checked"' else ''#/>
+									<p>Shut down instead of relaunching when SR is asked to restart.
+									Select this only when external software (such as FireDaemon) restarts SR automatically after it stops.</p>
+								</span>
 
+							</label>
+						</div>
+						
 						<div class="field-pair">
 							<label for="encryption_version">
 								<span class="component-title">Encrypt passwords</span>
diff --git a/gui/slick/interfaces/default/config_notifications.tmpl b/gui/slick/interfaces/default/config_notifications.tmpl
index 2a397300f72f902e27fb3b25356f1b43e667b306..c0743d160b04e7d7ca3ebd21c3f7007a14cd4726 100644
--- a/gui/slick/interfaces/default/config_notifications.tmpl
+++ b/gui/slick/interfaces/default/config_notifications.tmpl
@@ -1496,6 +1496,16 @@
                                     </label>
                                 </div>
                             </div>
+                            <div class="field-pair">
+                                <label for="trakt_blacklist_name">
+                                    <span class="component-title">Trakt blacklist name:</span>
+                                    <input type="text" name="trakt_blacklist_name" id="trakt_blacklist_name" value="$sickbeard.TRAKT_BLACKLIST_NAME" class="form-control input-sm input150" />
+                                </label>
+                                <label>
+                                    <span class="component-title">&nbsp;</span>
+                                    <span class="component-desc">Name (slug) of the list on Trakt used for blacklisting shows on the trending page</span>
+                                </label>
+                            </div>
                             <div class="testNotification" id="testTrakt-result">Click below to test.</div>
                             <input type="button" class="btn" value="Test Trakt" id="testTrakt" />
                             <input type="submit" class="btn config_submitter" value="Save Changes" />
diff --git a/gui/slick/interfaces/default/config_postProcessing.tmpl b/gui/slick/interfaces/default/config_postProcessing.tmpl
index b022142d3b23de95c15f6e5c48948b3c1e3eceb6..3b953750de413b3eddbfa20878235662937449bc 100644
--- a/gui/slick/interfaces/default/config_postProcessing.tmpl
+++ b/gui/slick/interfaces/default/config_postProcessing.tmpl
@@ -1,5 +1,6 @@
 #import os.path
 #import sickbeard
+#import sys
 #from sickbeard.common import *
 #from sickbeard import config
 #from sickbeard import metadata
@@ -71,7 +72,12 @@
                                 <span class="component-desc">
                                     <select name="process_method" id="process_method" class="form-control input-sm">
                                         #set $process_method_text = {'copy': "Copy", 'move': "Move", 'hardlink': "Hard Link", 'symlink' : "Symbolic Link"}
-                                        #for $curAction in ('copy', 'move', 'hardlink', 'symlink'):
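+                                            ## on Windows only copy/move are offered (hard/symlinks assumed unreliable there)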
+                                        #if sys.platform == 'win32'
+                                            #set $process_action = ('copy', 'move')
+                                        #else
+                                            #set $process_action = ('copy', 'move', 'hardlink', 'symlink')
+                                        #end if
+                                        #for $curAction in $process_action:
                                           #if $sickbeard.PROCESS_METHOD == $curAction:
                                             #set $process_method = "selected=\"selected\""
                                           #else
@@ -188,6 +194,18 @@
                                 <span class="component-desc">eg. NZBMedia w/ NZBGET, sabToSickbeard w/ SABnzbd+!</span>
                             </label>                            
                         </div>
+                        
+                        <div class="field-pair">
+                            <input type="checkbox" name="no_delete" id="no_delete" #if $sickbeard.NO_DELETE == True then "checked=\"checked\"" else ""# />
+                            <label for="no_delete">
+                                <span class="component-title">Don't delete empty folders</span>
+                                <span class="component-desc">Leave empty folders when Post Processing?</span>
+                            </label>
+                            <label class="nocheck" for="no_delete">
+                                <span class="component-title">&nbsp;</span>
+                                <span class="component-desc"><b>NOTE:</b> Can be overridden using manual Post Processing</span>
+                            </label>
+                        </div>
 
                         <div class="field-pair">
                             <label class="nocheck">
@@ -283,6 +301,10 @@
                                         <img src="$sbRoot/images/legend16.png" width="16" height="16" alt="[Toggle Key]" id="show_naming_key" title="Toggle Naming Legend" class="legend" class="legend" />
                                     </span>
                                 </label>
+                                <label class="nocheck">
+                                    <span class="component-title">&nbsp;</span>
+                                    <span class="component-desc"><b>NOTE:</b> Don't forget to add the quality pattern; otherwise the episode will have UNKNOWN quality after post-processing</span>
+                                 </label>
                             </div>
 
                             <div id="naming_key" class="nocheck" style="display: none;">
diff --git a/gui/slick/interfaces/default/config_search.tmpl b/gui/slick/interfaces/default/config_search.tmpl
index b6bdedaae0fb2b1d0552bb6a2c8390f7e77069a1..4e656fdd54e9a36691b9030c14c86665cf811e6f 100755
--- a/gui/slick/interfaces/default/config_search.tmpl
+++ b/gui/slick/interfaces/default/config_search.tmpl
@@ -43,20 +43,20 @@
 
 					<fieldset class="component-group-list">
 						<div class="field-pair">
-							<label for="download_propers">
-								<span class="component-title">Download propers</span>
+							<label for="randomize_providers">
+								<span class="component-title">Randomize Providers</span>
 								<span class="component-desc">
-									<input type="checkbox" name="download_propers" id="download_propers" class="enabler" <%= html_checked if sickbeard.DOWNLOAD_PROPERS == True else '' %>/>
-									<p>replace original download with "Proper" or "Repack" if nuked</p>
+									<input type="checkbox" name="randomize_providers" id="randomize_providers" class="enabler" <%= html_checked if sickbeard.RANDOMIZE_PROVIDERS == True else '' %>/>
+									<p>randomize the provider search order instead of going in order of placement</p>
 								</span>
 							</label>
 						</div>
 						<div class="field-pair">
-							<label for="randomize_providers">
-								<span class="component-title">Randomize Providers</span>
+							<label for="download_propers">
+								<span class="component-title">Download propers</span>
 								<span class="component-desc">
-									<input type="checkbox" name="randomize_providers" id="randomize_providers" class="enabler" <%= html_checked if sickbeard.RANDOMIZE_PROVIDERS == True else '' %>/>
-									<p>randomize the provider search order instead of going in order of placement</p>
+									<input type="checkbox" name="download_propers" id="download_propers" class="enabler" <%= html_checked if sickbeard.DOWNLOAD_PROPERS == True else '' %>/>
+									<p>replace original download with "Proper" or "Repack" if nuked</p>
 								</span>
 							</label>
 						</div>
@@ -107,6 +107,16 @@
 							</label>
 						</div>
 
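+						## how many days back the Coming Episodes page still counts an episode as missed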
+						<div class="field-pair">
+							<label>
+								<span class="component-title">Missed episodes range</span>
+								<span class="component-desc">
+									<input type="number" step="1" min="7" name="coming_eps_missed_range" id="coming_eps_missed_range" value="$sickbeard.COMING_EPS_MISSED_RANGE" class="form-control input-sm input75" />
+									<p>Set the range, in days, of missed episodes to show</p>
+								</span>
+							</label>
+						</div>
+
 						<div class="field-pair">
 							<label>
 								<span class="component-title">Usenet retention</span>
@@ -309,6 +319,16 @@
 									</span>
 								</label>
 							</div>
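+							## SABnzbd treats Forced priority as "download even while the queue is paused"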
+							#if sickbeard.ALLOW_HIGH_PRIORITY == True
+							<div class="field-pair">
+								<label for="sab_forced">
+									<span class="component-title">Use forced priority</span>
+									<span class="component-desc">
+										<input type="checkbox" name="sab_forced" class="enabler" id="sab_forced" <%= html_checked if sickbeard.SAB_FORCED else '' %>/>
+										<p>enable to change the priority from HIGH to FORCED</p></span>
+								</label>
+							</div>
+							#end if
 						</div>
 
 						<div id="nzbget_settings">
diff --git a/gui/slick/interfaces/default/displayShow.tmpl b/gui/slick/interfaces/default/displayShow.tmpl
index 510fffbd6eecf8790f34d0548582f9317509cb7d..c442761841ec5cc6be3761b0bf1db7250ebba883 100644
--- a/gui/slick/interfaces/default/displayShow.tmpl
+++ b/gui/slick/interfaces/default/displayShow.tmpl
@@ -1,548 +1,590 @@
-#import sickbeard
-#from sickbeard import subtitles, sbdatetime, network_timezones
-#import sickbeard.helpers
-#from sickbeard.common import *
-#from sickbeard.helpers import anon_url
-#from lib import subliminal
-#import os.path, os
-#import datetime
-#import urllib
-
-#set global $title=$show.name
-##set global $header = '<a></a>' % 
-#set global $topmenu="manageShows"#
-#set $exceptions_string = " | ".join($show.exceptions)
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
-
-<script type="text/javascript" src="$sbRoot/js/lib/jquery.bookmarkscroll.js?$sbPID"></script>
-<script type="text/javascript" src="$sbRoot/js/jwplayer/jwplayer.js"></script>
-<script type="text/javascript">jwplayer.key="Zq3m618ITHrFxeKGi3Gf33ovC+XtdGQz19MMug==";</script>
-
-<input type="hidden" id="sbRoot" value="$sbRoot" />
-
-<script type="text/javascript" src="$sbRoot/js/displayShow.js?$sbPID"></script>
-<script type="text/javascript" src="$sbRoot/js/plotTooltip.js?$sbPID"></script>
-<script type="text/javascript" src="$sbRoot/js/sceneExceptionsTooltip.js?$sbPID"></script>
-<script type="text/javascript" src="$sbRoot/js/ratingTooltip.js?$sbPID"></script>
-<script type="text/javascript" src="$sbRoot/js/ajaxEpSearch.js?$sbPID"></script>
-<script type="text/javascript" src="$sbRoot/js/ajaxEpSubtitles.js?$sbPID"></script>
-<script type="text/javascript" charset="utf-8">
-<!--
-\$(document).ready(function(){
-    #set $fuzzydate = 'airdate'
-    #if $sickbeard.FUZZY_DATING:
-    fuzzyMoment({
-        containerClass : '.${fuzzydate}',
-        dateHasTime : false,
-        dateFormat : '${sickbeard.DATE_PRESET}',
-        timeFormat : '${sickbeard.TIME_PRESET}',
-        trimZero : #if $sickbeard.TRIM_ZERO then "true" else "false"#
-    });
-    #end if
-    #raw
-    $('.addQTip').each(function () {
-        $(this).css({'cursor':'help', 'text-shadow':'0px 0px 0.5px #666'});
-        $(this).qtip({
-            show: {solo:true},
-            position: {viewport:$(window), my:'left center', adjust:{ y: -10, x: 2 }},
-            style: {tip:{corner:true, method:'polygon'}, classes:'qtip-rounded qtip-shadow ui-tooltip-sb'}
-        });
-    });
-    #end raw
-	
-	\$.fn.generateStars = function() {
-		return this.each(function(i,e){\$(e).html(\$('<span/>').width(\$(e).text()*12));});
-	};
-
-	\$('.imdbstars').generateStars();
-	
-});
-//-->
-</script>
-
-	<div class="pull-left form-inline">
-		Change Show:
-		<div class="navShow"><img id="prevShow" src="$sbRoot/images/prev.png" alt="&lt;&lt;" title="Prev Show" /></div>
-			<select id="pickShow" class="form-control form-control-inline input-sm">
-			#for $curShowList in $sortedShowLists:
-				#set $curShowType = $curShowList[0]
-				#set $curShowList = $curShowList[1]
-
-				#if len($sortedShowLists) > 1:
-					<optgroup label="$curShowType">
-				#end if
-					#for $curShow in $curShowList:
-					<option value="$curShow.indexerid" #if $curShow == $show then "selected=\"selected\"" else ""#>$curShow.name</option>
-					#end for
-				#if len($sortedShowLists) > 1:
-					</optgroup>
-				#end if
-			#end for
-			</select>
-		<div class="navShow"><img id="nextShow" src="$sbRoot/images/next.png" alt="&gt;&gt;" title="Next Show" /></div>
-	</div>
-
-	<div class="clearfix"></div>
-
-	<div id="showtitle" data-showname="$show.name">
-		<h1 class="title" id="scene_exception_$show.indexerid">$show.name</h1>
-	</div>
-
-	
-		#if $seasonResults:
-		##There is a special/season_0?##
-		#if int($seasonResults[-1]["season"]) == 0:
-			#set $season_special = 1
-		#else: 
-			#set $season_special = 0
-		#end if
-
-		#if not $sickbeard.DISPLAY_SHOW_SPECIALS and $season_special:
-			$seasonResults.pop(-1)
-		#end if
-
-		<span class="h2footer displayspecials pull-right">
-			#if $season_special:
-			Display Specials:
-				#if sickbeard.DISPLAY_SHOW_SPECIALS:
-					<a class="inner" href="$sbRoot/toggleDisplayShowSpecials/?show=$show.indexerid">Hide</a>
-				#else:
-					<a class="inner" href="$sbRoot/toggleDisplayShowSpecials/?show=$show.indexerid">Show</a>
-				#end if
-			#end if
-		</span>
-		
-		<div class="h2footer pull-right"> 
-			<span>
-			#if (len($seasonResults) > 14):
-				<select id="seasonJump" class="form-control input-sm" style="position: relative; top: -4px;">
-					<option value="jump">Jump to Season</option>
-				#for $seasonNum in $seasonResults:
-					<option value="#season-$seasonNum["season"]">#if int($seasonNum["season"]) == 0 then "Specials" else "Season " + str($seasonNum["season"])#</option>
-				#end for
-				</select>
-			#else:
-				Season:
-				#for $seasonNum in $seasonResults:
-					#if int($seasonNum["season"]) == 0:
-						<a href="#season-$seasonNum["season"]">Specials</a>
-					#else:
-						<a href="#season-$seasonNum["season"]">${str($seasonNum["season"])}</a>
-					#end if
-					#if $seasonNum != $seasonResults[-1]:
-						<span class="separator">|</span>
-					#end if
-				#end for
-			#end if
-			</span>
-
-		#end if
-		</div>
-	
-	<div class="clearfix"></div>
-    
-#if $show_message:
-	<div class="alert alert-info">
-		$show_message
-	</div>
-#end if
-	
-	<div id="container">
-		<div id="posterCol">
-			<a href="$sbRoot/showPoster/?show=$show.indexerid&amp;which=poster" rel="dialog" title="View Poster for $show.name"><img src="$sbRoot/showPoster/?show=$show.indexerid&amp;which=poster_thumb" class="tvshowImg" alt=""/></a>
-		</div>
-
-		<div id="showCol">
-		
-			<div id="showinfo">
-#if 'rating' in $show.imdb_info:
-    #set $rating_tip = str($show.imdb_info['rating']) + " / 10" + " Stars" + "<br />" + str($show.imdb_info['votes']) + " Votes"
-				<span class="imdbstars" qtip-content="$rating_tip">$show.imdb_info['rating']</span>
-#end if
-				
-#set $_show = $show
-#if not $show.imdbid
-				<span>($show.startyear) - $show.runtime minutes - </span>
-#else
-    #if 'country_codes' in $show.imdb_info:
-        #for $country in $show.imdb_info['country_codes'].split('|')
-				<img src="$sbRoot/images/blank.png" class="country-flag flag-${$country}" width="16" height="11" style="margin-left: 3px; vertical-align:middle;" />
-        #end for
-    #end if
-    #if 'year' in $show.imdb_info:
-				<span>($show.imdb_info['year']) - $show.imdb_info['runtimes'] minutes - </span>
-    #end if
-				<a href="<%= anon_url('http://www.imdb.com/title/', _show.imdbid) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://www.imdb.com/title/$show.imdbid"><img alt="[imdb]" height="16" width="16" src="$sbRoot/images/imdb.png" style="margin-top: -1px; vertical-align:middle;"/></a>
-#end if
-				<a href="<%= anon_url(sickbeard.indexerApi(_show.indexer).config['show_url'], _show.indexerid) %>" onclick="window.open(this.href, '_blank'); return false;" title="$sickbeard.indexerApi($show.indexer).config["show_url"]$show.indexerid"><img alt="$sickbeard.indexerApi($show.indexer).name" height="16" width="16" src="$sbRoot/images/$sickbeard.indexerApi($show.indexer).config["icon"] "style="margin-top: -1px; vertical-align:middle;"/></a>
-#if $xem_numbering or $xem_absolute_numbering:
-				<a href="<%= anon_url('http://thexem.de/search?q=', _show.name) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://thexem.de/search?q-$show.name"><img alt="[xem]" height="16" width="16" src="$sbRoot/images/xem.png" style="margin-top: -1px; vertical-align:middle;"/></a>
-#end if
-			</div>
-
-			<div id="tags">
-				<ul class="tags">
-					#if not $show.imdbid
-					#if $show.genre:
-					#for $genre in $show.genre[1:-1].split('|')
-						<a href="<%= anon_url('http://trakt.tv/shows/popular/', genre.lower()) %>" target="_blank" title="View other popular $genre shows on trakt.tv."><li>$genre</li></a>
-					#end for
-					#end if
-					#end if
-					#if 'year' in $show.imdb_info:
-					#for $imdbgenre in $show.imdb_info['genres'].replace('Sci-Fi','Science-Fiction').split('|')
-						<a href="<%= anon_url('http://trakt.tv/shows/popular/', imdbgenre.lower()) %>" target="_blank" title="View other popular $imdbgenre shows on trakt.tv."><li>$imdbgenre</li></a>
-					#end for
-					#end if
-				</ul>
-			</div>
-		
-			<div id="summary">
-				<table class="summaryTable pull-left">
-				#set $anyQualities, $bestQualities = $Quality.splitQuality(int($show.quality))
-					<tr><td class="showLegend">Quality: </td><td>
-				#if $show.quality in $qualityPresets:
-					<span class="quality $qualityPresetStrings[$show.quality]">$qualityPresetStrings[$show.quality]</span>
-				#else:
-				#if $anyQualities:
-					<i>Initial:</i> <%=", ".join([Quality.qualityStrings[x] for x in sorted(anyQualities)])%> #if $bestQualities then " </br> " else ""#
-				#end if
-				#if $bestQualities:
-					<i>Replace with:</i> <%=", ".join([Quality.qualityStrings[x] for x in sorted(bestQualities)])%>
-				#end if
-				#end if
-
-				#if $show.network and $show.airs:
-					<tr><td class="showLegend">Originally Airs: </td><td>$show.airs #if not $network_timezones.test_timeformat($show.airs) then " <font color='#FF0000'><b>(invalid Timeformat)</b></font> " else ""# on $show.network</td></tr>
-				#else if $show.network:
-					<tr><td class="showLegend">Originally Airs: </td><td>$show.network</td></tr>
-				#else if $show.airs:
-					<tr><td class="showLegend">Originally Airs: </td><td>>$show.airs #if not $network_timezones.test_timeformat($show.airs) then " <font color='#FF0000'><b>(invalid Timeformat)</b></font> " else ""#</td></tr>
-				#end if
-					<tr><td class="showLegend">Show Status: </td><td>$show.status</td></tr>
-					<tr><td class="showLegend">Default EP Status: </td><td>$statusStrings[$show.default_ep_status]</td></tr>
-				#if $showLoc[1]:
-					<tr><td class="showLegend">Location: </td><td>$showLoc[0]</td></tr>
-				#else:
-					<tr><td class="showLegend"><span style="color: red;">Location: </span></td><td><span style="color: red;">$showLoc[0]</span> (dir is missing)</td></tr>
-				#end if
-					<tr><td class="showLegend">Scene Name:</td><td>#if $show.exceptions then $exceptions_string else $show.name#</td></tr>
-					
-				#if $show.rls_require_words:
-					<tr><td class="showLegend">Required Words: </td><td>#echo $show.rls_require_words#</td></tr>
-				#end if
-				#if $show.rls_ignore_words:
-					<tr><td class="showLegend">Ignored Words: </td><td>#echo $show.rls_ignore_words#</td></tr>
-				#end if
-				#if $bwl and $bwl.get_white_keywords_for("release_group"):
-					<tr><td class="showLegend">Wanted Group#if len($bwl.get_white_keywords_for("release_group"))>1 then "s" else ""#:</td>
-					<td>#echo ', '.join($bwl.get_white_keywords_for("release_group"))#</td>
-					</tr>
-				#end if
-				#if $bwl and $bwl.get_black_keywords_for("release_group"):
-					<tr><td class="showLegend">Unwanted Group#if len($bwl.get_black_keywords_for("release_group"))>1 then "s" else ""#:</td>
-					<td>#echo ', '.join($bwl.get_black_keywords_for("release_group"))#</td>
-					</tr>
-				#end if
-
-				<tr><td class="showLegend">Size:</td><td>$sickbeard.helpers.pretty_filesize(sickbeard.helpers.get_size($showLoc[0]))</td></tr>
-
-				</table>
-			
-				<table style="width:180px; float: right; vertical-align: middle; height: 100%;">
-					<tr><td class="showLegend">Info Language:</td><td><img src="$sbRoot/images/flags/${show.lang}.png" width="16" height="11" alt="$show.lang" title="$show.lang" /></td></tr>
-					#if $sickbeard.USE_SUBTITLES
-					<tr><td class="showLegend">Subtitles: </td><td><img src="$sbRoot/images/#if int($show.subtitles) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					#end if
-					<tr><td class="showLegend">Flat Folders: </td><td><img src="$sbRoot/images/#if $show.flatten_folders == 1 or $sickbeard.NAMING_FORCE_FOLDERS then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					<tr><td class="showLegend">Paused: </td><td><img src="$sbRoot/images/#if int($show.paused) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					<tr><td class="showLegend">Air-by-Date: </td><td><img src="$sbRoot/images/#if int($show.air_by_date) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					<tr><td class="showLegend">Sports: </td><td><img src="$sbRoot/images/#if int($show.is_sports) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					<tr><td class="showLegend">Anime: </td><td><img src="$sbRoot/images/#if int($show.is_anime) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					<tr><td class="showLegend">DVD Order: </td><td><img src="$sbRoot/images/#if int($show.dvdorder) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					<tr><td class="showLegend">Scene Numbering: </td><td><img src="$sbRoot/images/#if int($show.scene) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					#if $anyQualities + $bestQualities
-					<tr><td class="showLegend">Archive First Match: </td><td><img src="$sbRoot/images/#if int($show.archive_firstmatch) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
-					#end if
-				</table>
-			</div>
-		</div>
-	</div>
-		
-	<div class="clearfix"></div>
-
-	<div class="pull-left" >
-		Change selected episodes to:</br>
-		<select id="statusSelect" class="form-control form-control-inline input-sm">
-			#for $curStatus in [$WANTED, $SKIPPED, $ARCHIVED, $IGNORED, $FAILED] + sorted($Quality.DOWNLOADED):
-			#if $curStatus == $DOWNLOADED:
-			#continue
-			#end if
-			<option value="$curStatus">$statusStrings[$curStatus]</option>
-			#end for
-		</select>
-		<input type="hidden" id="showID" value="$show.indexerid" />
-		<input type="hidden" id="indexer" value="$show.indexer" />
-		<input class="btn btn-inline" type="button" id="changeStatus" value="Go" />
-	</div>
-
-	</br>
-
-	<div class="pull-right clearfix" id="checkboxControls">
-		<div style="padding-bottom: 5px;">
-			<label for="wanted"><span class="wanted"><input type="checkbox" id="wanted" checked="checked" /> Wanted: <b>$epCounts[$Overview.WANTED]</b></span></label>
-			<label for="qual"><span class="qual"><input type="checkbox" id="qual" checked="checked" /> Low Quality: <b>$epCounts[$Overview.QUAL]</b></span></label>
-			<label for="good"><span class="good"><input type="checkbox" id="good" checked="checked" /> Downloaded: <b>$epCounts[$Overview.GOOD]</b></span></label>
-			<label for="skipped"><span class="skipped"><input type="checkbox" id="skipped" checked="checked" /> Skipped: <b>$epCounts[$Overview.SKIPPED]</b></span></label>
-			<label for="snatched"><span class="snatched"><input type="checkbox" id="snatched" checked="checked" /> Snatched: <b>$epCounts[$Overview.SNATCHED]</b></span></label>
-		</div>
-		
-		<div class="pull-right" >
-			<button class="btn btn-xs seriesCheck">Select Filtered Episodes</button> 
-			<button class="btn btn-xs clearAll">Clear All</button>
-		</div>
-	</div>
-<br />	
-
-<table class="sickbeardTable display_show" cellspacing="0" border="0" cellpadding="0">
-
-#set $curSeason = -1
-#set $odd = 0
-
-	#for $epResult in $sqlResults:
-		#set $epStr = str($epResult["season"]) + "x" + str($epResult["episode"])
-		#if not $epStr in $epCats:
-			#continue
-		#end if
-
-		#if not $sickbeard.DISPLAY_SHOW_SPECIALS and int($epResult["season"]) == 0:
-			#continue
-		#end if	
-
-		#set $scene = False
-		#set $scene_anime = False
-		#if not $show.air_by_date and not $show.is_sports and not $show.is_anime and $show.is_scene:
-			#set $scene = True
-		#elif not $show.air_by_date and not $show.is_sports and $show.is_anime and $show.is_scene:
-			#set $scene_anime = True
-		#end if
-
-		#set ($dfltSeas, $dfltEpis, $dfltAbsolute) = (0, 0, 0)
-
-		#if (epResult["season"], epResult["episode"]) in $xem_numbering:
-			#set ($dfltSeas, $dfltEpis) = $xem_numbering[(epResult["season"], epResult["episode"])]
-		#end if
-
-		#if epResult["absolute_number"] in $xem_absolute_numbering:
-			#set $dfltAbsolute = $xem_absolute_numbering[epResult["absolute_number"]]
-		#end if
-
-		#if epResult["absolute_number"] in $scene_absolute_numbering:
-			#set $scAbsolute = $scene_absolute_numbering[epResult["absolute_number"]]
-			#set $dfltAbsNumbering = False
-		#else
-			#set $scAbsolute = $dfltAbsolute
-			#set $dfltAbsNumbering = True
-		#end if
-
-		#if (epResult["season"], epResult["episode"]) in $scene_numbering:
-			#set ($scSeas, $scEpis) = $scene_numbering[(epResult["season"], epResult["episode"])]
-			#set $dfltEpNumbering = False
-		#else
-			#set ($scSeas, $scEpis) = ($dfltSeas, $dfltEpis)
-			#set $dfltEpNumbering = True
-		#end if
-
-		#if int($epResult["season"]) != $curSeason:
-		    <tr>
-				<th class="row-seasonheader" colspan="13" style="width: auto;"><h3><a name="season-$epResult["season"]"></a>#if int($epResult["season"]) == 0 then "Specials" else "Season " + str($epResult["season"])#</h3></th>
-			</tr>
-			
-			<tr id="season-$epResult["season"]-cols" class="seasoncols">
-				<th class="col-checkbox"><input type="checkbox" class="seasonCheck" id="$epResult["season"]" /></th>
-			#if $sickbeard.PLAY_VIDEOS:
-				<th>Video</th>
-			#end if
-				<th class="col-metadata">NFO</th>
-				<th class="col-metadata">TBN</th>
-				<th class="col-ep">Episode</th>
-			#if $show.is_anime:
-				<th class="col-ep">Absolute</th>
-			#end if
-			#if $scene:
-				<th class="col-ep">Scene</th>
-			#end if
-			#if $scene_anime:
-				<th class="col-ep">Scene Absolute</th>
-			#end if
-				<th class="col-name"
-			#if ($sickbeard.DISPLAY_FILESIZE == True):
-			style="min-width: 190px"
-			#end if
-			>Name</th>
-			#if ($sickbeard.DISPLAY_FILESIZE == True):
-				<th class="col-ep">Size</th>
-			#end if
-				<th class="col-airdate">Airdate</th>
-			#if $sickbeard.DOWNLOAD_URL
-				<th class="col-ep">Download</th>
-			#end if
-			#if $sickbeard.USE_SUBTITLES and $show.subtitles:
-				<th class="col-subtitles">Subtitles</th>
-			#end if
-				<th class="col-status">Status</th>
-				<th class="col-search">Search</th>
-			</tr>
-			#set $curSeason = int($epResult["season"])
-		#end if    
-
-		#set $epLoc = $epResult["location"]
-		
-		<tr class="$Overview.overviewStrings[$epCats[$epStr]] season-$curSeason seasonstyle">
-		
-		<td class="col-checkbox">
-		
-		#if int($epResult["status"]) != $UNAIRED
-			<input type="checkbox" class="epCheck" id="<%=str(epResult["season"])+'x'+str(epResult["episode"])%>" name="<%=str(epResult["season"]) +"x"+str(epResult["episode"]) %>" />
-		#end if
-		</td>
-    
-		#if $sickbeard.PLAY_VIDEOS:
-			<td align="center">
-			#if $epResult["location"]:
-				#set $video_root = $os.path.dirname($show._location)
-				#set $video_source = $sbRoot + $epResult["location"].replace($video_root, '/videos')
-				<div id="$video_source" class="jwvideo">Loading the player...</div>
-			#else:
-				No Video
-			#end if
-			</td>
-		#end if
-		
-		<td align="center"><img src="$sbRoot/images/#if $epResult["hasnfo"] == 1 then "nfo.gif\" alt=\"Y" else "nfo-no.gif\" alt=\"N"#" width="23" height="11" /></td>
-		
-		<td align="center"><img src="$sbRoot/images/#if $epResult["hastbn"] == 1 then "tbn.gif\" alt=\"Y" else "tbn-no.gif\" alt=\"N"#" width="23" height="11" /></td>
-		
-		<td align="center">
-			#if $epLoc and $show._location and $epLoc.lower().startswith($show._location.lower()):
-				#set $epLoc = $epLoc[len($show._location)+1:]
-			#elif $epLoc and (not $epLoc.lower().startswith($show._location.lower()) or not $show._location):
-				#set $epLoc = $epLoc
-			#end if
-		
-			#if $epLoc != "" and $epLoc != None:
-				<span title="$epLoc" class="addQTip">$epResult["episode"]</span>
-			#else
-				$epResult["episode"]
-			#end if
-		</td>
-		
-		#if $show.is_anime:
-			<td align="center">$epResult["absolute_number"]</td>
-		#end if
-
-		#if $scene:
-			<td align="center">
-				<input type="text" placeholder="<%=str(dfltSeas) + 'x' + str(dfltEpis)%>" size="6" maxlength="8"
-					class="sceneSeasonXEpisode form-control input-scene" data-for-season="$epResult["season"]" data-for-episode="$epResult["episode"]"
-					id="sceneSeasonXEpisode_$show.indexerid<%="_"+str(epResult["season"])+"_"+str(epResult["episode"])%>"
-					title="Change the value here if scene numbering differs from the indexer episode numbering"
-					#if $dfltEpNumbering:
-						value=""
-					#else
-						value="<%=str(scSeas) + 'x' + str(scEpis)%>"
-					#end if
-						style="padding: 0; text-align: center; max-width: 60px;" />
-			</td>
-		#elif $scene_anime:
-			<td align="center">
-				<input type="text" placeholder="<%=str(dfltAbsolute)%>" size="6" maxlength="8"
-					class="sceneAbsolute form-control input-scene" data-for-absolute="$epResult["absolute_number"]"
-					id="sceneAbsolute_$show.indexerid<%="_"+str(epResult["absolute_number"])%>"
-					title="Change the value here if scene absolute numbering differs from the indexer absolute numbering"
-					#if $dfltAbsNumbering:
-						value=""
-					#else
-						value="<%=str(scAbsolute)%>"
-					#end if
-						style="padding: 0; text-align: center; max-width: 60px;" />
-			</td>
-		#end if
-
-		<td class="col-name">
-			#if $epResult["description"] != "" and $epResult["description"] != None:
-				<img src="$sbRoot/images/info32.png" width="16" height="16" class="plotInfo" alt="" id="plot_info_$show.indexerid<%="_" + str(epResult["season"]) + "_" + str(epResult["episode"])%>" />
-			#else:
-				<img src="$sbRoot/images/info32.png" width="16" height="16" class="plotInfoNone" alt="" />
-			#end if
-			$epResult["name"]
-		</td>
-		
-		#if ($sickbeard.DISPLAY_FILESIZE == True):
-		<td class="col-ep">
-			#if $epResult["file_size"]:
-				#set $file_size = $sickbeard.helpers.pretty_filesize($epResult["file_size"])
-				$file_size
-			#end if
-		</td>
-		#end if
-		<td class="col-airdate">
-			<span class="${fuzzydate}">#if int($epResult['airdate']) == 1 then 'never' else $sbdatetime.sbdatetime.sbfdate($sbdatetime.sbdatetime.convert_to_setting($network_timezones.parse_date_time($epResult['airdate'],$show.airs,$show.network)))#</span>
-		</td>
-
-		#if $sickbeard.DOWNLOAD_URL and $epResult['location']
-		<td>
-			#set $filename = $epResult['location']
-		  	#for $rootDir in $sickbeard.ROOT_DIRS.split('|')
-		   		#if $rootDir.startswith('/')
-		   			#set $filename = $filename.replace($rootDir, "")
-		   		#end if
-		   	#end for
-			#set $filename = $sickbeard.DOWNLOAD_URL + $urllib.quote($filename.encode('utf8'))
-			<center><a href="$filename">Download</a></center>
-		</td>
-		#elif $sickbeard.DOWNLOAD_URL
-			<td></td>
-		#end if
-		
-		#if $sickbeard.USE_SUBTITLES and $show.subtitles:
-			<td class="col-subtitles" align="center">
-        #if $epResult["subtitles"]:
-            #for $sub_lang in subliminal.language.language_list([x.strip() for x in $epResult["subtitles"].split(',') if x != ""]):
-                #if sub_lang.alpha2 != ""
-                    <img src="$sbRoot/images/flags/${sub_lang.alpha2}.png" width="16" height="11" alt="${sub_lang}" />
-                #else
-                    <img src="$sbRoot/images/flags/unknown.png" width="16" height="11" alt="Unknown" />
-                #end if
-            #end for
-        #end if
-			</td>
-		#end if
-		
-		#set $curStatus, $curQuality = $Quality.splitCompositeStatus(int($epResult["status"]))
-			#if $curQuality != Quality.NONE:   
-				<td class="col-status">$statusStrings[$curStatus] <span class="quality $Quality.qualityStrings[$curQuality].replace("720p","HD720p").replace("1080p","HD1080p").replace("RawHD TV", "RawHD").replace("HD TV", "HD720p")">$Quality.qualityStrings[$curQuality]</span></td>
-			#else:    
-				<td class="col-status">$statusStrings[$curStatus]</td>
-			#end if
-		
-		<td class="col-search">
-			#if int($epResult["season"]) != 0:
-			#if ( int($epResult["status"]) in $Quality.SNATCHED or int($epResult["status"]) in $Quality.DOWNLOADED ) and $sickbeard.USE_FAILED_DOWNLOADS:
-				<a class="epRetry" id="<%=str(epResult["season"])+'x'+str(epResult["episode"])%>" name="<%=str(epResult["season"]) +"x"+str(epResult["episode"]) %>" href="retryEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" height="16" alt="retry" title="Retry Download" /></a>
-			#else:
-				<a class="epSearch" id="<%=str(epResult["season"])+'x'+str(epResult["episode"])%>" name="<%=str(epResult["season"]) +"x"+str(epResult["episode"]) %>" href="searchEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" width="16" height="16" alt="search" title="Manual Search" /></a>
-			#end if
-			#end if
-			
-			#if $sickbeard.USE_SUBTITLES and $show.subtitles and len(set(str($epResult["subtitles"]).split(',')).intersection(set($subtitles.wantedLanguages()))) < len($subtitles.wantedLanguages()) and $epResult["location"]
-				<a class="epSubtitlesSearch" href="searchEpisodeSubtitles?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/closed_captioning.png" height="16" alt="search subtitles" title="Search Subtitles" /></a>
-			#end if
-		</td>
-	</tr>
-
-	#end for
-	
-</table>
-
-#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
+#import sickbeard
+#from sickbeard import subtitles, sbdatetime, network_timezones
+#import sickbeard.helpers
+#from sickbeard.common import *
+#from sickbeard.helpers import anon_url
+#from lib import subliminal
+#import os.path, os
+#import datetime
+#import urllib
+
+#set global $title=$show.name
+##set global $header = '<a></a>' % 
+#set global $topmenu="manageShows"#
+#set $exceptions_string = " | ".join($show.exceptions)
+#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
+
+<script type="text/javascript" src="$sbRoot/js/lib/jquery.bookmarkscroll.js?$sbPID"></script>
+<script type="text/javascript" src="$sbRoot/js/jwplayer/jwplayer.js"></script>
+<script type="text/javascript">jwplayer.key="Zq3m618ITHrFxeKGi3Gf33ovC+XtdGQz19MMug==";</script>
+
+<input type="hidden" id="sbRoot" value="$sbRoot" />
+
+<script type="text/javascript" src="$sbRoot/js/displayShow.js?$sbPID"></script>
+<script type="text/javascript" src="$sbRoot/js/plotTooltip.js?$sbPID"></script>
+<script type="text/javascript" src="$sbRoot/js/sceneExceptionsTooltip.js?$sbPID"></script>
+<script type="text/javascript" src="$sbRoot/js/ratingTooltip.js?$sbPID"></script>
+<script type="text/javascript" src="$sbRoot/js/ajaxEpSearch.js?$sbPID"></script>
+<script type="text/javascript" src="$sbRoot/js/ajaxEpSubtitles.js?$sbPID"></script>
+<script type="text/javascript" charset="utf-8">
+<!--
+\$(document).ready(function(){
+    #set $fuzzydate = 'airdate'
+    #if $sickbeard.FUZZY_DATING:
+    fuzzyMoment({
+        containerClass : '.${fuzzydate}',
+        dateHasTime : false,
+        dateFormat : '${sickbeard.DATE_PRESET}',
+        timeFormat : '${sickbeard.TIME_PRESET}',
+        trimZero : #if $sickbeard.TRIM_ZERO then "true" else "false"#
+    });
+    #end if
+    #raw
+    $('.addQTip').each(function () {
+        $(this).css({'cursor':'help', 'text-shadow':'0px 0px 0.5px #666'});
+        $(this).qtip({
+            show: {solo:true},
+            position: {viewport:$(window), my:'left center', adjust:{ y: -10, x: 2 }},
+            style: {tip:{corner:true, method:'polygon'}, classes:'qtip-rounded qtip-shadow ui-tooltip-sb'}
+        });
+    });
+    #end raw
+	
+	\$.fn.generateStars = function() {
+		return this.each(function(i,e){\$(e).html(\$('<span/>').width(\$(e).text()*12));});
+	};
+
+	\$('.imdbstars').generateStars();
+	
+});
+//-->
+</script>
+
+	<div class="pull-left form-inline">
+		Change Show:
+		<div class="navShow"><img id="prevShow" src="$sbRoot/images/prev.png" alt="&lt;&lt;" title="Prev Show" /></div>
+			<select id="pickShow" class="form-control form-control-inline input-sm">
+			#for $curShowList in $sortedShowLists:
+				#set $curShowType = $curShowList[0]
+				#set $curShowList = $curShowList[1]
+
+				#if len($sortedShowLists) > 1:
+					<optgroup label="$curShowType">
+				#end if
+					#for $curShow in $curShowList:
+					<option value="$curShow.indexerid" #if $curShow == $show then "selected=\"selected\"" else ""#>$curShow.name</option>
+					#end for
+				#if len($sortedShowLists) > 1:
+					</optgroup>
+				#end if
+			#end for
+			</select>
+		<div class="navShow"><img id="nextShow" src="$sbRoot/images/next.png" alt="&gt;&gt;" title="Next Show" /></div>
+	</div>
+
+	<div class="clearfix"></div>
+
+	<div id="showtitle" data-showname="$show.name">
+		<h1 class="title" id="scene_exception_$show.indexerid">$show.name</h1>
+	</div>
+
+	
+		#if $seasonResults:
+		##There is a special/season_0?##
+		#if int($seasonResults[-1]["season"]) == 0:
+			#set $season_special = 1
+		#else: 
+			#set $season_special = 0
+		#end if
+
+		#if not $sickbeard.DISPLAY_SHOW_SPECIALS and $season_special:
+			$seasonResults.pop(-1)
+		#end if
+
+		<span class="h2footer displayspecials pull-right">
+			#if $season_special:
+			Display Specials:
+				#if sickbeard.DISPLAY_SHOW_SPECIALS:
+					<a class="inner" href="$sbRoot/toggleDisplayShowSpecials/?show=$show.indexerid">Hide</a>
+				#else:
+					<a class="inner" href="$sbRoot/toggleDisplayShowSpecials/?show=$show.indexerid">Show</a>
+				#end if
+			#end if
+		</span>
+		
+		<div class="h2footer pull-right"> 
+			<span>
+			#if (len($seasonResults) > 14):
+				<select id="seasonJump" class="form-control input-sm" style="position: relative; top: -4px;">
+					<option value="jump">Jump to Season</option>
+				#for $seasonNum in $seasonResults:
+					<option value="#season-$seasonNum["season"]">#if int($seasonNum["season"]) == 0 then "Specials" else "Season " + str($seasonNum["season"])#</option>
+				#end for
+				</select>
+			#else:
+				Season:
+				#for $seasonNum in $seasonResults:
+					#if int($seasonNum["season"]) == 0:
+						<a href="#season-$seasonNum["season"]">Specials</a>
+					#else:
+						<a href="#season-$seasonNum["season"]">${str($seasonNum["season"])}</a>
+					#end if
+					#if $seasonNum != $seasonResults[-1]:
+						<span class="separator">|</span>
+					#end if
+				#end for
+			#end if
+			</span>
+
+		#end if
+		</div>
+	
+	<div class="clearfix"></div>
+    
+#if $show_message:
+	<div class="alert alert-info">
+		$show_message
+	</div>
+#end if
+	
+	<div id="container">
+		<div id="posterCol">
+			<a href="$sbRoot/showPoster/?show=$show.indexerid&amp;which=poster" rel="dialog" title="View Poster for $show.name"><img src="$sbRoot/showPoster/?show=$show.indexerid&amp;which=poster_thumb" class="tvshowImg" alt=""/></a>
+		</div>
+
+		<div id="showCol">
+		
+			<div id="showinfo">
+#if 'rating' in $show.imdb_info:
+    #set $rating_tip = str($show.imdb_info['rating']) + " / 10" + " Stars" + "<br />" + str($show.imdb_info['votes']) + " Votes"
+				<span class="imdbstars" qtip-content="$rating_tip">$show.imdb_info['rating']</span>
+#end if
+				
+#set $_show = $show
+#if not $show.imdbid
+				<span>($show.startyear) - $show.runtime minutes - </span>
+#else
+    #if 'country_codes' in $show.imdb_info:
+        #for $country in $show.imdb_info['country_codes'].split('|')
+				<img src="$sbRoot/images/blank.png" class="country-flag flag-${$country}" width="16" height="11" style="margin-left: 3px; vertical-align:middle;" />
+        #end for
+    #end if
+    #if 'year' in $show.imdb_info:
+				<span>($show.imdb_info['year']) - $show.imdb_info['runtimes'] minutes - </span>
+    #end if
+				<a href="<%= anon_url('http://www.imdb.com/title/', _show.imdbid) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://www.imdb.com/title/$show.imdbid"><img alt="[imdb]" height="16" width="16" src="$sbRoot/images/imdb.png" style="margin-top: -1px; vertical-align:middle;"/></a>
+#end if
+				<a href="<%= anon_url(sickbeard.indexerApi(_show.indexer).config['show_url'], _show.indexerid) %>" onclick="window.open(this.href, '_blank'); return false;" title="$sickbeard.indexerApi($show.indexer).config["show_url"]$show.indexerid"><img alt="$sickbeard.indexerApi($show.indexer).name" height="16" width="16" src="$sbRoot/images/$sickbeard.indexerApi($show.indexer).config["icon"] "style="margin-top: -1px; vertical-align:middle;"/></a>
+#if $xem_numbering or $xem_absolute_numbering:
+				<a href="<%= anon_url('http://thexem.de/search?q=', _show.name) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://thexem.de/search?q-$show.name"><img alt="[xem]" height="16" width="16" src="$sbRoot/images/xem.png" style="margin-top: -1px; vertical-align:middle;"/></a>
+#end if
+			</div>
+
+			<div id="tags">
+				<ul class="tags">
+					#if not $show.imdbid
+					#if $show.genre:
+					#for $genre in $show.genre[1:-1].split('|')
+						<a href="<%= anon_url('http://trakt.tv/shows/popular/', genre.lower()) %>" target="_blank" title="View other popular $genre shows on trakt.tv."><li>$genre</li></a>
+					#end for
+					#end if
+					#end if
+					#if 'year' in $show.imdb_info:
+					#for $imdbgenre in $show.imdb_info['genres'].replace('Sci-Fi','Science-Fiction').split('|')
+						<a href="<%= anon_url('http://trakt.tv/shows/popular/', imdbgenre.lower()) %>" target="_blank" title="View other popular $imdbgenre shows on trakt.tv."><li>$imdbgenre</li></a>
+					#end for
+					#end if
+				</ul>
+			</div>
+		
+			<div id="summary">
+				<table class="summaryTable pull-left">
+				#set $anyQualities, $bestQualities = $Quality.splitQuality(int($show.quality))
+					<tr><td class="showLegend">Quality: </td><td>
+				#if $show.quality in $qualityPresets:
+					<span class="quality $qualityPresetStrings[$show.quality]">$qualityPresetStrings[$show.quality]</span>
+				#else:
+				#if $anyQualities:
+					<i>Initial:</i> <%=", ".join([Quality.qualityStrings[x] for x in sorted(anyQualities)])%> #if $bestQualities then " <br /> " else ""#
+				#end if
+				#if $bestQualities:
+					<i>Replace with:</i> <%=", ".join([Quality.qualityStrings[x] for x in sorted(bestQualities)])%>
+				#end if
+				#end if
+
+				#if $show.network and $show.airs:
+					<tr><td class="showLegend">Originally Airs: </td><td>$show.airs #if not $network_timezones.test_timeformat($show.airs) then " <font color='#FF0000'><b>(invalid time format)</b></font> " else ""# on $show.network</td></tr>
+				#else if $show.network:
+					<tr><td class="showLegend">Originally Airs: </td><td>$show.network</td></tr>
+				#else if $show.airs:
+					<tr><td class="showLegend">Originally Airs: </td><td>$show.airs #if not $network_timezones.test_timeformat($show.airs) then " <font color='#FF0000'><b>(invalid time format)</b></font> " else ""#</td></tr>
+				#end if
+					<tr><td class="showLegend">Show Status: </td><td>$show.status</td></tr>
+					<tr><td class="showLegend">Default EP Status: </td><td>$statusStrings[$show.default_ep_status]</td></tr>
+				#if $showLoc[1]:
+					<tr><td class="showLegend">Location: </td><td>$showLoc[0]</td></tr>
+				#else:
+					<tr><td class="showLegend"><span style="color: red;">Location: </span></td><td><span style="color: red;">$showLoc[0]</span> (dir is missing)</td></tr>
+				#end if
+					<tr><td class="showLegend">Scene Name:</td><td>#if $show.exceptions then $exceptions_string else $show.name#</td></tr>
+					
+				#if $show.rls_require_words:
+					<tr><td class="showLegend">Required Words: </td><td>#echo $show.rls_require_words#</td></tr>
+				#end if
+				#if $show.rls_ignore_words:
+					<tr><td class="showLegend">Ignored Words: </td><td>#echo $show.rls_ignore_words#</td></tr>
+				#end if
+				#if $bwl and $bwl.get_white_keywords_for("release_group"):
+					<tr><td class="showLegend">Wanted Group#if len($bwl.get_white_keywords_for("release_group"))>1 then "s" else ""#:</td>
+					<td>#echo ', '.join($bwl.get_white_keywords_for("release_group"))#</td>
+					</tr>
+				#end if
+				#if $bwl and $bwl.get_black_keywords_for("release_group"):
+					<tr><td class="showLegend">Unwanted Group#if len($bwl.get_black_keywords_for("release_group"))>1 then "s" else ""#:</td>
+					<td>#echo ', '.join($bwl.get_black_keywords_for("release_group"))#</td>
+					</tr>
+				#end if
+
+				<tr><td class="showLegend">Size:</td><td>$sickbeard.helpers.pretty_filesize(sickbeard.helpers.get_size($showLoc[0]))</td></tr>
+
+				</table>
+			
+				<table style="width:180px; float: right; vertical-align: middle; height: 100%;">
+					<tr><td class="showLegend">Info Language:</td><td><img src="$sbRoot/images/flags/${show.lang}.png" width="16" height="11" alt="$show.lang" title="$show.lang" /></td></tr>
+					#if $sickbeard.USE_SUBTITLES
+					<tr><td class="showLegend">Subtitles: </td><td><img src="$sbRoot/images/#if int($show.subtitles) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					#end if
+					<tr><td class="showLegend">Flat Folders: </td><td><img src="$sbRoot/images/#if $show.flatten_folders == 1 or $sickbeard.NAMING_FORCE_FOLDERS then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					<tr><td class="showLegend">Paused: </td><td><img src="$sbRoot/images/#if int($show.paused) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					<tr><td class="showLegend">Air-by-Date: </td><td><img src="$sbRoot/images/#if int($show.air_by_date) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					<tr><td class="showLegend">Sports: </td><td><img src="$sbRoot/images/#if int($show.is_sports) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					<tr><td class="showLegend">Anime: </td><td><img src="$sbRoot/images/#if int($show.is_anime) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					<tr><td class="showLegend">DVD Order: </td><td><img src="$sbRoot/images/#if int($show.dvdorder) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					<tr><td class="showLegend">Scene Numbering: </td><td><img src="$sbRoot/images/#if int($show.scene) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					#if $anyQualities + $bestQualities
+					<tr><td class="showLegend">Archive First Match: </td><td><img src="$sbRoot/images/#if int($show.archive_firstmatch) == 1 then "yes16.png\" alt=\"Y" else "no16.png\" alt=\"N"#" width="16" height="16" /></td></tr>
+					#end if
+				</table>
+			</div>
+		</div>
+	</div>
+		
+	<div class="clearfix"></div>
+
+	<div class="pull-left" >
+		Change selected episodes to:</br>
+		<select id="statusSelect" class="form-control form-control-inline input-sm">
+			#for $curStatus in [$WANTED, $SKIPPED, $ARCHIVED, $IGNORED, $FAILED] + sorted($Quality.DOWNLOADED):
+			#if $curStatus == $DOWNLOADED:
+			#continue
+			#end if
+			<option value="$curStatus">$statusStrings[$curStatus]</option>
+			#end for
+		</select>
+		<input type="hidden" id="showID" value="$show.indexerid" />
+		<input type="hidden" id="indexer" value="$show.indexer" />
+		<input class="btn btn-inline" type="button" id="changeStatus" value="Go" />
+	</div>
+
+	<br />
+
+	<div class="pull-right clearfix" id="checkboxControls">
+		<div style="padding-bottom: 5px;">
+			<label for="wanted"><span class="wanted"><input type="checkbox" id="wanted" checked="checked" /> Wanted: <b>$epCounts[$Overview.WANTED]</b></span></label>
+			<label for="qual"><span class="qual"><input type="checkbox" id="qual" checked="checked" /> Low Quality: <b>$epCounts[$Overview.QUAL]</b></span></label>
+			<label for="good"><span class="good"><input type="checkbox" id="good" checked="checked" /> Downloaded: <b>$epCounts[$Overview.GOOD]</b></span></label>
+			<label for="skipped"><span class="skipped"><input type="checkbox" id="skipped" checked="checked" /> Skipped: <b>$epCounts[$Overview.SKIPPED]</b></span></label>
+			<label for="snatched"><span class="snatched"><input type="checkbox" id="snatched" checked="checked" /> Snatched: <b>$epCounts[$Overview.SNATCHED]</b></span></label>
+		</div>
+		
+		<div class="pull-right" >
+			<button class="btn btn-xs seriesCheck">Select Filtered Episodes</button> 
+			<button class="btn btn-xs clearAll">Clear All</button>
+		</div>
+	</div>
+<br />	
+
+<table class="sickbeardTable display_show" cellspacing="0" border="0" cellpadding="0">
+
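+## Track the previous row's season so a season header row is emitted each
+## time a new season starts; -1 forces a header before the first row.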
+#set $curSeason = -1
+#set $odd = 0
+
+	#for $epResult in $sqlResults:
+		#set $epStr = str($epResult["season"]) + "x" + str($epResult["episode"])
+		#if not $epStr in $epCats:
+			#continue
+		#end if
+
+		#if not $sickbeard.DISPLAY_SHOW_SPECIALS and int($epResult["season"]) == 0:
+			#continue
+		#end if	
+
+		#set $scene = False
+		#set $scene_anime = False
+		#if not $show.air_by_date and not $show.is_sports and not $show.is_anime and $show.is_scene:
+			#set $scene = True
+		#elif not $show.air_by_date and not $show.is_sports and $show.is_anime and $show.is_scene:
+			#set $scene_anime = True
+		#end if
+
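+		## XEM mappings provide the default scene numbering; any user-set scene
+		## numbering applied below overrides these defaults.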
+		#set ($dfltSeas, $dfltEpis, $dfltAbsolute) = (0, 0, 0)
+
+		#if (epResult["season"], epResult["episode"]) in $xem_numbering:
+			#set ($dfltSeas, $dfltEpis) = $xem_numbering[(epResult["season"], epResult["episode"])]
+		#end if
+
+		#if epResult["absolute_number"] in $xem_absolute_numbering:
+			#set $dfltAbsolute = $xem_absolute_numbering[epResult["absolute_number"]]
+		#end if
+
+		#if epResult["absolute_number"] in $scene_absolute_numbering:
+			#set $scAbsolute = $scene_absolute_numbering[epResult["absolute_number"]]
+			#set $dfltAbsNumbering = False
+		#else
+			#set $scAbsolute = $dfltAbsolute
+			#set $dfltAbsNumbering = True
+		#end if
+
+		#if (epResult["season"], epResult["episode"]) in $scene_numbering:
+			#set ($scSeas, $scEpis) = $scene_numbering[(epResult["season"], epResult["episode"])]
+			#set $dfltEpNumbering = False
+		#else
+			#set ($scSeas, $scEpis) = ($dfltSeas, $dfltEpis)
+			#set $dfltEpNumbering = True
+		#end if
+
+		#if int($epResult["season"]) != $curSeason:
+		    <tr>
+				<th class="row-seasonheader" colspan="13" style="width: auto;"><h3><a name="season-$epResult["season"]"></a>#if int($epResult["season"]) == 0 then "Specials" else "Season " + str($epResult["season"])#</h3></th>
+			</tr>
+			
+			<tr id="season-$epResult["season"]-cols" class="seasoncols">
+				<th class="col-checkbox"><input type="checkbox" class="seasonCheck" id="$epResult["season"]" /></th>
+			#if $sickbeard.PLAY_VIDEOS:
+				<th>Video</th>
+			#end if
+				<th class="col-metadata">NFO</th>
+				<th class="col-metadata">TBN</th>
+				<th class="col-ep">Episode</th>
+			#if $show.is_anime:
+				<th class="col-ep">Absolute</th>
+			#end if
+			#if $scene:
+				<th class="col-ep">Scene</th>
+			#end if
+			#if $scene_anime:
+				<th class="col-ep">Scene Absolute</th>
+			#end if
+				<th class="col-name"
+			#if ($sickbeard.DISPLAY_FILESIZE == True):
+			style="min-width: 190px"
+			#end if
+			>Name</th>
+			#if ($sickbeard.DISPLAY_FILESIZE == True):
+				<th class="col-ep">Size</th>
+			#end if
+				<th class="col-airdate">Airdate</th>
+			#if $sickbeard.DOWNLOAD_URL
+				<th class="col-ep">Download</th>
+			#end if
+			#if $sickbeard.USE_SUBTITLES and $show.subtitles:
+				<th class="col-subtitles">Subtitles</th>
+			#end if
+				<th class="col-status">Status</th>
+				<th class="col-search">Search</th>
+			</tr>
+			#set $curSeason = int($epResult["season"])
+		#end if    
+
+		#set $epLoc = $epResult["location"]
+		
+		<tr class="$Overview.overviewStrings[$epCats[$epStr]] season-$curSeason seasonstyle">
+		
+		<td class="col-checkbox">
+		
+		#if int($epResult["status"]) != $UNAIRED
+			<input type="checkbox" class="epCheck" id="<%=str(epResult["season"])+'x'+str(epResult["episode"])%>" name="<%=str(epResult["season"]) +"x"+str(epResult["episode"]) %>" />
+		#end if
+		</td>
+    
+		#if $sickbeard.PLAY_VIDEOS:
+			<td align="center">
+			#if $epResult["location"]:
+				#set $video_root = $os.path.dirname($show._location)
+				#set $video_source = $sbRoot + $epResult["location"].replace($video_root, '/videos')
+				<div id="$video_source" class="jwvideo">Loading the player...</div>
+			#else:
+				No Video
+			#end if
+			</td>
+		#end if
+		
+		<td align="center"><img src="$sbRoot/images/#if $epResult["hasnfo"] == 1 then "nfo.gif\" alt=\"Y" else "nfo-no.gif\" alt=\"N"#" width="23" height="11" /></td>
+		
+		<td align="center"><img src="$sbRoot/images/#if $epResult["hastbn"] == 1 then "tbn.gif\" alt=\"Y" else "tbn-no.gif\" alt=\"N"#" width="23" height="11" /></td>
+		
+		<td align="center">
+			#if $epLoc and $show._location and $epLoc.lower().startswith($show._location.lower()):
+				#set $epLoc = $epLoc[len($show._location)+1:]
+			#elif $epLoc and (not $show._location or not $epLoc.lower().startswith($show._location.lower())):
+				#set $epLoc = $epLoc
+			#end if
+		
+			#if $epLoc != "" and $epLoc != None:
+				<span title="$epLoc" class="addQTip">$epResult["episode"]</span>
+			#else
+				$epResult["episode"]
+			#end if
+		</td>
+		
+		#if $show.is_anime:
+			<td align="center">$epResult["absolute_number"]</td>
+		#end if
+
+		#if $scene:
+			<td align="center">
+				<input type="text" placeholder="<%=str(dfltSeas) + 'x' + str(dfltEpis)%>" size="6" maxlength="8"
+					class="sceneSeasonXEpisode form-control input-scene" data-for-season="$epResult["season"]" data-for-episode="$epResult["episode"]"
+					id="sceneSeasonXEpisode_$show.indexerid<%="_"+str(epResult["season"])+"_"+str(epResult["episode"])%>"
+					title="Change the value here if scene numbering differs from the indexer episode numbering"
+					#if $dfltEpNumbering:
+						value=""
+					#else
+						value="<%=str(scSeas) + 'x' + str(scEpis)%>"
+					#end if
+						style="padding: 0; text-align: center; max-width: 60px;" />
+			</td>
+		#elif $scene_anime:
+			<td align="center">
+				<input type="text" placeholder="<%=str(dfltAbsolute)%>" size="6" maxlength="8"
+					class="sceneAbsolute form-control input-scene" data-for-absolute="$epResult["absolute_number"]"
+					id="sceneAbsolute_$show.indexerid<%="_"+str(epResult["absolute_number"])%>"
+					title="Change the value here if scene absolute numbering differs from the indexer absolute numbering"
+					#if $dfltAbsNumbering:
+						value=""
+					#else
+						value="<%=str(scAbsolute)%>"
+					#end if
+						style="padding: 0; text-align: center; max-width: 60px;" />
+			</td>
+		#end if
+
+		<td class="col-name">
+			#if $epResult["description"] != "" and $epResult["description"] != None:
+				<img src="$sbRoot/images/info32.png" width="16" height="16" class="plotInfo" alt="" id="plot_info_$show.indexerid<%="_" + str(epResult["season"]) + "_" + str(epResult["episode"])%>" />
+			#else:
+				<img src="$sbRoot/images/info32.png" width="16" height="16" class="plotInfoNone" alt="" />
+			#end if
+			$epResult["name"]
+		</td>
+		
+		#if ($sickbeard.DISPLAY_FILESIZE == True):
+		<td class="col-ep">
+			#if $epResult["file_size"]:
+				#set $file_size = $sickbeard.helpers.pretty_filesize($epResult["file_size"])
+				$file_size
+			#end if
+		</td>
+		#end if
+		<td class="col-airdate">
+			<span class="${fuzzydate}">#if int($epResult['airdate']) == 1 then 'never' else $sbdatetime.sbdatetime.sbfdate($sbdatetime.sbdatetime.convert_to_setting($network_timezones.parse_date_time($epResult['airdate'],$show.airs,$show.network)))#</span>
+		</td>
+
+		#if $sickbeard.DOWNLOAD_URL and $epResult['location']
+		<td>
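+			## Strip the matching root directory so the remaining relative path
+			## can be appended to the configured DOWNLOAD_URL base.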
+			#set $filename = $epResult['location']
+		  	#for $rootDir in $sickbeard.ROOT_DIRS.split('|')
+		   		#if $rootDir.startswith('/')
+		   			#set $filename = $filename.replace($rootDir, "")
+		   		#end if
+		   	#end for
+			#set $filename = $sickbeard.DOWNLOAD_URL + $urllib.quote($filename.encode('utf8'))
+			<center><a href="$filename">Download</a></center>
+		</td>
+		#elif $sickbeard.DOWNLOAD_URL
+			<td></td>
+		#end if
+		
+		#if $sickbeard.USE_SUBTITLES and $show.subtitles:
+			<td class="col-subtitles" align="center">
+        #if $epResult["subtitles"]:
+            #for $sub_lang in subliminal.language.language_list([x.strip() for x in $epResult["subtitles"].split(',') if x != ""]):
+                #if sub_lang.alpha2 != ""
+                    <img src="$sbRoot/images/flags/${sub_lang.alpha2}.png" width="16" height="11" alt="${sub_lang}" />
+                #else
+                    <img src="$sbRoot/images/flags/unknown.png" width="16" height="11" alt="Unknown" />
+                #end if
+            #end for
+        #end if
+			</td>
+		#end if
+		
+		#set $curStatus, $curQuality = $Quality.splitCompositeStatus(int($epResult["status"]))
+			#if $curQuality != Quality.NONE:   
+				<td class="col-status">$statusStrings[$curStatus] <span class="quality $Quality.qualityStrings[$curQuality].replace("720p","HD720p").replace("1080p","HD1080p").replace("RawHD TV", "RawHD").replace("HD TV", "HD720p")">$Quality.qualityStrings[$curQuality]</span></td>
+			#else:    
+				<td class="col-status">$statusStrings[$curStatus]</td>
+			#end if
+		
+		<td class="col-search">
+			#if int($epResult["season"]) != 0:
+			#if ( int($epResult["status"]) in $Quality.SNATCHED or int($epResult["status"]) in $Quality.DOWNLOADED ) and $sickbeard.USE_FAILED_DOWNLOADS:
+				<a class="epRetry" id="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" name="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" href="retryEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" height="16" alt="retry" title="Retry Download" /></a>
+			#else:
+				<a class="epSearch" id="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" name="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" href="searchEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" width="16" height="16" alt="search" title="Manual Search" /></a>
+			#end if
+			#end if
+			
+			#if $sickbeard.USE_SUBTITLES and $show.subtitles and len(set(str($epResult["subtitles"]).split(',')).intersection(set($subtitles.wantedLanguages()))) < len($subtitles.wantedLanguages()) and $epResult["location"]
+				<a class="epSubtitlesSearch" href="searchEpisodeSubtitles?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/closed_captioning.png" height="16" alt="search subtitles" title="Search Subtitles" /></a>
+			#end if
+		</td>
+	</tr>
+
+	#end for
+	
+</table>
+
+<!--Begin - Bootstrap Modal-->
+
+<div id="manualSearchModalFailed" class="modal fade">
+    <div class="modal-dialog">
+        <div class="modal-content">
+            <div class="modal-header">
+                <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+                <h4 class="modal-title">Manual Search</h4>
+            </div>
+            <div class="modal-body">
+                <p>Do you want to mark this episode as failed?</p>
+                <p class="text-warning"><small>The episode release name will be added to the failed history, preventing it to be downloaded again.</small></p>
+            </div>
+            <div class="modal-footer">
+                <button type="button" class="btn btn-danger" data-dismiss="modal">No</button>
+                <button type="button" class="btn btn-success" data-dismiss="modal">Yes</button>
+            </div>
+        </div>
+    </div>
+</div>
+
+<div id="manualSearchModalQuality" class="modal fade">
+    <div class="modal-dialog">
+        <div class="modal-content">
+            <div class="modal-header">
+                <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+                <h4 class="modal-title">Manual Search</h4>
+            </div>
+            <div class="modal-body">
+                <p>Do you want to include the current episode quality in the search?</p>
+                <p class="text-warning"><small>Choosing No will ignore any releases with the same episode quality as the one currently downloaded/snatched.</small></p>
+            </div>
+            <div class="modal-footer">
+                <button type="button" class="btn btn-danger" data-dismiss="modal">No</button>
+                <button type="button" class="btn btn-success" data-dismiss="modal">Yes</button>
+            </div>
+        </div>
+    </div>
+</div>
+
+<!--End - Bootstrap Modal-->
+
+#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
diff --git a/gui/slick/interfaces/default/editShow.tmpl b/gui/slick/interfaces/default/editShow.tmpl
index 75ecfd07e9851ae85c1c80fc42adeacb23c49942..2f24180004b5663e5d24ca95da6edbca6a708c13 100644
--- a/gui/slick/interfaces/default/editShow.tmpl
+++ b/gui/slick/interfaces/default/editShow.tmpl
@@ -81,10 +81,17 @@
 <select name="indexerLang" id="indexerLangSelect" class="form-control form-control-inline input-sm bfh-languages" data-language="en" data-available="#echo ','.join($sickbeard.indexerApi().config['valid_languages'])#"></select><br />
 <br />
 
-<b>Flatten files (no folders):</b> <input type="checkbox" name="flatten_folders" #if $show.flatten_folders == 1 and not $sickbeard.NAMING_FORCE_FOLDERS then "checked=\"checked\"" else ""# #if $sickbeard.NAMING_FORCE_FOLDERS then "disabled=\"disabled\"" else ""#/><br />
-<b>Paused:</b> <input type="checkbox" name="paused" #if $show.paused == 1 then "checked=\"checked\"" else ""# /><br />
-<b>Subtitles:</b> <input type="checkbox" name="subtitles"#if $show.subtitles == 1 and $sickbeard.USE_SUBTITLES then " checked=\"checked\"" else ""##if not $sickbeard.USE_SUBTITLES then " disabled=\"disabled\"" else ""#/><br />
-<br/>
+<b>Flatten files (no folders): </b> <input type="checkbox" name="flatten_folders" #if $show.flatten_folders == 1 and not $sickbeard.NAMING_FORCE_FOLDERS then "checked=\"checked\"" else ""# #if $sickbeard.NAMING_FORCE_FOLDERS then "disabled=\"disabled\"" else ""#/><br />
+(Disabled: episodes are grouped into season folders. Enabled: no season folders)<br/>
+<br />
+
+<b>Paused: </b> <input type="checkbox" name="paused" #if $show.paused == 1 then "checked=\"checked\"" else ""# /><br />
+(check this if you wish to pause the show; nothing will be downloaded until it is unpaused)<br/>
+<br />
+
+<b>Subtitles: </b> <input type="checkbox" name="subtitles"#if $show.subtitles == 1 and $sickbeard.USE_SUBTITLES then " checked=\"checked\"" else ""##if not $sickbeard.USE_SUBTITLES then " disabled=\"disabled\"" else ""#/><br />
+(check this if you wish to search for subtitles for this show)<br/>
+<br />
 
 <b>Scene Numbering: </b>
 <input type="checkbox" name="scene" #if $show.scene == 1 then "checked=\"checked\"" else ""# /><br/>
diff --git a/gui/slick/interfaces/default/home.tmpl b/gui/slick/interfaces/default/home.tmpl
index 5a406c25be462d8babd382a15e61d510f1faf16b..9d3cf6f9e4a8ccffcc5d947b8aea42be8699fff9 100644
--- a/gui/slick/interfaces/default/home.tmpl
+++ b/gui/slick/interfaces/default/home.tmpl
@@ -29,7 +29,8 @@
 #set $sql_statement += ' AND ((airdate <= ' + $today + ' AND (status = ' + str($SKIPPED) + ' OR status = ' + str($WANTED) + ' OR status = ' + str($FAILED) + ')) '
 #set $sql_statement += ' OR (status IN ' + status_quality + ') OR (status IN ' + status_download + '))) AS ep_total, '
 
-#set $sql_statement += ' (SELECT airdate FROM tv_episodes WHERE showid=tv_eps.showid AND airdate >= ' + $today + ' AND (status = ' + str($UNAIRED) + ' OR status = ' + str($WANTED) + ') ORDER BY airdate ASC LIMIT 1) AS ep_airs_next '
+#set $sql_statement += ' (SELECT airdate FROM tv_episodes WHERE showid=tv_eps.showid AND airdate >= ' + $today + ' AND (status = ' + str($UNAIRED) + ' OR status = ' + str($WANTED) + ') ORDER BY airdate ASC LIMIT 1) AS ep_airs_next, '
+#set $sql_statement += ' (SELECT airdate FROM tv_episodes WHERE showid=tv_eps.showid AND airdate <> 1 AND status <> ' + str($UNAIRED) + ' ORDER BY airdate DESC LIMIT 1) AS ep_airs_prev '
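+## ep_airs_prev picks the most recently aired episode; airdate <> 1 excludes
+## episodes with no known air date.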
 #set $sql_statement += ' FROM tv_episodes tv_eps GROUP BY showid'
 
 #set $sql_result = $myDB.select($sql_statement)
@@ -86,49 +87,57 @@
     });
 
     \$("#showListTableShows:has(tbody tr)").tablesorter({
-        sortList: [[5,1],[1,0]],
+        sortList: [[6,1],[2,0]],
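+        // Column indices are shifted by one compared to before: the new
+        // "Prev Ep" column sits at index 1, so the show name is now column 2.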
         textExtraction: {
 			0: function(node) { return \$(node).find("span").text().toLowerCase(); },
-			3: function(node) { return \$(node).find("span").text().toLowerCase(); },
-			4: function(node) { return \$(node).find("span").text(); },
-			5: function(node) { return \$(node).find("img").attr("alt"); }
+			1: function(node) { return \$(node).find("span").text().toLowerCase(); },
+			3: function(node) { return \$(node).find("span").prop("title").toLowerCase(); },
+			4: function(node) { return \$(node).find("span").text().toLowerCase(); },
+			5: function(node) { return \$(node).find("span").text(); },
+			6: function(node) { return \$(node).find("img").attr("alt"); }
         },
-        widgets: ['saveSort', 'zebra', 'stickyHeaders', 'filter'],
+        widgets: ['saveSort', 'zebra', 'stickyHeaders', 'filter', 'columnSelector'],
         headers: {
 			0: { sorter: 'isoDate' },
-			1: { sorter: 'loadingNames' },
-			3: { sorter: 'quality' },
-			4: { sorter: 'eps' }
+			1: { columnSelector: false },
+			2: { sorter: 'loadingNames' },
+			4: { sorter: 'quality' },
+			5: { sorter: 'eps' }
         },
 		widgetOptions : {
             filter_columnFilters: false,
-			filter_reset: '.resetshows'
+			filter_reset: '.resetshows',
+			columnSelector_mediaquery: false
 		},
         sortStable: true,
-        sortAppend: [[1,0]]
+        sortAppend: [[2,0]]
     });
 	
 	\$("#showListTableAnime:has(tbody tr)").tablesorter({
-        sortList: [[5,1],[1,0]],
+        sortList: [[6,1],[2,0]],
         textExtraction: {
-			0: function(node) { return \$(node).find("span").text().toLowerCase(); },
-			3: function(node) { return \$(node).find("span").text().toLowerCase(); },
-			4: function(node) { return \$(node).find("span").text(); },
-			5: function(node) { return \$(node).find("img").attr("alt"); }
+            0: function(node) { return \$(node).find("span").text().toLowerCase(); },
+            1: function(node) { return \$(node).find("span").text().toLowerCase(); },
+            3: function(node) { return \$(node).find("span").prop("title").toLowerCase(); },
+            4: function(node) { return \$(node).find("span").text().toLowerCase(); },
+            5: function(node) { return \$(node).find("span").text(); },
+            6: function(node) { return \$(node).find("img").attr("alt"); }
         },
-        widgets: ['saveSort', 'zebra', 'stickyHeaders', 'filter'],
+        widgets: ['saveSort', 'zebra', 'stickyHeaders', 'filter', 'columnSelector'],
         headers: {
 			0: { sorter: 'isoDate' },
-			1: { sorter: 'loadingNames' },
-			3: { sorter: 'quality' },
-			4: { sorter: 'eps' }
+			1: { columnSelector: false },
+			2: { sorter: 'loadingNames' },
+			4: { sorter: 'quality' },
+			5: { sorter: 'eps' }
         },
 		widgetOptions : {
             filter_columnFilters: false,
-			filter_reset: '.resetanime'
+			filter_reset: '.resetanime',
+			columnSelector_mediaquery: false
 		},
         sortStable: true,
-        sortAppend: [[1,0]]
+        sortAppend: [[2,0]]
     });
 
 	if (\$("#showListTableShows").find("tbody").find("tr").size() > 0)
@@ -160,7 +169,7 @@
             sortAscending: $sickbeard.POSTER_SORTDIR,
 			layoutMode: 'masonry',
 			masonry: {
-				columnWidth: 12,
+				columnWidth: 13,
 				isFitWidth: true
 			},
 			getSortData: {
@@ -200,6 +209,21 @@
         \$.get(this.options[this.selectedIndex].getAttribute('data-sort'));
 	});
 	
+	\$('#popover')
+	    .popover({
+	      placement: 'bottom',
+	      html: true, // required if content has HTML
+	      content: '<div id="popover-target"></div>'
+	    })
+	    // bootstrap popover event triggered when the popover opens
+	    .on('shown.bs.popover', function () {
+	      // call this function to copy the column selection code into the popover
+	      \$.tablesorter.columnSelector.attachTo( \$('#showListTableShows'), '#popover-target');
+	      #if $sickbeard.ANIME_SPLIT_HOME:
+	    	\$.tablesorter.columnSelector.attachTo( \$('#showListTableAnime'), '#popover-target');
+	      #end if
+	    });
+	
 });
 
 //-->
@@ -212,6 +236,9 @@
 #end if
 
 <div id="HomeLayout" class="pull-right" style="margin-top: -40px;">
+    #if $layout != 'poster':
+        <button id="popover" type="button" class="btn btn-inline">Select Column</button>
+    #end if
 	<span> Layout: 
 		<select name="layout" class="form-control form-control-inline input-sm" onchange="location = this.options[this.selectedIndex].value;">
 			<option value="$sbRoot/setHomeLayout/?layout=poster" #if $sickbeard.HOME_LAYOUT == "poster" then "selected=\"selected\"" else ""#>Poster</option>
@@ -221,7 +248,8 @@
 		</select>
 		#if $layout != 'poster':
         Search:
-			<input class="search form-control form-control-inline input-sm input200" type="search" data-column="1" placeholder="Search Show Name"> <button type="button" class="resetshows resetanime btn btn-inline">Reset Search</button>
+			<input class="search form-control form-control-inline input-sm input200" type="search" data-column="1" placeholder="Search Show Name">
+			<button type="button" class="resetshows resetanime btn btn-inline">Reset Search</button>
 		#end if
 	</span>
 	
@@ -413,12 +441,12 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
 				<td class="show-table">
 				    #if $layout != 'simple':	
 						#if $curShow.network:
-							<img class="show-network-image" src="$sbRoot/images/network/${curShow.network.replace(u"\u00C9",'e').replace(u"\u00E9",'e').lower()}.png" alt="$curShow.network" title="$curShow.network" />	
+							<span title="$curShow.network"><img class="show-network-image" src="$sbRoot/images/network/${curShow.network.replace(u"\u00C9",'e').replace(u"\u00E9",'e').lower()}.png" alt="$curShow.network" title="$curShow.network" /></span>	
 						#else:
-							<img class="show-network-image" src="$sbRoot/images/network/nonetwork.png" alt="No Network" title="No Network" />
+							<span title="No Network"><img class="show-network-image" src="$sbRoot/images/network/nonetwork.png" alt="No Network" title="No Network" /></span>
 						#end if
 					#else:
-						$curShow.network
+						<span title="$curShow.network">$curShow.network</span>
 					#end if	
 				</td>
 				
@@ -447,6 +475,7 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
     <thead>
 		<tr>
 			<th class="nowrap">Next Ep</th>
+			<th class="nowrap">Prev Ep</th>
 			<th>Show</th>
 			<th>Network</th>
 			<th>Quality</th>
@@ -459,12 +488,19 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
     <tfoot>
         <tr>
 			<th rowspan="1" colspan="1" align="center"><a href="$sbRoot/home/addShows/">Add Show</a></th>
-			<th rowspan="1" colspan="6"></th>
+			<th>&nbsp;</th>
+			<th>&nbsp;</th>
+			<th>&nbsp;</th>
+			<th>&nbsp;</th>
+			<th>&nbsp;</th>
+			<th>&nbsp;</th>
+			<th>&nbsp;</th>
         </tr>
     </tfoot>
     
-	<tbody>
 
+#if $sickbeard.showQueueScheduler.action.loadingShowList
+    <tbody class="tablesorter-infoOnly">
 #for $curLoadingShow in $sickbeard.showQueueScheduler.action.loadingShowList:
 
   #if $curLoadingShow.show != None and $curLoadingShow.show in $sickbeard.showList:
@@ -476,7 +512,7 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
     <td></td>
     <td>
     #if $curLoadingShow.show == None:
-    Loading... ($curLoadingShow.show_name)
+    <span title="">Loading... ($curLoadingShow.show_name)</span>
     #else:
     <a href="displayShow?show=$curLoadingShow.show.indexerid">$curLoadingShow.show.name</a>
     #end if
@@ -487,11 +523,16 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
     <td></td>
   </tr>
 #end for
+    </tbody>
+#end if
+
+    <tbody>
 
 $myShowList.sort(lambda x, y: cmp(x.name, y.name))
 #for $curShow in $myShowList:
 
     #set $cur_airs_next = ''
+    #set $cur_airs_prev = ''
     #set $cur_snatched = 0
     #set $cur_downloaded = 0
     #set $cur_total = 0
@@ -499,6 +540,8 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
 
     #if $curShow.indexerid in $show_stat:
         #set $cur_airs_next = $show_stat[$curShow.indexerid]['ep_airs_next']
+        #set $cur_airs_prev = $show_stat[$curShow.indexerid]['ep_airs_prev']
+        
 
         #set $cur_snatched = $show_stat[$curShow.indexerid]['ep_snatched']
         #if not $cur_snatched:
@@ -552,7 +595,20 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
     #else:
     	<td align="center" class="nowrap"></td>
     #end if
-	
+
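+    ## Render the previous episode's air date; parse_date_time can raise
+    ## ValueError on a malformed airdate, so fall back to a placeholder.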
+    #if $cur_airs_prev
+    #set $pdatetime = $sbdatetime.sbdatetime.convert_to_setting($network_timezones.parse_date_time($cur_airs_prev,$curShow.airs,$curShow.network))
+        <td align="center" class="nowrap"><div class="${fuzzydate}">
+        #try
+          $sbdatetime.sbdatetime.sbfdate($pdatetime)
+        #except ValueError
+          Invalid date
+        #end try
+        </div><span class="sort_data">$calendar.timegm($pdatetime.timetuple())</span></td>
+    #else:
+        <td align="center" class="nowrap"></td>
+    #end if
+
 	#if $layout == 'small':
 	    <td class="tvShow">
 	    	<div class="imgsmallposter $layout">
@@ -577,14 +633,14 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
     #if $layout != 'simple':	
 		<td align="center">
         #if $curShow.network:
-        	<img id="network" width="54" height="27" src="$sbRoot/images/network/${curShow.network.replace(u"\u00C9",'e').replace(u"\u00E9",'e').lower()}.png" alt="$curShow.network" title="$curShow.network" />
+        	<span title="$curShow.network"><img id="network" width="54" height="27" src="$sbRoot/images/network/${curShow.network.replace(u"\u00C9",'e').replace(u"\u00E9",'e').lower()}.png" alt="$curShow.network" title="$curShow.network" /></span>
     	#else:
-    		<img id="network" width="54" height="27" src="$sbRoot/images/network/nonetwork.png" alt="No Network" title="No Network" />
+    		<span title="No Network"><img id="network" width="54" height="27" src="$sbRoot/images/network/nonetwork.png" alt="No Network" title="No Network" /></span>
 		#end if
 		</td>
 	#else:
 		<td>
-			$curShow.network
+			<span title="$curShow.network">$curShow.network</span>
 		</td>
 	#end if		
 
diff --git a/gui/slick/interfaces/default/home_newShow.tmpl b/gui/slick/interfaces/default/home_newShow.tmpl
index 651d7a0133fa08feb86b7b0badf4678d5d012f6f..4c1b435832197f38d29088ce3d6d931a60c212bb 100644
--- a/gui/slick/interfaces/default/home_newShow.tmpl
+++ b/gui/slick/interfaces/default/home_newShow.tmpl
@@ -70,7 +70,7 @@
 				<br /><br />
 				<b>*</b> This will only affect the language of the retrieved metadata file contents and episode filenames.<br />
 				This <b>DOES NOT</b> allow SickRage to download non-english TV episodes!<br />
-				<b>**</b> The indexer implementation doesn't currently support specials.<br />
+				<b>** IMPORTANT:</b> The TVRAGE indexer implementation doesn't currently support <b>specials</b> or <b>banners/posters</b>.<br />
 				<br />
 				<div id="searchResults" style="height: 100%;"><br/></div>
 			#end if
diff --git a/gui/slick/interfaces/default/home_postprocess.tmpl b/gui/slick/interfaces/default/home_postprocess.tmpl
index 69fa1068b1f26fb977f8246d58cb4741b46d59e5..404e8c7773beb8d2ab66c8fbb9e3c613d26ff0b1 100644
--- a/gui/slick/interfaces/default/home_postprocess.tmpl
+++ b/gui/slick/interfaces/default/home_postprocess.tmpl
@@ -61,6 +61,15 @@
 				<span style="line-height: 0; font-size: 12px;"><i>&nbsp;(Check it to replace the file even if it exists at higher quality)</i></span>
 			</td>
 		</tr>
+		<tr>
+			<td>
+				<b>Delete files and folders:</b>
+			</td>
+			<td>
+				<input id="delete_on" name="delete_on" type="checkbox">
+				<span style="line-height: 0; font-size: 12px;"><i>&nbsp;(Check it to delete files and folders like auto processing)</i></span>
+			</td>
+		</tr>
 		#if $sickbeard.USE_FAILED_DOWNLOADS:
 		<tr>
 			<td>
diff --git a/gui/slick/interfaces/default/inc_bottom.tmpl b/gui/slick/interfaces/default/inc_bottom.tmpl
index 5c9dfd5730a199c7301dd63e2acccbea13651c26..edc6064962f2e94aee098542bcd601086b59037c 100644
--- a/gui/slick/interfaces/default/inc_bottom.tmpl
+++ b/gui/slick/interfaces/default/inc_bottom.tmpl
@@ -10,20 +10,20 @@
 	<div class="footer clearfix">
 		#set $myDB = $db.DBConnection()
 		#set $today = str($datetime.date.today().toordinal())
-		#set status_quality = '(' + ','.join([str(quality) for quality in $Quality.SNATCHED + $Quality.SNATCHED_PROPER]) + ')'
-		#set status_download = '(' + ','.join([str(quality) for quality in $Quality.DOWNLOADED + [$ARCHIVED]]) + ')'
-
-		#set $sql_statement = 'SELECT '
-
-		#set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN ' + $status_quality + ') AS ep_snatched, '
-		#set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN ' + $status_download + ') AS ep_downloaded, '
-
-		#set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 '
-		#set $sql_statement += ' AND ((airdate <= ' + $today + ' AND (status = ' + str($SKIPPED) + ' OR status = ' + str($WANTED) + ')) '
-		#set $sql_statement += ' OR (status IN ' + status_quality + ') OR (status IN ' + status_download + '))) AS ep_total '
-
-		#set $sql_statement += ' FROM tv_episodes tv_eps LIMIT 1'
-	
+        #set status_quality = '(%s)' % ','.join([str(quality) for quality in $Quality.SNATCHED + $Quality.SNATCHED_PROPER])
+        #set status_download = '(%s)' % ','.join([str(quality) for quality in $Quality.DOWNLOADED + [$ARCHIVED]])
+        
+        #set $sql_statement = 'SELECT '\
+            + '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN %s) AS ep_snatched, '\
+            % $status_quality\
+            + '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN %s) AS ep_downloaded, '\
+            % $status_download\
+            + '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 '\
+            + ' AND ((airdate <= %s AND (status = %s OR status = %s)) '\
+            % ($today, str($SKIPPED), str($WANTED))\
+            + ' OR (status IN %s) OR (status IN %s))) AS ep_total FROM tv_episodes tv_eps LIMIT 1'\
+            % ($status_quality, $status_download)
+    
 		#set $sql_result = $myDB.select($sql_statement)
 
 		#set $shows_total = len($sickbeard.showList)
@@ -38,8 +38,34 @@
 			#set $ep_downloaded = 0
 			#set $ep_total = 0
 		#end if
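+        ## Percentage of episodes downloaded; the regex trims the float to a
+        ## single decimal place.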
+        #set $ep_percentage = '' if $ep_total == 0 else '(<span class="footerhighlight">%s%%</span>)' % re.sub(r'(\d+)(\.\d)\d+', r'\1\2', str((float($ep_downloaded)/float($ep_total))*100))
 		
-		<div class="footerhighlight">$shows_total</div> Shows (<div class="footerhighlight">$shows_active</div> Active) | <div class="footerhighlight"><%=ep_downloaded%></div>#if $ep_snatched > 0 then " (" + "<div class=\"footerhighlight\">+" + str($ep_snatched) + "</div> snatched)" else ""# / <div class="footerhighlight">$ep_total</div> Episodes Downloaded | Daily Search: <div class="footerhighlight"><%=str(sickbeard.dailySearchScheduler.timeLeft()).split('.')[0]%></div> | Backlog Search: <div class="footerhighlight">$sbdatetime.sbdatetime.sbfdate($sickbeard.backlogSearchScheduler.nextRun())</div>	
+        #try
+            #set $localRoot = $sbRoot
+        #except NotFound
+            #set $localRoot = ''
+        #end try
+        #try
+            #set $localheader = $header
+        #except NotFound
+            #set $localheader = ''
+        #end try        
+            
+		<span class="footerhighlight">$shows_total</span> Shows (<span class="footerhighlight">$shows_active</span> Active) 
+        | <span class="footerhighlight">$ep_downloaded</span>
+                <%= (
+                        '',
+                        ' (<span class="footerhighlight">+%s</span> Snatched)'\
+                        % (
+                            str(ep_snatched),
+                            '<a href="%s/manage/episodeStatuses?whichStatus=2" title="View overview of snatched episodes">%s</a>'\
+                            % (localRoot, str(ep_snatched))
+                        )['Episode Overview' != localheader]
+                    )[0 < ep_snatched]
+                %>
+                &nbsp;/&nbsp;<span class="footerhighlight">$ep_total</span> Episodes Downloaded $ep_percentage       
+        | Daily Search: <span class="footerhighlight"><%=str(sickbeard.dailySearchScheduler.timeLeft()).split('.')[0]%></span> 
+        | Backlog Search: <span class="footerhighlight">$sbdatetime.sbdatetime.sbfdate($sickbeard.backlogSearchScheduler.nextRun())</span>
 
 	</div>
 		<!-- 
diff --git a/gui/slick/interfaces/default/inc_top.tmpl b/gui/slick/interfaces/default/inc_top.tmpl
index 07950ecedc8361cf458c944d8f56b24842035257..f4370bdc3ab64283abbe0e37ccd0046ac3fec0a9 100644
--- a/gui/slick/interfaces/default/inc_top.tmpl
+++ b/gui/slick/interfaces/default/inc_top.tmpl
@@ -54,6 +54,7 @@
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.selectboxes.min.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.tablesorter-2.17.7.min.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.tablesorter.widgets-2.17.7.min.js?$sbPID"></script>
+		<script type="text/javascript" src="$sbRoot/js/lib/jquery.tablesorter.widget-columnSelector-2.17.7.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.qtip-2.2.1.min.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/pnotify.custom.min.js"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.form-3.35.js?$sbPID"></script>
diff --git a/gui/slick/interfaces/default/manage.tmpl b/gui/slick/interfaces/default/manage.tmpl
index d32b4f948cab239bfafae0ce86222c5b3f2c0227..d3fd0a5dc486e3add760aa114bd9d7de5c268404 100644
--- a/gui/slick/interfaces/default/manage.tmpl
+++ b/gui/slick/interfaces/default/manage.tmpl
@@ -46,7 +46,8 @@
             4: function(node) { return \$(node).find("img").attr("alt"); },
 			5: function(node) { return \$(node).find("img").attr("alt"); },
 			6: function(node) { return \$(node).find("img").attr("alt"); },
-			7: function(node) { return \$(node).find("img").attr("alt"); }
+			7: function(node) { return \$(node).find("img").attr("alt"); },
+			8: function(node) { return \$(node).find("img").attr("alt"); },
         },
         widgets: ['zebra'],
         headers: {
@@ -58,15 +59,16 @@
             5: { sorter: 'anime'},              
             6: { sorter: 'flatfold'},
             7: { sorter: 'paused'},
-            8: { sorter: 'default_ep_status'},
-            9: { sorter: 'status'},
-           10: { sorter: false},
+            8: { sorter: 'subtitle'},
+            9: { sorter: 'default_ep_status'},
+           10: { sorter: 'status'},
            11: { sorter: false},
            12: { sorter: false},
            13: { sorter: false},
-		   14: { sorter: false},
+           14: { sorter: false},
 		   15: { sorter: false},
-		   16: { sorter: false}
+		   16: { sorter: false},
+		   17: { sorter: false}
         }
     }); 
 });
@@ -91,13 +93,14 @@
 			<th class="col-legend">Anime</th>
 			<th class="col-legend">Flat Folders</th>
 			<th class="col-legend">Paused</th>
+			<th class="col-legend">Subtitle</th>
 			<th class="col-legend">Default Ep<br>Status</th>
 			<th class="col-legend">Status</th>
 			<th width="1%">Update<br/><input type="checkbox" class="bulkCheck" id="updateCheck" /></th>
 			<th width="1%">Rescan<br/><input type="checkbox" class="bulkCheck" id="refreshCheck" /></th>
 			<th width="1%">Rename<br/><input type="checkbox" class="bulkCheck" id="renameCheck" /></th>
 		#if $sickbeard.USE_SUBTITLES:
-			<th width="1%">Subtitle<br/><input type="checkbox" class="bulkCheck" id="subtitleCheck" /></th>
+			<th width="1%">Search Subtitle<br/><input type="checkbox" class="bulkCheck" id="subtitleCheck" /></th>
 		#end if
 			<!-- <th>Force Metadata Regen <input type="checkbox" class="bulkCheck" id="metadataCheck" /></th>//-->
 			<th width="1%">Delete<br/><input type="checkbox" class="bulkCheck" id="deleteCheck" /></th>
@@ -108,7 +111,7 @@
 	<tfoot>
 		<tr>
 			<td rowspan="1" colspan="2" class="align-center alt"><input class="btn pull-left" type="button" value="Edit Selected" id="submitMassEdit" /></td>
-			<td rowspan="1" colspan="#if $sickbeard.USE_SUBTITLES then 13 else 12#" class="align-right alt"><input class="btn pull-right" type="button" value="Submit" id="submitMassUpdate" /></td>
+			<td rowspan="1" colspan="#if $sickbeard.USE_SUBTITLES then 15 else 14#" class="align-right alt"><input class="btn pull-right" type="button" value="Submit" id="submitMassUpdate" /></td>
 		</tr>
 	</tfoot>
 	
@@ -174,6 +177,7 @@
 			<td align="center"><img src="$sbRoot/images/#if int($curShow.is_anime) == 1 then "yes16.png\" alt=\"Y\"" else "no16.png\" alt=\"N\""# width="16" height="16" /></td>
 			<td align="center"><img src="$sbRoot/images/#if int($curShow.flatten_folders) == 1 then "yes16.png\" alt=\"Y\"" else "no16.png\" alt=\"N\""# width="16" height="16" /></td>
 			<td align="center"><img src="$sbRoot/images/#if int($curShow.paused) == 1 then "yes16.png\" alt=\"Y\"" else "no16.png\" alt=\"N\""# width="16" height="16" /></td>
+			<td align="center"><img src="$sbRoot/images/#if int($curShow.subtitles) == 1 then "yes16.png\" alt=\"Y\"" else "no16.png\" alt=\"N\""# width="16" height="16" /></td>
 			<td align="center">$statusStrings[$curShow.default_ep_status]</td>
 			<td align="center">$curShow.status</td>
 			<td align="center">$curUpdate</td>
diff --git a/gui/slick/interfaces/default/manage_manageSearches.tmpl b/gui/slick/interfaces/default/manage_manageSearches.tmpl
index f8d0e8b375997b8a72edc192e88d76f14a4fc43c..033be41d9500387417d723fd9bb2688455f84955 100644
--- a/gui/slick/interfaces/default/manage_manageSearches.tmpl
+++ b/gui/slick/interfaces/default/manage_manageSearches.tmpl
@@ -39,7 +39,7 @@ In Progress<br />
 <br />
 
 <h3>Find Propers Search:</h3>
-<a class="btn" href="$sbRoot/manage/manageSearches/forceFindPropers"><i class="icon-exclamation-sign"></i> Force</a>
+<a class="#if not $sickbeard.DOWNLOAD_PROPERS then 'btn disabled' else 'btn' #" href="$sbRoot/manage/manageSearches/forceFindPropers"><i class="icon-exclamation-sign"></i> Force</a>
 #if not $findPropersStatus:
 Not in progress<br />
 #else:
diff --git a/gui/slick/interfaces/default/trendingShows.tmpl b/gui/slick/interfaces/default/trendingShows.tmpl
index 894928377eabeb8482d5702ae9a562337b33b353..1eb9ae88cfcba5310bf5c16ce6dedbbfe5f5d754 100644
--- a/gui/slick/interfaces/default/trendingShows.tmpl
+++ b/gui/slick/interfaces/default/trendingShows.tmpl
@@ -92,10 +92,13 @@
             <i>$cur_show['show']['votes'] votes</i>
             <div class="traktShowTitleIcons">
                 <a href="$sbRoot/home/addShows/addTraktShow?indexer_id=${cur_show['show']['ids']['tvdb'] or cur_show['show']['ids']['tvrage']}&amp;showName=${cur_show['show']['title']}" class="btn btn-xs">Add Show</a>
+#if $blacklist
+                <a href="$sbRoot/home/addShows/addShowToBlacklist?indexer_id=${cur_show['show']['ids']['tvdb'] or cur_show['show']['ids']['tvrage']}" class="btn btn-xs">Remove Show</a>
+#end if
             </div>
         </div>
         </div>
     </div>
 #end for
 #end if
-</div>
\ No newline at end of file
+</div>
diff --git a/gui/slick/js/ajaxEpSearch.js b/gui/slick/js/ajaxEpSearch.js
index 6aa2db662e092a45cbcc21384360421bfa5e0c00..2434b77dfa328f4d012a6d9e82f92f058f4593ce 100644
--- a/gui/slick/js/ajaxEpSearch.js
+++ b/gui/slick/js/ajaxEpSearch.js
@@ -1,184 +1,264 @@
-var search_status_url = sbRoot + '/home/getManualSearchStatus';
-PNotify.prototype.options.maxonscreen = 5;
-
-$.fn.manualSearches = [];
-
-function check_manual_searches() {
-    var poll_interval = 5000;
-    showId = $('#showID').val()
-    if ( showId !== undefined) {
-        $.ajax({
-            url: search_status_url + '?show=' + showId,
-            success: function (data) {
-                if (data.episodes) {
-                	poll_interval = 5000;
-                }
-                else {
-                	poll_interval = 15000;
-                }
-            	
-                updateImages(data);
-                //cleanupManualSearches(data);
-            },
-            error: function () {
-                poll_interval = 30000;
-            },
-            type: "GET",
-            dataType: "json",
-            complete: function () {
-                setTimeout(check_manual_searches, poll_interval);
-            },
-            timeout: 15000 // timeout every 15 secs
-        });
+var search_status_url = sbRoot + '/home/getManualSearchStatus';
+// State shared between the confirmation modals and the search request
+var failedDownload = false;
+var qualityDownload = false;
+var selectedEpisode = "";
+PNotify.prototype.options.maxonscreen = 5;
+
+$.fn.manualSearches = [];
+
+function check_manual_searches() {
+    var poll_interval = 5000;
+    var showId = $('#showID').val();
+    var url = "";
+    // Poll a single show when a show ID is present on the page,
+    // otherwise poll the global manual-search status endpoint.
+    if ( showId !== undefined) {
+    	url = search_status_url + '?show=' + showId;
+    } else {
+    	url = search_status_url;
     }
-}
-
-
-function updateImages(data) {
-	$.each(data.episodes, function (name, ep) {
-		// Get td element for current ep
-		var loadingImage = 'loading16.gif';
-        var queuedImage = 'queued.png';
-        var searchImage = 'search16.png';
-        var status = null;
-        //Try to get the <a> Element
-        el=$('a[id=' + ep.season + 'x' + ep.episode+']');
-        img=el.children('img');
-        parent=el.parent();        
-        if (el) {
-        	if (ep.searchstatus == 'searching') {
-				//el=$('td#' + ep.season + 'x' + ep.episode + '.search img');
-				img.prop('title','Searching');
-				img.prop('alt','Searching');
-				img.prop('src',sbRoot+'/images/' + loadingImage);
-				disableLink(el);
-				// Update Status and Quality
-				var rSearchTerm = /(\w+)\s\((.+?)\)/;
-	            HtmlContent = ep.searchstatus;
-	            
-        	}
-        	else if (ep.searchstatus == 'queued') {
-				//el=$('td#' + ep.season + 'x' + ep.episode + '.search img');
-				img.prop('title','Queued');
-				img.prop('alt','queued');
-				img.prop('src',sbRoot+'/images/' + queuedImage );
-				disableLink(el);
-				HtmlContent = ep.searchstatus;
-			}
-        	else if (ep.searchstatus == 'finished') {
-				//el=$('td#' + ep.season + 'x' + ep.episode + '.search img');
-				img.prop('title','Searching');
-				img.prop('alt','searching');
-				img.parent().prop('class','epRetry');
-				img.prop('src',sbRoot+'/images/' + searchImage);
-				enableLink(el);
-				
-				// Update Status and Quality
-				var rSearchTerm = /(\w+)\s\((.+?)\)/;
-	            HtmlContent = ep.status.replace(rSearchTerm,"$1"+' <span class="quality '+ep.quality+'">'+"$2"+'</span>');
-	            parent.closest('tr').prop("class", ep.overview + " season-" + ep.season + " seasonstyle")
-		        
-			}
-        	// update the status column if it exists
-	        parent.siblings('.col-status').html(HtmlContent)
-        	
-        }
-		
-	});
-}
-
-$(document).ready(function () {
-
-	check_manual_searches();
-
-});
-
-function enableLink(el) {
-	el.on('click.disabled', false);
-	el.prop('enableClick', '1');
-	el.fadeTo("fast", 1)
-}
-
-function disableLink(el) {
-	el.off('click.disabled');
-	el.prop('enableClick', '0');
-	el.fadeTo("fast", .5)
-}
-
-(function(){
-
-	$.ajaxEpSearch = {
-	    defaults: {
-	        size:				16,
-	        colorRow:         	false,
-	        loadingImage:		'loading16.gif',
-	        queuedImage:		'queued.png',
-	        noImage:			'no16.png',
-	        yesImage:			'yes16.png'
-	    }
-	};
-
-	$.fn.ajaxEpSearch = function(options){
-		options = $.extend({}, $.ajaxEpSearch.defaults, options);
-		
-	    $('.epSearch, .epRetry').click(function(event){
-	    	event.preventDefault();
-	        
-	    	// Check if we have disabled the click
-	    	if ( $(this).prop('enableClick') == '0' ) {
-	    		return false;
-	    	}
-	    	
-	    	if ( $(this).prop('class') == "epRetry" ) {
-	    		if ( !confirm("Mark download as bad and retry?") )
-	                return false;
-	    	};
-	    	
-	    	var parent = $(this).parent();
-	        
-	    	// Create var for anchor
-	    	link = $(this);
-	    	
-	    	// Create var for img under anchor and set options for the loading gif
-	        img=$(this).children('img');
-	        img.prop('title','loading');
-			img.prop('alt','');
-			img.prop('src',sbRoot+'/images/' + options.loadingImage);
-			
-	        
-	        $.getJSON($(this).prop('href'), function(data){
-	            
-	        	// if they failed then just put the red X
-	            if (data.result == 'failure') {
-	                img_name = options.noImage;
-	                img_result = 'failed';
-
-	            // if the snatch was successful then apply the corresponding class and fill in the row appropriately
-	            } else {
-	                img_name = options.loadingImage;
-	                img_result = 'success';
-	                // color the row
-	                if (options.colorRow)
-	                	parent.parent().removeClass('skipped wanted qual good unaired').addClass('snatched');
-	                // applying the quality class
-                    var rSearchTerm = /(\w+)\s\((.+?)\)/;
-	                    HtmlContent = data.result.replace(rSearchTerm,"$1"+' <span class="quality '+data.quality+'">'+"$2"+'</span>');
-	                // update the status column if it exists
-                    parent.siblings('.col-status').html(HtmlContent)
-                    // Only if the queuing was successful, disable the onClick event of the loading image
-                    disableLink(link);
-	            }
-
-	            // put the corresponding image as the result of queuing of the manual search
-	            img.prop('title',img_result);
-				img.prop('alt',img_result);
-				img.prop('height', options.size);
-				img.prop('src',sbRoot+"/images/"+img_name);
-	        });
-	        // 
-	        
-	        // don't follow the link
-	        return false;
-	    });
-	}
-})();
+    
+    $.ajax({
+        url: url,
+        success: function (data) {
+            if (data.episodes) {
+            	poll_interval = 5000;
+            }
+            else {
+            	poll_interval = 15000;
+            }
+        	
+            updateImages(data);
+            //cleanupManualSearches(data);
+        },
+        error: function () {
+            poll_interval = 30000;
+        },
+        type: "GET",
+        dataType: "json",
+        complete: function () {
+            setTimeout(check_manual_searches, poll_interval);
+        },
+        timeout: 15000 // timeout every 15 secs
+    });
+}
+
+
+function updateImages(data) {
+	$.each(data.episodes, function (name, ep) {
+		// Get td element for current ep
+		var loadingImage = 'loading16.gif';
+        var queuedImage = 'queued.png';
+        var searchImage = 'search16.png';
+        var status = null;
+        //Try to get the <a> Element
+        el=$('a[id=' + ep.show + 'x' + ep.season + 'x' + ep.episode+']');
+        img=el.children('img');
+        parent=el.parent();        
+        if (el) {
+        	if (ep.searchstatus == 'searching') {
+				//el=$('td#' + ep.season + 'x' + ep.episode + '.search img');
+				img.prop('title','Searching');
+				img.prop('alt','Searching');
+				img.prop('src',sbRoot+'/images/' + loadingImage);
+				disableLink(el);
+				// Update Status and Quality
+				var rSearchTerm = /(\w+)\s\((.+?)\)/;
+	            HtmlContent = ep.searchstatus;
+	            
+        	}
+        	else if (ep.searchstatus == 'queued') {
+				//el=$('td#' + ep.season + 'x' + ep.episode + '.search img');
+				img.prop('title','Queued');
+				img.prop('alt','queued');
+				img.prop('src',sbRoot+'/images/' + queuedImage );
+				disableLink(el);
+				HtmlContent = ep.searchstatus;
+			}
+        	else if (ep.searchstatus == 'finished') {
+				//el=$('td#' + ep.season + 'x' + ep.episode + '.search img');
+				img.prop('title','Searching');
+				img.prop('alt','searching');
+				img.parent().prop('class','epRetry');
+				img.prop('src',sbRoot+'/images/' + searchImage);
+				enableLink(el);
+				
+				// Update Status and Quality
+				var rSearchTerm = /(\w+)\s\((.+?)\)/;
+	            HtmlContent = ep.status.replace(rSearchTerm,"$1"+' <span class="quality '+ep.quality+'">'+"$2"+'</span>');
+	            parent.closest('tr').prop("class", ep.overview + " season-" + ep.season + " seasonstyle")
+		        
+			}
+        	// update the status column if it exists
+	        parent.siblings('.col-status').html(HtmlContent)
+        	
+        }
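+        // Also refresh the matching "forceUpdate" link used on the episode
+        // overview page; rows that end up snatched are removed from that view.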
+        el_comEps=$('a[id=forceUpdate-' + ep.show + 'x' + ep.season + 'x' + ep.episode+']');
+        img_comEps=el_comEps.children('img');
+        if (el_comEps) {
+        	if (ep.searchstatus == 'searching') {
+        		img_comEps.prop('title','Searching');
+        		img_comEps.prop('alt','Searching');
+        		img_comEps.prop('src',sbRoot+'/images/' + loadingImage);
+        		disableLink(el_comEps);
+        	} else if (ep.searchstatus == 'queued') {
+        		img_comEps.prop('title','Queued');
+        		img_comEps.prop('alt','queued');
+        		img_comEps.prop('src',sbRoot+'/images/' + queuedImage );
+        	} else if (ep.searchstatus == 'finished') {
+        		img_comEps.prop('title','Manual Search');
+        		img_comEps.prop('alt','[search]');
+        		img_comEps.prop('src',sbRoot+'/images/' + searchImage);
+        		if (ep.overview == 'snatched') {
+        			el_comEps.closest('tr').remove();
+        		} else {
+        			enableLink(el_comEps);
+        		}
+        	}
+        }
+	});
+}
+
+$(document).ready(function () {
+
+	check_manual_searches();
+
+});
+
+function enableLink(el) {
+	el.on('click.disabled', false);
+	el.prop('enableClick', '1');
+	el.fadeTo("fast", 1)
+}
+
+function disableLink(el) {
+	el.off('click.disabled');
+	el.prop('enableClick', '0');
+	el.fadeTo("fast", .5)
+}
+
+(function(){
+
+	$.ajaxEpSearch = {
+	    defaults: {
+	        size:				16,
+	        colorRow:         	false,
+	        loadingImage:		'loading16.gif',
+	        queuedImage:		'queued.png',
+	        noImage:			'no16.png',
+	        yesImage:			'yes16.png'
+	    }
+	};
+
+	$.fn.ajaxEpSearch = function(options){
+		options = $.extend({}, $.ajaxEpSearch.defaults, options);
+		
+		$('.epRetry').click(function(event){
+	    	event.preventDefault();
+			
+			// Check if we have disabled the click
+	    	if ( $(this).prop('enableClick') == '0' ) {
+	    		return false;
+	    	};
+			
+			selectedEpisode = $(this)
+			
+			$("#manualSearchModalFailed").modal('show');
+		});
+		
+		$('.epSearch').click(function(event){
+	    	event.preventDefault();
+			
+			// Check if we have disabled the click
+	    	if ( $(this).prop('enableClick') == '0' ) {
+	    		return false;
+	    	};
+			
+			selectedEpisode = $(this);
+			
+			if ($(this).parent().parent().children(".col-status").children(".quality").length) {
+				$("#manualSearchModalQuality").modal('show');
+			} else {
+				manualSearch();
+			}
+		});
+		
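+		// Two-step confirmation: a retry first asks whether to mark the old
+		// download as failed, then both paths ask whether to include the
+		// current quality before the search is submitted.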
+		$('#manualSearchModalFailed .btn').click(function(){
+			val=$(this).text();
+			if(val=='Yes'){
+				failedDownload = true;
+			} else {
+				failedDownload = false;
+			}
+			$("#manualSearchModalQuality").modal('show');
+		});
+		
+		$('#manualSearchModalQuality .btn').click(function(){
+			val=$(this).text();
+			if(val=='Yes'){
+				qualityDownload = true;
+			} else {
+				qualityDownload = false;
+			}
+			manualSearch();
+		});
+		
+		function manualSearch(){
+			var parent = selectedEpisode.parent();
+	        
+	    	// Create var for anchor
+	    	link = selectedEpisode;
+	    	
+	    	// Create var for img under anchor and set options for the loading gif
+	        img=selectedEpisode.children('img');
+	        img.prop('title','loading');
+			img.prop('alt','');
+			img.prop('src',sbRoot+'/images/' + options.loadingImage);
+			
+			var url = selectedEpisode.prop('href');
+			
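+			// A retry that should not be flagged as failed is downgraded to a
+			// plain search; downCurQuality tells the server whether releases at
+			// the current quality are acceptable.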
+			if (failedDownload === false) {
+				url = url.replace("retryEpisode", "searchEpisode"); 
+			}
+			
+			if (qualityDownload === true) {
+				url = url + "&downCurQuality=1";
+			} else {
+				url = url + "&downCurQuality=0";
+			}
+			
+	        $.getJSON(url, function(data){
+	            
+	        	// if they failed then just put the red X
+	            if (data.result == 'failure') {
+	                img_name = options.noImage;
+	                img_result = 'failed';
+
+	            // if the snatch was successful then apply the corresponding class and fill in the row appropriately
+	            } else {
+	                img_name = options.loadingImage;
+	                img_result = 'success';
+	                // color the row
+	                if (options.colorRow)
+	                	parent.parent().removeClass('skipped wanted qual good unaired').addClass('snatched');
+	                // applying the quality class
+                    var rSearchTerm = /(\w+)\s\((.+?)\)/;
+	                    HtmlContent = data.result.replace(rSearchTerm,"$1"+' <span class="quality '+data.quality+'">'+"$2"+'</span>');
+	                // update the status column if it exists
+                    parent.siblings('.col-status').html(HtmlContent)
+                    // Only if the queuing was successful, disable the onClick event of the loading image
+                    disableLink(link);
+	            }
+
+	            // put the corresponding image as the result of queuing of the manual search
+	            img.prop('title',img_result);
+				img.prop('alt',img_result);
+				img.prop('height', options.size);
+				img.prop('src',sbRoot+"/images/"+img_name);
+	        });
+	        // don't follow the link
+	        return false;
+		};
+		
+	};
+})();
diff --git a/gui/slick/js/lib/jquery.tablesorter.widget-columnSelector-2.17.7.js b/gui/slick/js/lib/jquery.tablesorter.widget-columnSelector-2.17.7.js
new file mode 100644
index 0000000000000000000000000000000000000000..22d641b83eeda0c3453995426da262916872ec58
--- /dev/null
+++ b/gui/slick/js/lib/jquery.tablesorter.widget-columnSelector-2.17.7.js
@@ -0,0 +1,317 @@
+/* Column Selector/Responsive table widget (beta) for TableSorter 5/22/2014 (v2.17.0)
+ * Requires tablesorter v2.8+ and jQuery 1.7+
+ * by Justin Hallett & Rob Garrison
+ */
+/*jshint browser:true, jquery:true, unused:false */
+/*global jQuery: false */
+;(function($){
+"use strict";
+
+var ts = $.tablesorter,
+namespace = '.tscolsel',
+tsColSel = ts.columnSelector = {
+
+	queryAll   : '@media only all { [columns] { display: none; } }',
+	queryBreak : '@media all and (min-width: [size]) { [columns] { display: table-cell; } }',
+
+	init: function(table, c, wo) {
+		var $t, colSel;
+
+		// abort if no input is contained within the layout
+		$t = $(wo.columnSelector_layout);
+		if (!$t.find('input').add( $t.filter('input') ).length) {
+			if (c.debug) {
+				ts.log('*** ERROR: Column Selector aborting, no input found in the layout! ***');
+			}
+			return;
+		}
+
+		// unique table class name
+		c.tableId = 'tablesorter' + new Date().getTime();
+		c.$table.addClass( c.tableId );
+
+		// build column selector/state array
+		colSel = c.selector = { $container : $(wo.columnSelector_container || '<div>') };
+		colSel.$style = $('<style></style>').prop('disabled', true).appendTo('head');
+		colSel.$breakpoints = $('<style></style>').prop('disabled', true).appendTo('head');
+
+		colSel.isInitializing = true;
+		tsColSel.setupSelector(table, c, wo);
+
+		if (wo.columnSelector_mediaquery) {
+			tsColSel.setupBreakpoints(c, wo);
+		}
+
+		colSel.isInitializing = false;
+		if (colSel.$container.length) {
+			tsColSel.updateCols(c, wo);
+		}
+
+		c.$table
+			.off('refreshColumnSelector' + namespace)
+			.on('refreshColumnSelector' + namespace, function(){
+				// make sure we're using current config settings
+				var c = this.config;
+				tsColSel.updateBreakpoints(c, c.widgetOptions);
+				tsColSel.updateCols(c, c.widgetOptions);
+			});
+
+	},
+
+	setupSelector: function(table, c, wo) {
+		var name,
+			colSel = c.selector,
+			$container = colSel.$container,
+			useStorage = wo.columnSelector_saveColumns && ts.storage,
+			// get stored column states
+			saved = useStorage ? ts.storage( table, 'tablesorter-columnSelector' ) : [],
+			state = useStorage ? ts.storage( table, 'tablesorter-columnSelector-auto') : {};
+
+		// initial states
+		colSel.auto = $.isEmptyObject(state) || $.type(state.auto) !== "boolean" ? wo.columnSelector_mediaqueryState : state.auto;
+		colSel.states = [];
+		colSel.$column = [];
+		colSel.$wrapper = [];
+		colSel.$checkbox = [];
+		// populate the selector container
+		c.$table.children('thead').find('tr:first th', table).each(function() {
+			var $this = $(this),
+				// if no data-priority is assigned, default to 1, but don't remove it from the selector list
+				priority = $this.attr(wo.columnSelector_priority) || 1,
+				colId = $this.attr('data-column'),
+				state = ts.getData(this, c.headers[colId], 'columnSelector');
+
+
+			// if this column is not hidable at all
+			// include getData check (includes "columnSelector-false" class, data attribute, etc)
+			if ( isNaN(priority) && priority.length > 0 || state === 'disable' ||
+				( wo.columnSelector_columns[colId] && wo.columnSelector_columns[colId] === 'disable') ) {
+				return true; // goto next
+			}
+
+			// set default state; storage takes priority
+			colSel.states[colId] = saved && typeof(saved[colId]) !== 'undefined' ?
+				saved[colId] : typeof(wo.columnSelector_columns[colId]) !== 'undefined' ?
+				wo.columnSelector_columns[colId] : (state === 'true' || !(state === 'false'));
+			colSel.$column[colId] = $(this);
+
+			// set default col title
+			name = $this.attr(wo.columnSelector_name) || $this.text();
+
+			if ($container.length) {
+				colSel.$wrapper[colId] = $(wo.columnSelector_layout.replace(/\{name\}/g, name)).appendTo($container);
+				colSel.$checkbox[colId] = colSel.$wrapper[colId]
+					// input may not be wrapped within the layout template
+					.find('input').add( colSel.$wrapper[colId].filter('input') )
+					.attr('data-column', colId)
+					.prop('checked', colSel.states[colId])
+					.on('change', function(){
+						colSel.states[colId] = this.checked;
+						tsColSel.updateCols(c, wo);
+					}).change();
+			}
+		});
+
+	},
+
+	setupBreakpoints: function(c, wo){
+		var colSel = c.selector;
+
+		// add responsive breakpoints
+		if (wo.columnSelector_mediaquery) {
+			// used by window resize function
+			colSel.lastIndex = -1;
+			wo.columnSelector_breakpoints.sort();
+			tsColSel.updateBreakpoints(c, wo);
+			c.$table
+				.off('updateAll' + namespace)
+				.on('updateAll' + namespace, function(){
+					tsColSel.updateBreakpoints(c, wo);
+					tsColSel.updateCols(c, wo);
+				});
+		}
+
+		if (colSel.$container.length) {
+			// Add media queries toggle
+			if (wo.columnSelector_mediaquery) {
+				colSel.$auto = $( wo.columnSelector_layout.replace(/\{name\}/g, wo.columnSelector_mediaqueryName) ).prependTo(colSel.$container);
+				colSel.$auto
+					// needed in case the input in the layout is not wrapped
+					.find('input').add( colSel.$auto.filter('input') )
+					.attr('data-column', 'auto')
+					.prop('checked', colSel.auto)
+					.on('change', function(){
+						colSel.auto = this.checked;
+						$.each( colSel.$checkbox, function(i, $cb){
+							if ($cb) {
+								$cb[0].disabled = colSel.auto;
+								colSel.$wrapper[i].toggleClass('disabled', colSel.auto);
+							}
+						});
+						if (wo.columnSelector_mediaquery) {
+							tsColSel.updateBreakpoints(c, wo);
+						}
+						tsColSel.updateCols(c, wo);
+						// copy the column selector to a popup/tooltip
+						if (c.selector.$popup) {
+							c.selector.$popup.find('.tablesorter-column-selector')
+								.html( colSel.$container.html() )
+								.find('input').each(function(){
+									var indx = $(this).attr('data-column');
+									$(this).prop( 'checked', indx === 'auto' ? colSel.auto : colSel.states[indx] );
+								});
+						}
+						if (wo.columnSelector_saveColumns && ts.storage) {
+							ts.storage( c.$table[0], 'tablesorter-columnSelector-auto', { auto : colSel.auto } );
+						}
+					}).change();
+			}
+			// Add a bind on update to re-run col setup
+			c.$table.off('update' + namespace).on('update' + namespace, function() {
+				tsColSel.updateCols(c, wo);
+			});
+		}
+	},
+
+	updateBreakpoints: function(c, wo) {
+		var priority, column, breaks,
+			colSel = c.selector,
+			prefix = '.' + c.tableId,
+			mediaAll = [],
+			breakpts = '';
+		if (wo.columnSelector_mediaquery && !colSel.auto) {
+			colSel.$breakpoints.prop('disabled', true);
+			colSel.$style.prop('disabled', false);
+			return;
+		}
+
+		// only 6 breakpoints (same as jQuery Mobile)
+		for (priority = 0; priority < 6; priority++){
+			/*jshint loopfunc:true */
+			breaks = [];
+			c.$headers.filter('[' + wo.columnSelector_priority + '=' + (priority + 1) + ']').each(function(){
+				column = parseInt($(this).attr('data-column'), 10) + 1;
+				breaks.push(prefix + ' tr th:nth-child(' + column + ')');
+				breaks.push(prefix + ' tr td:nth-child(' + column + ')');
+			});
+			if (breaks.length) {
+				mediaAll = mediaAll.concat( breaks );
+				breakpts += tsColSel.queryBreak
+					.replace(/\[size\]/g, wo.columnSelector_breakpoints[priority])
+					.replace(/\[columns\]/g, breaks.join(','));
+			}
+		}
+		if (colSel.$style) {
+			colSel.$style.prop('disabled', true);
+		}
+		colSel.$breakpoints
+			.prop('disabled', false)
+			.html( tsColSel.queryAll.replace(/\[columns\]/g, mediaAll.join(',')) + breakpts );
+	},
+
+	updateCols: function(c, wo) {
+		if (wo.columnSelector_mediaquery && c.selector.auto || c.selector.isInitializing) {
+			return;
+		}
+		var column,
+			colSel = c.selector,
+			styles = [],
+			prefix = '.' + c.tableId;
+		colSel.$container.find('input[data-column]').filter('[data-column!="auto"]').each(function(){
+			if (!this.checked) {
+				column = parseInt( $(this).attr('data-column'), 10 ) + 1;
+				styles.push(prefix + ' tr th:nth-child(' + column + ')');
+				styles.push(prefix + ' tr td:nth-child(' + column + ')');
+			}
+		});
+		if (wo.columnSelector_mediaquery){
+			colSel.$breakpoints.prop('disabled', true);
+		}
+		if (colSel.$style) {
+			colSel.$style.prop('disabled', false).html( styles.length ? styles.join(',') + ' { display: none; }' : '' );
+		}
+		if (wo.columnSelector_saveColumns && ts.storage) {
+			ts.storage( c.$table[0], 'tablesorter-columnSelector', colSel.states );
+		}
+	},
+
+	attachTo : function(table, elm) {
+		table = $(table)[0];
+		var colSel, wo, indx,
+			c = table.config,
+			$popup = $(elm);
+		if ($popup.length && c) {
+			if (!$popup.find('.tablesorter-column-selector').length) {
+				// add a wrapper to add the selector into, in case the popup has other content
+				$popup.append('<span class="tablesorter-column-selector"></span>');
+			}
+			colSel = c.selector;
+			wo = c.widgetOptions;
+			$popup.find('.tablesorter-column-selector')
+				.html( colSel.$container.html() )
+				.find('input').each(function(){
+					var indx = $(this).attr('data-column');
+					$(this).prop( 'checked', indx === 'auto' ? colSel.auto : colSel.states[indx] );
+				});
+			colSel.$popup = $popup.on('change', 'input', function(){
+				// data input
+				indx = $(this).attr('data-column');
+				// update original popup
+				colSel.$container.find('input[data-column="' + indx + '"]')
+					.prop('checked', this.checked)
+					.trigger('change');
+			});
+		}
+	}
+
+};
+
+ts.addWidget({
+	id: "columnSelector",
+	priority: 10,
+	options: {
+		// target the column selector markup
+		columnSelector_container : null,
+		// column status, true = display, false = hide
+		// disable = do not display on list
+		columnSelector_columns : {},
+		// remember selected columns
+		columnSelector_saveColumns: true,
+
+		// container layout
+		columnSelector_layout : '<label><input type="checkbox">{name}</label>',
+		// data attribute containing column name to use in the selector container
+		columnSelector_name  : 'data-selector-name',
+
+		/* Responsive Media Query settings */
+		// enable/disable mediaquery breakpoints
+		columnSelector_mediaquery: true,
+		// toggle checkbox name
+		columnSelector_mediaqueryName: 'Auto: ',
+		// breakpoints checkbox initial setting
+		columnSelector_mediaqueryState: true,
+		// responsive table hides columns with priority 1-6 at these breakpoints
+		// see http://view.jquerymobile.com/1.3.2/dist/demos/widgets/table-column-toggle/#Applyingapresetbreakpoint
+		// *** set to false to disable ***
+		columnSelector_breakpoints : [ '20em', '30em', '40em', '50em', '60em', '70em' ],
+		// data attribute containing column priority
+		// duplicates how jQuery mobile uses priorities:
+		// http://view.jquerymobile.com/1.3.2/dist/demos/widgets/table-column-toggle/
+		columnSelector_priority : 'data-priority'
+
+	},
+	init: function(table, thisWidget, c, wo) {
+		tsColSel.init(table, c, wo);
+	},
+	remove: function(table, c){
+		var csel = c.selector;
+		csel.$container.empty();
+		if (csel.$popup) { csel.$popup.empty(); }
+		csel.$style.remove();
+		csel.$breakpoints.remove();
+		c.$table.off('updateAll' + namespace + ' update' + namespace);
+	}
+
+});
+
+})(jQuery);
diff --git a/gui/slick/js/recommendedShows.js b/gui/slick/js/recommendedShows.js
index 9f7165b343182e89c9663034ae49c6da40f9fdf4..c1d1e28f93f529dc398ad26a3ad3d57a7da14482 100644
--- a/gui/slick/js/recommendedShows.js
+++ b/gui/slick/js/recommendedShows.js
@@ -10,37 +10,39 @@ $(document).ready(function () {
                 resultStr += '<b>No recommended shows found, update your watched shows list on trakt.tv.</b>';
             } else {
                 $.each(data.results, function (index, obj) {
-                    if (firstResult) {
-                        checked = ' checked';
-                        firstResult = false;
-                    } else {
-                        checked = '';
-                    }
-
-                    var whichSeries = obj.join('|');
-
-                    resultStr += '<input type="radio" id="whichSeries" name="whichSeries" value="' + whichSeries + '"' + checked + ' /> ';
-                    resultStr += '<a href="' + anonURL + obj[1] + '" onclick="window.open(this.href, \'_blank\'); return false;"><b>' + obj[2] + '</b></a>';
-
-                    if (obj[4] !== null) {
-                        var startDate = new Date(obj[4]);
-                        var today = new Date();
-                        if (startDate > today) {
-                            resultStr += ' (will debut on ' + obj[4] + ')';
+                    if (obj[2] !== null) {
+                        if (firstResult) {
+                            checked = ' checked';
+                            firstResult = false;
                         } else {
-                            resultStr += ' (started on ' + obj[4] + ')';
+                            checked = '';
                         }
+
+                        var whichSeries = obj.join('|');
+
+                        resultStr += '<input type="radio" id="whichSeries" name="whichSeries" value="' + whichSeries + '"' + checked + ' /> ';
+                        resultStr += '<a href="' + anonURL + obj[1] + '" onclick="window.open(this.href, \'_blank\'); return false;"><b>' + obj[2] + '</b></a>';
+
+                        if (obj[4] !== null) {
+                            var startDate = new Date(obj[4]);
+                            var today = new Date();
+                            if (startDate > today) {
+                                resultStr += ' (will debut on ' + obj[4] + ')';
+                            } else {
+                                resultStr += ' (started on ' + obj[4] + ')';
+                            }
+                        }
+
+                        if (obj[0] !== null) {
+                            resultStr += ' [' + obj[0] + ']';
+                        }
+
+                        if (obj[3] !== null) {
+                            resultStr += '<br />' + obj[3];
+                        }
+
+                        resultStr += '<p /><br />';
                     }
-
-                    if (obj[0] !== null) {
-                        resultStr += ' [' + obj[0] + ']';
-                    }
-
-                    if (obj[3] !== null) {
-                        resultStr += '<br />' + obj[3];
-                    }
-
-                    resultStr += '<p /><br />';
                 });
                 resultStr += '</ul>';
             }
diff --git a/init.upstart b/init.upstart
new file mode 100755
index 0000000000000000000000000000000000000000..07e02588da19aa5bbedde5bba7fdcfadc1ba4d9c
--- /dev/null
+++ b/init.upstart
@@ -0,0 +1,48 @@
+# SickBeard
+#
+# Configuration Notes
+#
+#    - Adjust setuid and setgid to the user/group you want SickBeard to run as.
+#    - For all other settings edit /etc/default/sickbeard instead of this file.
+#      The following settings can be set in /etc/default/sickbeard and are used to run SickBeard.
+#       SB_HOME=         #$APP_PATH, the location of SickBeard.py, the default is /opt/sickbeard
+#       SB_DATA=         #$DATA_DIR, the location of sickbeard.db, cache, logs, the default is /opt/sickbeard
+#       PYTHON_BIN=      #$DAEMON, the location of the python binary, the default is /usr/bin/python
+#       SB_OPTS=         #$EXTRA_DAEMON_OPTS, extra CLI options for SickBeard, e.g. " --config=/home/sickbeard/config.ini"
+
+description "SickBeard Daemon"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+# give time to shutdown
+kill timeout 30
+
+setuid sickbeard
+setgid sickbeard
+
+respawn
+respawn limit 10 5
+
+script
+    if [ -f /etc/default/sickbeard ]; then
+        . /etc/default/sickbeard
+    else
+        echo "/etc/default/sickbeard not found using default settings.";
+    fi
+    # Path to app SB_HOME=path_to_app_SickBeard.py
+    APP_PATH=${SB_HOME-/opt/sickbeard}
+
+    # Data directory where sickbeard.db, cache and logs are stored
+    DATA_DIR=${SB_DATA-/opt/sickbeard}
+
+    # path to python bin
+    DAEMON=${PYTHON_BIN-/usr/bin/python}
+
+    # Extra daemon options like: SB_OPTS=" --config=/home/sickbeard/config.ini"
+    EXTRA_DAEMON_OPTS=${SB_OPTS-}
+
+    DAEMON_OPTS=" SickBeard.py -q --daemon --nolaunch --datadir=${DATA_DIR} ${EXTRA_DAEMON_OPTS}"
+    chdir $APP_PATH
+    exec $DAEMON $DAEMON_OPTS
+end script
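
For reference, a matching /etc/default/sickbeard override file could look like this (illustrative values; every variable is optional and falls back to the defaults shown in the script above):

    SB_HOME=/opt/sickbeard
    SB_DATA=/var/lib/sickbeard
    PYTHON_BIN=/usr/bin/python
    SB_OPTS=" --config=/home/sickbeard/config.ini"
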
diff --git a/lib/subliminal/videos.py b/lib/subliminal/videos.py
index fce64fc5ca53dca385abab0e6b7cfa193c21a275..b249cf69a13f215fa010ed47906b2128a897a3e6 100644
--- a/lib/subliminal/videos.py
+++ b/lib/subliminal/videos.py
@@ -1,303 +1,311 @@
-# -*- coding: utf-8 -*-
-# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
-#
-# This file is part of subliminal.
-#
-# subliminal is free software; you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-#
-# subliminal is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with subliminal.  If not, see <http://www.gnu.org/licenses/>.
-from . import subtitles
-from .language import Language
-from .utils import to_unicode
-import enzyme.core
-import guessit
-import hashlib
-import logging
-import mimetypes
-import os
-import struct
-import sys
-
-from sickbeard import encodingKludge as ek
-import sickbeard
-
-
-__all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo',
-           'scan', 'hash_opensubtitles', 'hash_thesubdb']
-logger = logging.getLogger("subliminal")
-
-#: Video extensions
-EXTENSIONS = ['.avi', '.mkv', '.mpg', '.mp4', '.m4v', '.mov', '.ogm', '.ogv', '.wmv',
-              '.divx', '.asf']
-
-#: Video mimetypes
-MIMETYPES = ['video/mpeg', 'video/mp4', 'video/quicktime', 'video/x-ms-wmv', 'video/x-msvideo',
-             'video/x-flv', 'video/x-matroska', 'video/x-matroska-3d']
-
-
-class Video(object):
-    """Base class for videos
-
-    :param string path: path
-    :param guess: guessed informations
-    :type guess: :class:`~guessit.guess.Guess`
-    :param string imdbid: imdbid
-
-    """
-    def __init__(self, path, guess, imdbid=None):
-        self.guess = guess
-        self.imdbid = imdbid
-        self._path = None
-        self.hashes = {}
-
-        if isinstance(path, unicode):
-            path = path.encode('utf-8')
-
-        self.release = path
-
-        if os.path.exists(path):
-            self._path = path
-            self.size = os.path.getsize(self._path)
-            self._compute_hashes()
-
-    @classmethod
-    def from_path(cls, path):
-        """Create a :class:`Video` subclass guessing all informations from the given path
-
-        :param string path: path
-        :return: video object
-        :rtype: :class:`Episode` or :class:`Movie` or :class:`UnknownVideo`
-
-        """
-        guess = guessit.guess_file_info(path, 'autodetect')
-        result = None
-        if guess['type'] == 'episode' and 'series' in guess and 'season' in guess and 'episodeNumber' in guess:
-            title = None
-            if 'title' in guess:
-                title = guess['title']
-            result = Episode(path, guess['series'], guess['season'], guess['episodeNumber'], title, guess)
-        if guess['type'] == 'movie' and 'title' in guess:
-            year = None
-            if 'year' in guess:
-                year = guess['year']
-            result = Movie(path, guess['title'], year, guess)
-        if not result:
-            result = UnknownVideo(path, guess)
-        if not isinstance(result, cls):
-            raise ValueError('Video is not of requested type')
-        return result
-
-    @property
-    def exists(self):
-        """Whether the video exists or not"""
-        if self._path:
-            return os.path.exists(self._path)
-        return False
-
-    @property
-    def path(self):
-        """Path to the video"""
-        return self._path
-
-    @path.setter
-    def path(self, value):
-        if not os.path.exists(value):
-            raise ValueError('Path does not exists')
-        self._path = value
-        self.size = os.path.getsize(self._path)
-        self._compute_hashes()
-
-    def _compute_hashes(self):
-        """Compute different hashes"""
-        self.hashes['OpenSubtitles'] = hash_opensubtitles(self.path)
-        self.hashes['TheSubDB'] = hash_thesubdb(self.path)
-
-    def scan(self):
-        """Scan and return associated subtitles
-
-        :return: associated subtitles
-        :rtype: list of :class:`~subliminal.subtitles.Subtitle`
-
-        """
-        if not self.exists:
-            return []
-        basepath = os.path.splitext(self.path)[0]
-        results = []
-        video_infos = None
-        try:
-            video_infos = enzyme.parse(self.path)
-            logger.debug(u'Succeeded parsing %s with enzyme: %r' % (self.path, video_infos)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-        except:
-            logger.debug(u'Failed parsing %s with enzyme' % self.path) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-        if isinstance(video_infos, enzyme.core.AVContainer):
-            results.extend([subtitles.EmbeddedSubtitle.from_enzyme(self.path, s) for s in video_infos.subtitles])
-        # cannot use glob here because it chokes if there are any square
-        # brackets inside the filename, so we have to use basic string
-        # startswith/endswith comparisons
-        folder, basename = os.path.split(basepath)
-        if folder == '':
-            folder = '.'
-        existing = [f for f in os.listdir(folder) if f.startswith(basename)]
-        if sickbeard.SUBTITLES_DIR:
-            subsDir = ek.ek(os.path.join, folder, sickbeard.SUBTITLES_DIR)
-            if ek.ek(os.path.isdir, subsDir):
-                existing.extend([f for f in os.listdir(subsDir) if f.startswith(basename)])
-        for path in existing:
-            for ext in subtitles.EXTENSIONS:
-                if path.endswith(ext):
-                    language = Language(path[len(basename) + 1:-len(ext)], strict=False)
-                    results.append(subtitles.ExternalSubtitle(path, language))
-        return results
-
-    def __unicode__(self):
-        return to_unicode(self.path or self.release)
-
-    def __str__(self):
-        return unicode(self).encode('utf-8')
-
-    def __repr__(self):
-        return '%s(%s)' % (self.__class__.__name__, self)
-
-    def __hash__(self):
-        return hash(self.path or self.release)
-
-
-class Episode(Video):
-    """Episode :class:`Video`
-
-    :param string path: path
-    :param string series: series
-    :param int season: season number
-    :param int episode: episode number
-    :param string title: title
-    :param guess: guessed informations
-    :type guess: :class:`~guessit.guess.Guess`
-    :param string tvdbid: tvdbid
-    :param string imdbid: imdbid
-
-    """
-    def __init__(self, path, series, season, episode, title=None, guess=None, tvdbid=None, imdbid=None):
-        super(Episode, self).__init__(path, guess, imdbid)
-        self.series = series
-        self.title = title
-        self.season = season
-        self.episode = episode
-        self.tvdbid = tvdbid
-
-
-class Movie(Video):
-    """Movie :class:`Video`
-
-    :param string path: path
-    :param string title: title
-    :param int year: year
-    :param guess: guessed informations
-    :type guess: :class:`~guessit.guess.Guess`
-    :param string imdbid: imdbid
-
-    """
-    def __init__(self, path, title, year=None, guess=None, imdbid=None):
-        super(Movie, self).__init__(path, guess, imdbid)
-        self.title = title
-        self.year = year
-
-
-class UnknownVideo(Video):
-    """Unknown video"""
-    pass
-
-
-def scan(entry, max_depth=3, scan_filter=None, depth=0):
-    """Scan a path for videos and subtitles
-
-    :param string entry: path
-    :param int max_depth: maximum folder depth
-    :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
-    :param int depth: starting depth
-    :return: found videos and subtitles
-    :rtype: list of (:class:`Video`, [:class:`~subliminal.subtitles.Subtitle`])
-
-    """
-
-    if isinstance(entry, unicode):
-        entry = entry.encode('utf-8')
-
-    if depth > max_depth and max_depth != 0:  # we do not want to search the whole file system except if max_depth = 0
-        return []
-    if os.path.isdir(entry):  # a dir? recurse
-        logger.debug(u'Scanning directory %s with depth %d/%d' % (entry, depth, max_depth)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-        result = []
-        for e in os.listdir(entry):
-            result.extend(scan(os.path.join(entry, e), max_depth, scan_filter, depth + 1))
-        return result
-    if os.path.isfile(entry) or depth == 0:
-        logger.debug(u'Scanning file %s with depth %d/%d' % (entry, depth, max_depth)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-        if depth != 0:  # trust the user: only check for valid format if recursing
-            if mimetypes.guess_type(entry)[0] not in MIMETYPES and os.path.splitext(entry)[1] not in EXTENSIONS:
-                return []
-            if scan_filter is not None and scan_filter(entry):
-                return []
-        video = Video.from_path(entry)
-        return [(video, video.scan())]
-    logger.warning(u'Scanning entry %s failed with depth %d/%d' % (entry, depth, max_depth)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-    return []  # anything else
-
-
-def hash_opensubtitles(path):
-    """Compute a hash using OpenSubtitles' algorithm
-
-    :param string path: path
-    :return: hash
-    :rtype: string
-
-    """
-    longlongformat = 'q'  # long long
-    bytesize = struct.calcsize(longlongformat)
-    with open(path, 'rb') as f:
-        filesize = os.path.getsize(path)
-        filehash = filesize
-        if filesize < 65536 * 2:
-            return None
-        for _ in range(65536 / bytesize):
-            filebuffer = f.read(bytesize)
-            (l_value,) = struct.unpack(longlongformat, filebuffer)
-            filehash += l_value
-            filehash = filehash & 0xFFFFFFFFFFFFFFFF  # to remain as 64bit number
-        f.seek(max(0, filesize - 65536), 0)
-        for _ in range(65536 / bytesize):
-            filebuffer = f.read(bytesize)
-            (l_value,) = struct.unpack(longlongformat, filebuffer)
-            filehash += l_value
-            filehash = filehash & 0xFFFFFFFFFFFFFFFF
-    returnedhash = '%016x' % filehash
-    logger.debug(u'Computed OpenSubtitle hash %s for %s' % (returnedhash, path)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-    return returnedhash
-
-
-def hash_thesubdb(path):
-    """Compute a hash using TheSubDB's algorithm
-
-    :param string path: path
-    :return: hash
-    :rtype: string
-
-    """
-    readsize = 64 * 1024
-    if os.path.getsize(path) < readsize:
-        return None
-    with open(path, 'rb') as f:
-        data = f.read(readsize)
-        f.seek(-readsize, os.SEEK_END)
-        data += f.read(readsize)
-    returnedhash = hashlib.md5(data).hexdigest()
-    logger.debug(u'Computed TheSubDB hash %s for %s' % (returnedhash, path)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-    return returnedhash
+# -*- coding: utf-8 -*-
+# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
+#
+# This file is part of subliminal.
+#
+# subliminal is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# subliminal is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with subliminal.  If not, see <http://www.gnu.org/licenses/>.
+from . import subtitles
+from .language import Language
+from .utils import to_unicode
+import enzyme.core
+import guessit
+import hashlib
+import logging
+import mimetypes
+import os
+import struct
+import sys
+
+from sickbeard import encodingKludge as ek
+import sickbeard
+
+
+__all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo',
+           'scan', 'hash_opensubtitles', 'hash_thesubdb']
+logger = logging.getLogger("subliminal")
+
+#: Video extensions
+EXTENSIONS = ['.avi', '.mkv', '.mpg', '.mp4', '.m4v', '.mov', '.ogm', '.ogv', '.wmv',
+              '.divx', '.asf']
+
+#: Video mimetypes
+MIMETYPES = ['video/mpeg', 'video/mp4', 'video/quicktime', 'video/x-ms-wmv', 'video/x-msvideo',
+             'video/x-flv', 'video/x-matroska', 'video/x-matroska-3d']
+
+
+class Video(object):
+    """Base class for videos
+
+    :param string path: path
+    :param guess: guessed information
+    :type guess: :class:`~guessit.guess.Guess`
+    :param string imdbid: imdbid
+
+    """
+    def __init__(self, path, guess, imdbid=None):
+        self.guess = guess
+        self.imdbid = imdbid
+        self._path = None
+        self.hashes = {}
+
+        if sys.platform == 'win32':
+            if isinstance(path, str):
+                path = unicode(path.encode('utf-8'))
+        else:
+            if isinstance(path, unicode):
+                path = path.encode('utf-8')
+
+        self.release = path
+
+        if os.path.exists(path):
+            self._path = path
+            self.size = os.path.getsize(self._path)
+            self._compute_hashes()
+
+    @classmethod
+    def from_path(cls, path):
+        """Create a :class:`Video` subclass guessing all informations from the given path
+
+        :param string path: path
+        :return: video object
+        :rtype: :class:`Episode` or :class:`Movie` or :class:`UnknownVideo`
+
+        """
+        guess = guessit.guess_file_info(path, 'autodetect')
+        result = None
+        if guess['type'] == 'episode' and 'series' in guess and 'season' in guess and 'episodeNumber' in guess:
+            title = None
+            if 'title' in guess:
+                title = guess['title']
+            result = Episode(path, guess['series'], guess['season'], guess['episodeNumber'], title, guess)
+        if guess['type'] == 'movie' and 'title' in guess:
+            year = None
+            if 'year' in guess:
+                year = guess['year']
+            result = Movie(path, guess['title'], year, guess)
+        if not result:
+            result = UnknownVideo(path, guess)
+        if not isinstance(result, cls):
+            raise ValueError('Video is not of requested type')
+        return result
+
+    @property
+    def exists(self):
+        """Whether the video exists or not"""
+        if self._path:
+            return os.path.exists(self._path)
+        return False
+
+    @property
+    def path(self):
+        """Path to the video"""
+        return self._path
+
+    @path.setter
+    def path(self, value):
+        if not os.path.exists(value):
+            raise ValueError('Path does not exist')
+        self._path = value
+        self.size = os.path.getsize(self._path)
+        self._compute_hashes()
+
+    def _compute_hashes(self):
+        """Compute different hashes"""
+        self.hashes['OpenSubtitles'] = hash_opensubtitles(self.path)
+        self.hashes['TheSubDB'] = hash_thesubdb(self.path)
+
+    def scan(self):
+        """Scan and return associated subtitles
+
+        :return: associated subtitles
+        :rtype: list of :class:`~subliminal.subtitles.Subtitle`
+
+        """
+        if not self.exists:
+            return []
+        basepath = os.path.splitext(self.path)[0]
+        results = []
+        video_infos = None
+        try:
+            video_infos = enzyme.parse(self.path)
+            logger.debug(u'Succeeded parsing %s with enzyme: %r' % (self.path, video_infos)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+        except:
+            logger.debug(u'Failed parsing %s with enzyme' % self.path) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+        if isinstance(video_infos, enzyme.core.AVContainer):
+            results.extend([subtitles.EmbeddedSubtitle.from_enzyme(self.path, s) for s in video_infos.subtitles])
+        # cannot use glob here because it chokes if there are any square
+        # brackets inside the filename, so we have to use basic string
+        # startswith/endswith comparisons
+        folder, basename = os.path.split(basepath)
+        if folder == '':
+            folder = '.'
+        existing = [f for f in os.listdir(folder) if f.startswith(basename)]
+        if sickbeard.SUBTITLES_DIR:
+            subsDir = ek.ek(os.path.join, folder, sickbeard.SUBTITLES_DIR)
+            if ek.ek(os.path.isdir, subsDir):
+                existing.extend([f for f in os.listdir(subsDir) if f.startswith(basename)])
+        for path in existing:
+            for ext in subtitles.EXTENSIONS:
+                if path.endswith(ext):
+                    language = Language(path[len(basename) + 1:-len(ext)], strict=False)
+                    results.append(subtitles.ExternalSubtitle(path, language))
+        return results
+
+    def __unicode__(self):
+        return to_unicode(self.path or self.release)
+
+    def __str__(self):
+        return unicode(self).encode('utf-8')
+
+    def __repr__(self):
+        return '%s(%s)' % (self.__class__.__name__, self)
+
+    def __hash__(self):
+        return hash(self.path or self.release)
+
+
+class Episode(Video):
+    """Episode :class:`Video`
+
+    :param string path: path
+    :param string series: series
+    :param int season: season number
+    :param int episode: episode number
+    :param string title: title
+    :param guess: guessed information
+    :type guess: :class:`~guessit.guess.Guess`
+    :param string tvdbid: tvdbid
+    :param string imdbid: imdbid
+
+    """
+    def __init__(self, path, series, season, episode, title=None, guess=None, tvdbid=None, imdbid=None):
+        super(Episode, self).__init__(path, guess, imdbid)
+        self.series = series
+        self.title = title
+        self.season = season
+        self.episode = episode
+        self.tvdbid = tvdbid
+
+
+class Movie(Video):
+    """Movie :class:`Video`
+
+    :param string path: path
+    :param string title: title
+    :param int year: year
+    :param guess: guessed information
+    :type guess: :class:`~guessit.guess.Guess`
+    :param string imdbid: imdbid
+
+    """
+    def __init__(self, path, title, year=None, guess=None, imdbid=None):
+        super(Movie, self).__init__(path, guess, imdbid)
+        self.title = title
+        self.year = year
+
+
+class UnknownVideo(Video):
+    """Unknown video"""
+    pass
+
+
+def scan(entry, max_depth=3, scan_filter=None, depth=0):
+    """Scan a path for videos and subtitles
+
+    :param string entry: path
+    :param int max_depth: maximum folder depth
+    :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
+    :param int depth: starting depth
+    :return: found videos and subtitles
+    :rtype: list of (:class:`Video`, [:class:`~subliminal.subtitles.Subtitle`])
+
+    """
+
+    if sys.platform == 'win32':
+        if isinstance(entry, str):
+            entry = unicode(entry.encode('utf-8'))
+    else:
+        if isinstance(entry, unicode):
+            entry = entry.encode('utf-8')
+
+    if depth > max_depth and max_depth != 0:  # we do not want to search the whole file system except if max_depth = 0
+        return []
+    if os.path.isdir(entry):  # a dir? recurse
+        logger.debug(u'Scanning directory %s with depth %d/%d' % (entry, depth, max_depth)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+        result = []
+        for e in os.listdir(entry):
+            result.extend(scan(os.path.join(entry, e), max_depth, scan_filter, depth + 1))
+        return result
+    if os.path.isfile(entry) or depth == 0:
+        logger.debug(u'Scanning file %s with depth %d/%d' % (entry, depth, max_depth)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+        if depth != 0:  # trust the user: only check for valid format if recursing
+            if mimetypes.guess_type(entry)[0] not in MIMETYPES and os.path.splitext(entry)[1] not in EXTENSIONS:
+                return []
+            if scan_filter is not None and scan_filter(entry):
+                return []
+        video = Video.from_path(entry)
+        return [(video, video.scan())]
+    logger.warning(u'Scanning entry %s failed with depth %d/%d' % (entry, depth, max_depth)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+    return []  # anything else
+
+
+def hash_opensubtitles(path):
+    """Compute a hash using OpenSubtitles' algorithm
+
+    :param string path: path
+    :return: hash
+    :rtype: string
+
+    """
+    longlongformat = 'q'  # long long
+    bytesize = struct.calcsize(longlongformat)
+    with open(path, 'rb') as f:
+        filesize = os.path.getsize(path)
+        filehash = filesize
+        if filesize < 65536 * 2:
+            return None
+        for _ in range(65536 / bytesize):
+            filebuffer = f.read(bytesize)
+            (l_value,) = struct.unpack(longlongformat, filebuffer)
+            filehash += l_value
+            filehash = filehash & 0xFFFFFFFFFFFFFFFF  # to remain as 64bit number
+        f.seek(max(0, filesize - 65536), 0)
+        for _ in range(65536 / bytesize):
+            filebuffer = f.read(bytesize)
+            (l_value,) = struct.unpack(longlongformat, filebuffer)
+            filehash += l_value
+            filehash = filehash & 0xFFFFFFFFFFFFFFFF
+    returnedhash = '%016x' % filehash
+    logger.debug(u'Computed OpenSubtitle hash %s for %s' % (returnedhash, path)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+    return returnedhash
+
+
+def hash_thesubdb(path):
+    """Compute a hash using TheSubDB's algorithm
+
+    :param string path: path
+    :return: hash
+    :rtype: string
+
+    """
+    readsize = 64 * 1024
+    if os.path.getsize(path) < readsize:
+        return None
+    with open(path, 'rb') as f:
+        data = f.read(readsize)
+        f.seek(-readsize, os.SEEK_END)
+        data += f.read(readsize)
+    returnedhash = hashlib.md5(data).hexdigest()
+    logger.debug(u'Computed TheSubDB hash %s for %s' % (returnedhash, path)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+    return returnedhash
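
The two hashing helpers are easy to sanity-check in isolation. A self-contained sketch of the scheme implemented by hash_thesubdb above (md5 over the first and last 64 KiB of the file), runnable against a throwaway file:

    import hashlib
    import os
    import tempfile

    readsize = 64 * 1024

    # build a throwaway file of at least 2 * 64 KiB
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(os.urandom(readsize * 3))
        path = f.name

    with open(path, 'rb') as f:
        data = f.read(readsize)           # first 64 KiB
        f.seek(-readsize, os.SEEK_END)
        data += f.read(readsize)          # last 64 KiB

    print(hashlib.md5(data).hexdigest())  # same digest hash_thesubdb(path) returns
    os.unlink(path)
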
diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py
index b70069de6f8416f5885bbb0d2555820b80cd6b00..066a97c77a926ee65805f38247ec561297508e75 100755
--- a/sickbeard/__init__.py
+++ b/sickbeard/__init__.py
@@ -37,8 +37,8 @@ from github import Github
 from sickbeard import providers, metadata, config, webserveInit
 from sickbeard.providers.generic import GenericProvider
 from providers import ezrss, btn, newznab, womble, thepiratebay, oldpiratebay, torrentleech, kat, iptorrents, \
-    omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, hounddawgs, nextgen, speedcd, nyaatorrents, fanzub, torrentbytes, animezb, \
-    freshontv, bitsoup, t411, tokyotoshokan, shazbat, rarbg, alpharatio, tntvillage, binsearch
+    omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, hounddawgs, nextgen, speedcd, nyaatorrents, animenzb, torrentbytes, animezb, \
+    freshontv, morethantv, bitsoup, t411, tokyotoshokan, shazbat, rarbg, alpharatio, tntvillage, binsearch
 from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \
     naming_ep_type
 from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
@@ -223,6 +223,7 @@ TORRENT_DIR = None
 DOWNLOAD_PROPERS = False
 CHECK_PROPERS_INTERVAL = None
 ALLOW_HIGH_PRIORITY = False
+SAB_FORCED = False
 RANDOMIZE_PROVIDERS = False
 
 AUTOPOSTPROCESSER_FREQUENCY = None
@@ -250,6 +251,7 @@ CREATE_MISSING_SHOW_DIRS = False
 RENAME_EPISODES = False
 AIRDATE_EPISODES = False
 PROCESS_AUTOMATICALLY = False
+NO_DELETE = False
 KEEP_PROCESSED_DIR = False
 PROCESS_METHOD = None
 DELRARCONTENTS = False
@@ -420,6 +422,7 @@ TRAKT_SYNC = False
 TRAKT_DEFAULT_INDEXER = None
 TRAKT_DISABLE_SSL_VERIFY = False
 TRAKT_TIMEOUT = 60
+TRAKT_BLACKLIST_NAME = ''
 
 USE_PYTIVO = False
 PYTIVO_NOTIFY_ONSNATCH = False
@@ -500,6 +503,7 @@ REQUIRE_WORDS = ""
 SYNC_FILES = "!sync,lftp-pget-status,part,bts"
 
 CALENDAR_UNPROTECTED = False
+NO_RESTART = False
 
 TMDB_API_KEY = 'edc5f123313769de83a71e157758030b'
 TRAKT_API_KEY = 'd4161a7a106424551add171e5470112e4afdaf2438e6ef2fe0548edc75924868'
@@ -517,13 +521,13 @@ def initialize(consoleLogging=True):
     with INIT_LOCK:
 
         global BRANCH, GIT_RESET, GIT_REMOTE, GIT_REMOTE_URL, CUR_COMMIT_HASH, CUR_COMMIT_BRANCH, ACTUAL_LOG_DIR, LOG_DIR, LOG_NR, LOG_SIZE, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, WEB_COOKIE_SECRET, API_KEY, API_ROOT, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
-            HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, RANDOMIZE_PROVIDERS, CHECK_PROPERS_INTERVAL, ALLOW_HIGH_PRIORITY, TORRENT_METHOD, \
+            HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, RANDOMIZE_PROVIDERS, CHECK_PROPERS_INTERVAL, ALLOW_HIGH_PRIORITY, SAB_FORCED, TORRENT_METHOD, \
             SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_CATEGORY_ANIME, SAB_HOST, \
             NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_CATEGORY_ANIME, NZBGET_PRIORITY, NZBGET_HOST, NZBGET_USE_HTTPS, backlogSearchScheduler, \
             TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, TORRENT_LABEL_ANIME, TORRENT_VERIFY_CERT, TORRENT_RPCURL, TORRENT_AUTH_TYPE, \
             USE_KODI, KODI_ALWAYS_ON, KODI_NOTIFY_ONSNATCH, KODI_NOTIFY_ONDOWNLOAD, KODI_NOTIFY_ONSUBTITLEDOWNLOAD, KODI_UPDATE_FULL, KODI_UPDATE_ONLYFIRST, \
             KODI_UPDATE_LIBRARY, KODI_HOST, KODI_USERNAME, KODI_PASSWORD, BACKLOG_FREQUENCY, \
-            USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_DISABLE_SSL_VERIFY, TRAKT_TIMEOUT, \
+            USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_DISABLE_SSL_VERIFY, TRAKT_TIMEOUT, TRAKT_BLACKLIST_NAME, \
             USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_NOTIFY_ONSUBTITLEDOWNLOAD, PLEX_UPDATE_LIBRARY, \
             PLEX_SERVER_HOST, PLEX_SERVER_TOKEN, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, DEFAULT_BACKLOG_FREQUENCY, MIN_BACKLOG_FREQUENCY, BACKLOG_STARTUP, SKIP_REMOVED_FILES, \
             showUpdateScheduler, __INITIALIZED__, LAUNCH_BROWSER, UPDATE_SHOWS_ON_START, UPDATE_SHOWS_ON_SNATCH, TRASH_REMOVE_SHOW, TRASH_ROTATE_LOGS, SORT_ARTICLE, showList, loadingShowList, \
@@ -535,7 +539,7 @@ def initialize(consoleLogging=True):
             USE_NMA, NMA_NOTIFY_ONSNATCH, NMA_NOTIFY_ONDOWNLOAD, NMA_NOTIFY_ONSUBTITLEDOWNLOAD, NMA_API, NMA_PRIORITY, \
             USE_PUSHALOT, PUSHALOT_NOTIFY_ONSNATCH, PUSHALOT_NOTIFY_ONDOWNLOAD, PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHALOT_AUTHORIZATIONTOKEN, \
             USE_PUSHBULLET, PUSHBULLET_NOTIFY_ONSNATCH, PUSHBULLET_NOTIFY_ONDOWNLOAD, PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD, PUSHBULLET_API, PUSHBULLET_DEVICE, \
-            versionCheckScheduler, VERSION_NOTIFY, AUTO_UPDATE, NOTIFY_ON_UPDATE, PROCESS_AUTOMATICALLY, UNPACK, CPU_PRESET, \
+            versionCheckScheduler, VERSION_NOTIFY, AUTO_UPDATE, NOTIFY_ON_UPDATE, PROCESS_AUTOMATICALLY, NO_DELETE, UNPACK, CPU_PRESET, \
             KEEP_PROCESSED_DIR, PROCESS_METHOD, DELRARCONTENTS, TV_DOWNLOAD_DIR, MIN_DAILYSEARCH_FREQUENCY, DEFAULT_UPDATE_FREQUENCY, MIN_UPDATE_FREQUENCY, UPDATE_FREQUENCY, \
             showQueueScheduler, searchQueueScheduler, ROOT_DIRS, CACHE_DIR, ACTUAL_CACHE_DIR, TIMEZONE_DISPLAY, \
             NAMING_PATTERN, NAMING_MULTI_EP, NAMING_ANIME_MULTI_EP, NAMING_FORCE_FOLDERS, NAMING_ABD_PATTERN, NAMING_CUSTOM_ABD, NAMING_SPORTS_PATTERN, NAMING_CUSTOM_SPORTS, NAMING_ANIME_PATTERN, NAMING_CUSTOM_ANIME, NAMING_STRIP_YEAR, \
@@ -552,7 +556,7 @@ def initialize(consoleLogging=True):
             NEWZBIN, NEWZBIN_USERNAME, NEWZBIN_PASSWORD, GIT_PATH, MOVE_ASSOCIATED_FILES, SYNC_FILES, POSTPONE_IF_SYNC_FILES, dailySearchScheduler, NFO_RENAME, \
             GUI_NAME, HOME_LAYOUT, HISTORY_LAYOUT, DISPLAY_SHOW_SPECIALS, COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, COMING_EPS_MISSED_RANGE, DISPLAY_FILESIZE, FUZZY_DATING, TRIM_ZERO, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, THEME_NAME, \
             POSTER_SORTBY, POSTER_SORTDIR, \
-            METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, REQUIRE_WORDS, CALENDAR_UNPROTECTED, CREATE_MISSING_SHOW_DIRS, \
+            METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, REQUIRE_WORDS, CALENDAR_UNPROTECTED, NO_RESTART, CREATE_MISSING_SHOW_DIRS, \
             ADD_SHOWS_WO_DIR, USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, SUBTITLES_MULTI, subtitlesFinderScheduler, \
             USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, TMDB_API_KEY, DEBUG, PROXY_SETTING, PROXY_INDEXERS, \
             AUTOPOSTPROCESSER_FREQUENCY, SHOWUPDATE_HOUR, DEFAULT_AUTOPOSTPROCESSER_FREQUENCY, MIN_AUTOPOSTPROCESSER_FREQUENCY, \
@@ -585,6 +589,9 @@ def initialize(consoleLogging=True):
         CheckSection(CFG, 'Pushbullet')
         CheckSection(CFG, 'Subtitles')
 
+        # Needs to be read before any passwords, since they are decrypted using it
+        ENCRYPTION_VERSION = check_setting_int(CFG, 'General', 'encryption_version', 0)
+
         GIT_AUTOISSUES = bool(check_setting_int(CFG, 'General', 'git_autoissues', 0))
 
         # git login info
@@ -701,7 +708,6 @@ def initialize(consoleLogging=True):
         WEB_IPV6 = bool(check_setting_int(CFG, 'General', 'web_ipv6', 0))
         WEB_ROOT = check_setting_str(CFG, 'General', 'web_root', '').rstrip("/")
         WEB_LOG = bool(check_setting_int(CFG, 'General', 'web_log', 0))
-        ENCRYPTION_VERSION = check_setting_int(CFG, 'General', 'encryption_version', 0)
         WEB_USERNAME = check_setting_str(CFG, 'General', 'web_username', '', censor_log=True)
         WEB_PASSWORD = check_setting_str(CFG, 'General', 'web_password', '', censor_log=True)
         WEB_COOKIE_SECRET = check_setting_str(CFG, 'General', 'web_cookie_secret', helpers.generateCookieSecret(), censor_log=True)
@@ -828,6 +834,7 @@ def initialize(consoleLogging=True):
 
         TV_DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'tv_download_dir', '')
         PROCESS_AUTOMATICALLY = bool(check_setting_int(CFG, 'General', 'process_automatically', 0))
+        NO_DELETE = bool(check_setting_int(CFG, 'General', 'no_delete', 0))
         UNPACK = bool(check_setting_int(CFG, 'General', 'unpack', 0))
         RENAME_EPISODES = bool(check_setting_int(CFG, 'General', 'rename_episodes', 1))
         AIRDATE_EPISODES = bool(check_setting_int(CFG, 'General', 'airdate_episodes', 0))
@@ -855,6 +862,7 @@ def initialize(consoleLogging=True):
         SAB_CATEGORY = check_setting_str(CFG, 'SABnzbd', 'sab_category', 'tv')
         SAB_CATEGORY_ANIME = check_setting_str(CFG, 'SABnzbd', 'sab_category_anime', 'anime')
         SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
+        SAB_FORCED = bool(check_setting_int(CFG, 'SABnzbd', 'sab_forced', 0))
 
         NZBGET_USERNAME = check_setting_str(CFG, 'NZBget', 'nzbget_username', 'nzbget', censor_log=True)
         NZBGET_PASSWORD = check_setting_str(CFG, 'NZBget', 'nzbget_password', 'tegbzn6789', censor_log=True)
@@ -989,6 +997,7 @@ def initialize(consoleLogging=True):
         TRAKT_DEFAULT_INDEXER = check_setting_int(CFG, 'Trakt', 'trakt_default_indexer', 1)
         TRAKT_DISABLE_SSL_VERIFY = bool(check_setting_int(CFG, 'Trakt', 'trakt_disable_ssl_verify', 0))
         TRAKT_TIMEOUT = check_setting_int(CFG, 'Trakt', 'trakt_timeout', 30)
+        TRAKT_BLACKLIST_NAME = check_setting_str(CFG, 'Trakt', 'trakt_blacklist_name', '')
 
         CheckSection(CFG, 'pyTivo')
         USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0))
@@ -1057,6 +1066,8 @@ def initialize(consoleLogging=True):
         REQUIRE_WORDS = check_setting_str(CFG, 'General', 'require_words', REQUIRE_WORDS)
 
         CALENDAR_UNPROTECTED = bool(check_setting_int(CFG, 'General', 'calendar_unprotected', 0))
+
+        NO_RESTART = bool(check_setting_int(CFG, 'General', 'no_restart', 0))
 
         EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'General', 'extra_scripts', '').split('|') if
                          x.strip()]
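
Each of the new flags added in this file (NO_DELETE, NO_RESTART, SAB_FORCED) follows the same round trip: read with check_setting_int and coerced to bool, written back by save_config as 0/1. A simplified stand-in for that convention (sketch only; the real check_setting_int lives in sickbeard.config and does more, such as writing defaults back):

    def check_setting_int(config, section, key, default):
        # simplified: look the key up, fall back to the default on any problem
        try:
            return int(config[section][key])
        except (KeyError, TypeError, ValueError):
            return default

    cfg = {'General': {'no_restart': '1'}}
    NO_RESTART = bool(check_setting_int(cfg, 'General', 'no_restart', 0))
    cfg['General']['no_restart'] = int(NO_RESTART)  # persisted as 0/1
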
@@ -1613,6 +1624,7 @@ def save_config():
     new_config['General']['postpone_if_sync_files'] = int(POSTPONE_IF_SYNC_FILES)
     new_config['General']['nfo_rename'] = int(NFO_RENAME)
     new_config['General']['process_automatically'] = int(PROCESS_AUTOMATICALLY)
+    new_config['General']['no_delete'] = int(NO_DELETE)
     new_config['General']['unpack'] = int(UNPACK)
     new_config['General']['rename_episodes'] = int(RENAME_EPISODES)
     new_config['General']['airdate_episodes'] = int(AIRDATE_EPISODES)
@@ -1624,6 +1636,7 @@ def save_config():
     new_config['General']['ignore_words'] = IGNORE_WORDS
     new_config['General']['require_words'] = REQUIRE_WORDS
     new_config['General']['calendar_unprotected'] = int(CALENDAR_UNPROTECTED)
+    new_config['General']['no_restart'] = int(NO_RESTART)
     new_config['General']['developer'] = int(DEVELOPER)
 
     new_config['Blackhole'] = {}
@@ -1737,6 +1750,7 @@ def save_config():
     new_config['SABnzbd']['sab_category'] = SAB_CATEGORY
     new_config['SABnzbd']['sab_category_anime'] = SAB_CATEGORY_ANIME
     new_config['SABnzbd']['sab_host'] = SAB_HOST
+    new_config['SABnzbd']['sab_forced'] = int(SAB_FORCED)
 
     new_config['NZBget'] = {}
 
@@ -1884,6 +1898,7 @@ def save_config():
     new_config['Trakt']['trakt_default_indexer'] = int(TRAKT_DEFAULT_INDEXER)
     new_config['Trakt']['trakt_disable_ssl_verify'] = int(TRAKT_DISABLE_SSL_VERIFY)
     new_config['Trakt']['trakt_timeout'] = int(TRAKT_TIMEOUT)
+    new_config['Trakt']['trakt_blacklist_name'] = TRAKT_BLACKLIST_NAME
 
     new_config['pyTivo'] = {}
     new_config['pyTivo']['use_pytivo'] = int(USE_PYTIVO)
diff --git a/sickbeard/common.py b/sickbeard/common.py
index bc8e9cd7d40248a837c1284713c15027d2be4dbf..fbcf8befc7ded74ab91297fb838778b244bc8c5d 100644
--- a/sickbeard/common.py
+++ b/sickbeard/common.py
@@ -29,7 +29,7 @@ mediaExtensions = ['avi', 'mkv', 'mpg', 'mpeg', 'wmv',
                    'ogm', 'mp4', 'iso', 'img', 'divx',
                    'm2ts', 'm4v', 'ts', 'flv', 'f4v',
                    'mov', 'rmvb', 'vob', 'dvr-ms', 'wtv',
-                   'ogv', '3gp', 'webm']
+                   'ogv', '3gp', 'webm', 'tp']
 
 subtitleExtensions = ['srt', 'sub', 'ass', 'idx', 'ssa']
 
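
With 'tp' in the list, raw transport-stream captures are now recognized as media. The extension check that consumes this list boils down to the following (simplified sketch; the real sickbeard.helpers.isMediaFile also filters out samples and partial downloads):

    mediaExtensions = ['avi', 'mkv', 'mp4', 'ts', 'tp']  # abbreviated list

    def is_media_file(filename):
        # simplified stand-in for sickbeard.helpers.isMediaFile
        sep_pos = filename.rfind('.')
        return sep_pos > 0 and filename[sep_pos + 1:].lower() in mediaExtensions

    print(is_media_file('Show.S01E01.tp'))  # True
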
diff --git a/sickbeard/config.py b/sickbeard/config.py
index b5217f18d607b9411182f48db912c52202a9a1a0..060cc74c236215f5e383ad9fc613a5d0c2f6746b 100644
--- a/sickbeard/config.py
+++ b/sickbeard/config.py
@@ -28,6 +28,11 @@ from sickbeard import logger
 from sickbeard import naming
 from sickbeard import db
 
+# Address poor urlparse support for the scgi scheme (SCGI over Unix domain
+# sockets); this is currently not handled nicely by Python, see
+# http://bugs.python.org/issue23636
+urlparse.uses_netloc.append('scgi')
+
 naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                   "s%(seasonnumber)02de%(episodenumber)02d",
                   "S%(seasonnumber)02dE%(episodenumber)02d",
diff --git a/sickbeard/helpers.py b/sickbeard/helpers.py
index 773617c5481cf8ee13ada85e95e2f071b2186c62..193771bf1a4cdc462f8495381a33fae55702380a 100644
--- a/sickbeard/helpers.py
+++ b/sickbeard/helpers.py
@@ -1240,7 +1240,7 @@ def _getTempDir():
 
     return os.path.join(tempfile.gettempdir(), "sickrage-%s" % (uid))
 
-def getURL(url, post_data=None, params=None, headers={}, timeout=30, session=None, json=False):
+def getURL(url, post_data=None, params=None, headers={}, timeout=30, session=None, json=False, proxyGlypeProxySSLwarning=None):
     """
     Returns a byte-string retrieved from the url provider.
     """
@@ -1279,6 +1279,15 @@ def getURL(url, post_data=None, params=None, headers={}, timeout=30, session=Non
                 resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
             return
 
+        if proxyGlypeProxySSLwarning is not None:
+            if re.search('The site you are attempting to browse is on a secure connection', resp.text):
+                resp = session.get(proxyGlypeProxySSLwarning)
+
+                if not resp.ok:
+                    logger.log(u"GlypeProxySSLwarning: Requested url " + url + " returned status code is " + str(
+                        resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
+                    return
+
     except requests.exceptions.HTTPError, e:
         logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
         return
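
Providers that are scraped through a Glype web proxy can now hand getURL the proxy's SSL-warning bypass URL; when the interstitial page is detected, it is acknowledged with a second request before the caller sees the response. A hypothetical call (both URLs are placeholders, not a real provider configuration):

    from sickbeard import helpers

    data = helpers.getURL(
        'http://glype-proxy.example/browse.php?u=http%3A%2F%2Ftracker.example%2Frss',
        proxyGlypeProxySSLwarning='http://glype-proxy.example/includes/process.php?action=sslagree')
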
diff --git a/sickbeard/logger.py b/sickbeard/logger.py
index 1d3de0a2284429c81bc37d3482c584e597c41230..726614d2a582867e93175b601e0b5d4e518f12e4 100644
--- a/sickbeard/logger.py
+++ b/sickbeard/logger.py
@@ -199,7 +199,7 @@ class Logger(object):
                         gist = 'No ERROR found'
 
                 message = u"### INFO\n"
-                message += u"Python Version: **" + sys.version[:120] + "**\n"
+                message += u"Python Version: **" + sys.version[:120].replace('\n','') + "**\n"
                 message += u"Operating System: **" + platform.platform() + "**\n"
                 if not 'Windows' in platform.platform():
                     try:
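
The strip matters because sys.version embeds a newline between the version number and the build details, which would break the single-line markdown bold in the generated issue body:

    import sys

    # sys.version typically spans two lines, e.g.
    # '2.7.6 (default, Mar 22 2014, 22:59:56) \n[GCC 4.8.2]'
    print("Python Version: **" + sys.version[:120].replace('\n', '') + "**")
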
diff --git a/sickbeard/processTV.py b/sickbeard/processTV.py
index 131d3e3559af4c17d0ffc6b2ebe0e7ada9655ae3..9712dc90cdbdb18736c2b7b7d8a42f462dc702df 100644
--- a/sickbeard/processTV.py
+++ b/sickbeard/processTV.py
@@ -43,8 +43,10 @@ shutil.copyfile = lib.shutil_custom.copyfile_custom
 
 class ProcessResult:
     def __init__(self):
-       self.result = True
-       self.output = ''
+        self.result = True
+        self.output = ''
+        self.missedfiles = []
+        self.aggresult = True
 
 def delete_folder(folder, check_empty=True):
 
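
The new missedfiles/aggresult fields let processDir keep processing after an individual file fails and report an aggregate verdict at the end. The accumulation pattern is roughly this (simplified sketch, not the full processDir logic):

    class ProcessResult:
        def __init__(self):
            self.result = True
            self.output = ''
            self.missedfiles = []
            self.aggresult = True

    result = ProcessResult()
    for filename, ok in [('a.mkv', True), ('b.mkv', False)]:
        result.result = ok
        if not ok:
            result.missedfiles.append(filename)
            result.aggresult = False  # one failure marks the whole run as failed

    print(result.aggresult, result.missedfiles)  # False ['b.mkv']
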
@@ -117,7 +119,7 @@ def logHelper(logMessage, logLevel=logger.INFO):
     return logMessage + u"\n"
 
 
-def processDir(dirName, nzbName=None, process_method=None, force=False, is_priority=None, failed=False, type="auto"):
+def processDir(dirName, nzbName=None, process_method=None, force=False, is_priority=None, delete_on=False, failed=False, type="auto"):
     """
     Scans through the files in dirName and processes whatever media files it finds
 
@@ -133,7 +135,7 @@ def processDir(dirName, nzbName=None, process_method=None, force=False, is_prior
     result.output += logHelper(u"Processing folder " + dirName, logger.DEBUG)
 
     result.output += logHelper(u"TV_DOWNLOAD_DIR: " + sickbeard.TV_DOWNLOAD_DIR, logger.DEBUG)
-
+    postpone = False
     # if they passed us a real dir then assume it's the one we want
     if ek.ek(os.path.isdir, dirName):
         dirName = ek.ek(os.path.realpath, dirName)
@@ -157,49 +159,54 @@ def processDir(dirName, nzbName=None, process_method=None, force=False, is_prior
 
     # Don't post process if files are still being synced and option is activated
     if SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES:
-        result.output += logHelper(u"Found temporary sync files, skipping post processing", logger.WARNING)
-        result.output += logHelper(u"Sync Files: " + str(SyncFiles) + " in path " + path, logger.WARNING)
-        return result.output
+        postpone = True
+
+    nzbNameOriginal = nzbName
 
-    result.output += logHelper(u"PostProcessing Path: " + path, logger.DEBUG)
-    result.output += logHelper(u"PostProcessing Dirs: " + str(dirs), logger.DEBUG)
+    if not postpone:
+        result.output += logHelper(u"PostProcessing Path: " + path, logger.DEBUG)
+        result.output += logHelper(u"PostProcessing Dirs: " + str(dirs), logger.DEBUG)
 
-    rarFiles = filter(helpers.isRarFile, files)
-    rarContent = unRAR(path, rarFiles, force, result)
-    files += rarContent
-    videoFiles = filter(helpers.isMediaFile, files)
-    videoInRar = filter(helpers.isMediaFile, rarContent)
+        rarFiles = filter(helpers.isRarFile, files)
+        rarContent = unRAR(path, rarFiles, force, result)
+        files += rarContent
+        videoFiles = filter(helpers.isMediaFile, files)
+        videoInRar = filter(helpers.isMediaFile, rarContent)
 
-    result.output += logHelper(u"PostProcessing Files: " + str(files), logger.DEBUG)
-    result.output += logHelper(u"PostProcessing VideoFiles: " + str(videoFiles), logger.DEBUG)
-    result.output += logHelper(u"PostProcessing RarContent: " + str(rarContent), logger.DEBUG)
-    result.output += logHelper(u"PostProcessing VideoInRar: " + str(videoInRar), logger.DEBUG)
+        result.output += logHelper(u"PostProcessing Files: " + str(files), logger.DEBUG)
+        result.output += logHelper(u"PostProcessing VideoFiles: " + str(videoFiles), logger.DEBUG)
+        result.output += logHelper(u"PostProcessing RarContent: " + str(rarContent), logger.DEBUG)
+        result.output += logHelper(u"PostProcessing VideoInRar: " + str(videoInRar), logger.DEBUG)
 
-    # If nzbName is set and there's more than one videofile in the folder, files will be lost (overwritten).
-    nzbNameOriginal = nzbName
-    if len(videoFiles) >= 2:
-        nzbName = None
-
-    if not process_method:
-        process_method = sickbeard.PROCESS_METHOD
-
-    result.result = True
-
-    #Don't Link media when the media is extracted from a rar in the same path
-    if process_method in ('hardlink', 'symlink') and videoInRar:
-        process_media(path, videoInRar, nzbName, 'move', force, is_priority, result)
-        delete_files(path, rarContent, result)
-        for video in set(videoFiles) - set(videoInRar):
-            process_media(path, [video], nzbName, process_method, force, is_priority, result)
-    elif sickbeard.DELRARCONTENTS and videoInRar:
-        process_media(path, videoInRar, nzbName, process_method, force, is_priority, result)
-        delete_files(path, rarContent, result, True)
-        for video in set(videoFiles) - set(videoInRar):
-            process_media(path, [video], nzbName, process_method, force, is_priority, result)
-    else:
-        for video in videoFiles:
-            process_media(path, [video], nzbName, process_method, force, is_priority, result)
+        # If nzbName is set and there's more than one videofile in the folder, files will be lost (overwritten).
+        if len(videoFiles) >= 2:
+            nzbName = None
 
+        if not process_method:
+            process_method = sickbeard.PROCESS_METHOD
+
+        result.result = True
+
+        #Don't Link media when the media is extracted from a rar in the same path
+        if process_method in ('hardlink', 'symlink') and videoInRar:
+            process_media(path, videoInRar, nzbName, 'move', force, is_priority, result)
+            delete_files(path, rarContent, result)
+            for video in set(videoFiles) - set(videoInRar):
+                process_media(path, [video], nzbName, process_method, force, is_priority, result)
+        elif sickbeard.DELRARCONTENTS and videoInRar:
+            process_media(path, videoInRar, nzbName, process_method, force, is_priority, result)
+            delete_files(path, rarContent, result, True)
+            for video in set(videoFiles) - set(videoInRar):
+                process_media(path, [video], nzbName, process_method, force, is_priority, result)
+        else:
+            for video in videoFiles:
+                process_media(path, [video], nzbName, process_method, force, is_priority, result)
+
+    else:
+        result.output += logHelper(u"Found temporary sync files, skipping post processing for folder " + str(path), logger.WARNING)
+        result.output += logHelper(u"Sync Files: " + str(SyncFiles) + " in path: " + path, logger.WARNING)
+        result.missedfiles.append(path + " : Syncfiles found")
+
     #Process Video File in all TV Subdir
     for dir in [x for x in dirs if validateDir(path, x, nzbNameOriginal, failed, result)]:
 
@@ -210,53 +217,65 @@ def processDir(dirName, nzbName=None, process_method=None, force=False, is_prior
             if (not validateDir(path, processPath, nzbNameOriginal, failed, result)):
                 continue
             
+            postpone = False
+
             SyncFiles = filter(helpers.isSyncFile, fileList)
 
             # Don't post process if files are still being synced and option is activated
             if SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES:
-                result.output += logHelper(u"Found temporary sync files, skipping post processing", logger.WARNING)
-                result.output += logHelper(u"Sync Files: " + str(SyncFiles) + " in path " + processPath, logger.WARNING)
-                return result.output
-
-            rarFiles = filter(helpers.isRarFile, fileList)
-            rarContent = unRAR(processPath, rarFiles, force, result)
-            fileList = set(fileList + rarContent)
-            videoFiles = filter(helpers.isMediaFile, fileList)
-            videoInRar = filter(helpers.isMediaFile, rarContent)
-            notwantedFiles = [x for x in fileList if x not in videoFiles]
-            result.output += logHelper(u"Found unwanted files: " + str(notwantedFiles), logger.INFO)
-
-            #Don't Link media when the media is extracted from a rar in the same path
-            if process_method in ('hardlink', 'symlink') and videoInRar:
-                process_media(processPath, videoInRar, nzbName, 'move', force, is_priority, result)
-                process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
-                              is_priority, result)
-                delete_files(processPath, rarContent, result)
-            elif sickbeard.DELRARCONTENTS and videoInRar:
-                process_media(processPath, videoInRar, nzbName, process_method, force, is_priority, result)
-                process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
-                              is_priority, result)
-                delete_files(processPath, rarContent, result, True)
+                postpone = True
+
+            if not postpone:
+                rarFiles = filter(helpers.isRarFile, fileList)
+                rarContent = unRAR(processPath, rarFiles, force, result)
+                fileList = set(fileList + rarContent)
+                videoFiles = filter(helpers.isMediaFile, fileList)
+                videoInRar = filter(helpers.isMediaFile, rarContent)
+                notwantedFiles = [x for x in fileList if x not in videoFiles]
+                if notwantedFiles:
+                    result.output += logHelper(u"Found unwanted files: " + str(notwantedFiles), logger.INFO)
+
+                #Don't Link media when the media is extracted from a rar in the same path
+                if process_method in ('hardlink', 'symlink') and videoInRar:
+                    process_media(processPath, videoInRar, nzbName, 'move', force, is_priority, result)
+                    process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
+                                  is_priority, result)
+                    delete_files(processPath, rarContent, result)
+                elif sickbeard.DELRARCONTENTS and videoInRar:
+                    process_media(processPath, videoInRar, nzbName, process_method, force, is_priority, result)
+                    process_media(processPath, set(videoFiles) - set(videoInRar), nzbName, process_method, force,
+                                  is_priority, result)
+                    delete_files(processPath, rarContent, result, True)
+                else:
+                    process_media(processPath, videoFiles, nzbName, process_method, force, is_priority, result)
+
+                    # Delete all files that are not needed
+                    if process_method != "move" or not result.result \
+                            or (type == "manual" and not delete_on):  # Avoid deleting files during manual post-processing
+                        continue
+
+                    delete_files(processPath, notwantedFiles, result)
+
+                    if (not sickbeard.NO_DELETE or type == "manual") and process_method == "move" and \
+                                    ek.ek(os.path.normpath, processPath) != ek.ek(os.path.normpath,
+                                                                                  sickbeard.TV_DOWNLOAD_DIR):
+                        if delete_folder(processPath, check_empty=True):
+                            result.output += logHelper(u"Deleted folder: " + processPath, logger.DEBUG)
             else:
-                process_media(processPath, videoFiles, nzbName, process_method, force, is_priority, result)
-
-                #Delete all file not needed
-                if process_method != "move" or not result.result \
-                        or type == "manual":  #Avoid to delete files if is Manual PostProcessing
-                    continue
-
-                delete_files(processPath, notwantedFiles, result)
-
-                if process_method == "move" and \
-                                ek.ek(os.path.normpath, processPath) != ek.ek(os.path.normpath,
-                                                                              sickbeard.TV_DOWNLOAD_DIR):
-                    if delete_folder(processPath, check_empty=True):
-                        result.output += logHelper(u"Deleted folder: " + processPath, logger.DEBUG)
-
-    if result.result:
+                result.output += logHelper(u"Found temporary sync files, skipping post processing for folder: " + str(processPath), logger.WARNING)
+                result.output += logHelper(u"Sync Files: " + str(SyncFiles) + " in path: " + processPath, logger.WARNING)
+                result.missedfiles.append(processPath + " : Syncfiles found")
+
+    if result.aggresult:
         result.output += logHelper(u"Successfully processed")
+        if result.missedfiles:
+            result.output += logHelper(u"I did encounter some unprocessable items: ")
+            for missedfile in result.missedfiles:
+                result.output += logHelper(u"[" + missedfile + "]")
     else:
-        result.output += logHelper(u"Problem(s) during processing", logger.WARNING)
+        result.output += logHelper(u"Problem(s) during processing, failed the following files/folders:  ", logger.WARNING)
+        for missedfile in result.missedfiles:
+            result.output += logHelper(u"[" + missedfile + "]", logger.WARNING)
 
     return result.output
 
@@ -275,14 +294,17 @@ def validateDir(path, dirName, nzbNameOriginal, failed, result):
     elif ek.ek(os.path.basename, dirName).upper().startswith('_UNPACK'):
         result.output += logHelper(u"The directory name indicates that this release is in the process of being unpacked.",
                                logger.DEBUG)
+        result.missedfiles.append(dirName + " : Being unpacked")
         return False
 
     if failed:
         process_failed(os.path.join(path, dirName), nzbNameOriginal, result)
+        result.missedfiles.append(dirName + " : Failed download")
         return False
 
     if helpers.is_hidden_folder(os.path.join(path, dirName)):
         result.output += logHelper(u"Ignoring hidden folder: " + dirName, logger.DEBUG)
+        result.missedfiles.append(dirName + " : Hidden folder")
         return False
 
     # make sure the dir isn't inside a show dir
@@ -296,6 +318,7 @@ def validateDir(path, dirName, nzbNameOriginal, failed, result):
             result.output += logHelper(
                 u"You're trying to post process an episode that's already been moved to its show dir, skipping",
                 logger.ERROR)
+            result.missedfiles.append(dirName + " : Already processed")
             return False
 
     # Get the videofile list for the next checks
@@ -333,7 +356,8 @@ def validateDir(path, dirName, nzbNameOriginal, failed, result):
                 return True
             except (InvalidNameException, InvalidShowException):
                 pass
-
+
+    result.missedfiles.append(dirName + " : No processable items found in folder")
     return False
 
 def unRAR(path, rarFiles, force, result):
@@ -359,6 +383,7 @@ def unRAR(path, rarFiles, force, result):
                             u"Archive file already post-processed, extraction skipped: " + file_in_archive,
                             logger.DEBUG)
                         skip_file = True
+                        result.missedfiles.append(archive + " : RAR already processed")
                         break
 
                 if skip_file:
@@ -375,26 +400,32 @@ def unRAR(path, rarFiles, force, result):
             except FatalRARError:
                 result.output += logHelper(u"Failed Unrar archive {0}: Unrar: Fatal Error".format(archive), logger.ERROR)
                 result.result = False
+                result.missedfiles.append(archive + " : Fatal error unpacking archive")
                 continue
             except CRCRARError:
                 result.output += logHelper(u"Failed Unrar archive {0}: Unrar: Archive CRC Error".format(archive), logger.ERROR)
                 result.result = False
+                result.missedfiles.append(archive + " : CRC error unpacking archive")
                 continue
             except IncorrectRARPassword:
                 result.output += logHelper(u"Failed Unrar archive {0}: Unrar: Invalid Password".format(archive), logger.ERROR)
                 result.result = False
+                result.missedfiles.append(archive + " : Password protected RAR")
                 continue
             except NoFileToExtract:
                 result.output += logHelper(u"Failed Unrar archive {0}: Unrar: No file extracted, check the parent folder and destination file permissions.".format(archive), logger.ERROR)
                 result.result = False
+                result.missedfiles.append(archive + " : Nothing was unpacked (file permissions?)")
                 continue
             except GenericRARError:
                 result.output += logHelper(u"Failed Unrar archive {0}: Unrar: Generic Error".format(archive), logger.ERROR)
                 result.result = False
+                result.missedfiles.append(archive + " : Unpacking Failed with a Generic Error")
                 continue
             except Exception, e:
                 result.output += logHelper(u"Failed Unrar archive " + archive + ': ' + ex(e), logger.ERROR)
                 result.result = False
+                result.missedfiles.append(archive + " : Unpacking failed for an unknown reason")
                 continue
 
         result.output += logHelper(u"UnRar content: " + str(unpacked_files), logger.DEBUG)
@@ -463,6 +494,7 @@ def process_media(processPath, videoFiles, nzbName, process_method, force, is_pr
     for cur_video_file in videoFiles:
 
         if already_postprocessed(processPath, cur_video_file, force, result):
+            result.missedfiles.append(ek.ek(os.path.join, processPath, cur_video_file) + " : Already processed")
             continue
 
         cur_video_file_path = ek.ek(os.path.join, processPath, cur_video_file)
@@ -483,10 +515,9 @@ def process_media(processPath, videoFiles, nzbName, process_method, force, is_pr
         else:
             result.output += logHelper(u"Processing failed for " + cur_video_file_path + ": " + process_fail_message,
                                    logger.WARNING)
+            result.missedfiles.append(cur_video_file_path + " : Processing failed: " + process_fail_message)
+            result.aggresult = False
 
-        #If something fail abort the processing on dir
-        if not result.result:
-            break
 
 def get_path_dir_files(dirName, nzbName, type):
     path = ""
diff --git a/sickbeard/properFinder.py b/sickbeard/properFinder.py
index 21507c3c038c89de41b86530eb91687675404d6f..9319d7eecf4476f3cd1a13c99115108ceeda6107 100644
--- a/sickbeard/properFinder.py
+++ b/sickbeard/properFinder.py
@@ -1,276 +1,277 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import datetime
-import operator
-import threading
-import traceback
-from search import pickBestResult
-
-import sickbeard
-
-from sickbeard import db
-from sickbeard import exceptions
-from sickbeard.exceptions import ex
-from sickbeard import helpers, logger
-from sickbeard import search
-from sickbeard import history
-
-from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality
-
-from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-
-
-class ProperFinder():
-    def __init__(self):
-        self.amActive = False
-
-    def run(self, force=False):
-
-        if not sickbeard.DOWNLOAD_PROPERS:
-            return
-
-        logger.log(u"Beginning the search for new propers")
-
-        self.amActive = True
-
-        propers = self._getProperList()
-
-        if propers:
-            self._downloadPropers(propers)
-
-        self._set_lastProperSearch(datetime.datetime.today().toordinal())
-
-        run_at = ""
-        if None is sickbeard.properFinderScheduler.start_time:
-            run_in = sickbeard.properFinderScheduler.lastRun + sickbeard.properFinderScheduler.cycleTime - datetime.datetime.now()
-            hours, remainder = divmod(run_in.seconds, 3600)
-            minutes, seconds = divmod(remainder, 60)
-            run_at = u", next check in approx. " + (
-                "%dh, %dm" % (hours, minutes) if 0 < hours else "%dm, %ds" % (minutes, seconds))
-
-        logger.log(u"Completed the search for new propers%s" % run_at)
-
-        self.amActive = False
-
-    def _getProperList(self):
-        propers = {}
-
-        search_date = datetime.datetime.today() - datetime.timedelta(days=2)
-
-        # for each provider get a list of the
-        origThreadName = threading.currentThread().name
-        providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive()]
-        for curProvider in providers:
-            threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
-
-            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
-
-            try:
-                curPropers = curProvider.findPropers(search_date)
-            except exceptions.AuthException, e:
-                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
-                continue
-            except Exception, e:
-                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
-                logger.log(traceback.format_exc(), logger.DEBUG)
-                continue
-            finally:
-                threading.currentThread().name = origThreadName
-
-            # if they haven't been added by a different provider than add the proper to the list
-            for x in curPropers:
-                name = self._genericName(x.name)
-                if not name in propers:
-                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
-                    x.provider = curProvider
-                    propers[name] = x
-
-        # take the list of unique propers and get it sorted by
-        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
-        finalPropers = []
-
-        for curProper in sortedPropers:
-
-            try:
-                myParser = NameParser(False)
-                parse_result = myParser.parse(curProper.name)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid show", logger.DEBUG)
-                continue
-
-            if not parse_result.series_name:
-                continue
-
-            if not parse_result.episode_numbers:
-                logger.log(
-                    u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode",
-                    logger.DEBUG)
-                continue
-
-            logger.log(
-                u"Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name,
-                logger.DEBUG)
-
-            # set the indexerid in the db to the show's indexerid
-            curProper.indexerid = parse_result.show.indexerid
-
-            # set the indexer in the db to the show's indexer
-            curProper.indexer = parse_result.show.indexer
-
-            # populate our Proper instance
-            curProper.show = parse_result.show
-            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
-            curProper.episode = parse_result.episode_numbers[0]
-            curProper.release_group = parse_result.release_group
-            curProper.version = parse_result.version
-            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
-            curProper.content = None
-
-            # filter release
-            bestResult = pickBestResult(curProper, parse_result.show)
-            if not bestResult:
-                logger.log(u"Proper " + curProper.name + " were rejected by our release filters.", logger.DEBUG)
-                continue
-
-            # only get anime proper if it has release group and version
-            if bestResult.show.is_anime:
-                if not bestResult.release_group and bestResult.version == -1:
-                    logger.log(u"Proper " + bestResult.name + " doesn't have a release group and version, ignoring it",
-                               logger.DEBUG)
-                    continue
-
-            # check if we actually want this proper (if it's the right quality)
-            myDB = db.DBConnection()
-            sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
-                                     [bestResult.indexerid, bestResult.season, bestResult.episode])
-            if not sqlResults:
-                continue
-
-            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
-            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))
-            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
-                continue
-
-            # check if we actually want this proper (if it's the right release group and a higher version)
-            if bestResult.show.is_anime:
-                myDB = db.DBConnection()
-                sqlResults = myDB.select(
-                    "SELECT release_group, version FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
-                    [bestResult.indexerid, bestResult.season, bestResult.episode])
-
-                oldVersion = int(sqlResults[0]["version"])
-                oldRelease_group = (sqlResults[0]["release_group"])
-
-                if oldVersion > -1 and oldVersion < bestResult.version:
-                    logger.log("Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
-                else:
-                    continue
-
-                if oldRelease_group != bestResult.release_group:
-                    logger.log("Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
-                    continue
-
-            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
-            if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in map(
-                    operator.attrgetter('indexerid', 'season', 'episode'), finalPropers):
-                logger.log(u"Found a proper that we need: " + str(bestResult.name))
-                finalPropers.append(bestResult)
-
-        return finalPropers
-
-    def _downloadPropers(self, properList):
-
-        for curProper in properList:
-
-            historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)
-
-            # make sure the episode has been downloaded before
-            myDB = db.DBConnection()
-            historyResults = myDB.select(
-                "SELECT resource FROM history " +
-                "WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? " +
-                "AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED]) + ")",
-                [curProper.indexerid, curProper.season, curProper.episode, curProper.quality,
-                 historyLimit.strftime(history.dateFormat)])
-
-            # if we didn't download this episode in the first place we don't know what quality to use for the proper so we can't do it
-            if len(historyResults) == 0:
-                logger.log(
-                    u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
-                continue
-
-            else:
-
-                # make sure that none of the existing history downloads are the same proper we're trying to download
-                clean_proper_name = self._genericName(helpers.remove_non_release_groups(curProper.name))
-                isSame = False
-                for curResult in historyResults:
-                    # if the result exists in history already we need to skip it
-                    if self._genericName(helpers.remove_non_release_groups(curResult["resource"])) == clean_proper_name:
-                        isSame = True
-                        break
-                if isSame:
-                    logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
-                    continue
-
-                # get the episode object
-                epObj = curProper.show.getEpisode(curProper.season, curProper.episode)
-
-                # make the result object
-                result = curProper.provider.getResult([epObj])
-                result.show = curProper.show
-                result.url = curProper.url
-                result.name = curProper.name
-                result.quality = curProper.quality
-                result.release_group = curProper.release_group
-                result.version = curProper.version
-
-                # snatch it
-                search.snatchEpisode(result, SNATCHED_PROPER)
-
-    def _genericName(self, name):
-        return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()
-
-    def _set_lastProperSearch(self, when):
-
-        logger.log(u"Setting the last Proper search in the DB to " + str(when), logger.DEBUG)
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select("SELECT * FROM info")
-
-        if len(sqlResults) == 0:
-            myDB.action("INSERT INTO info (last_backlog, last_indexer, last_proper_search) VALUES (?,?,?)",
-                        [0, 0, str(when)])
-        else:
-            myDB.action("UPDATE info SET last_proper_search=" + str(when))
-
-    def _get_lastProperSearch(self):
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select("SELECT * FROM info")
-
-        try:
-            last_proper_search = datetime.date.fromordinal(int(sqlResults[0]["last_proper_search"]))
-        except:
-            return datetime.date.fromordinal(1)
-
-        return last_proper_search
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+import operator
+import threading
+import traceback
+from search import pickBestResult
+
+import sickbeard
+
+from sickbeard import db
+from sickbeard import exceptions
+from sickbeard.exceptions import ex
+from sickbeard import helpers, logger
+from sickbeard import search
+from sickbeard import history
+
+from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality
+
+from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+
+
+class ProperFinder():
+    def __init__(self):
+        self.amActive = False
+
+    def run(self, force=False):
+
+        if not sickbeard.DOWNLOAD_PROPERS:
+            return
+
+        logger.log(u"Beginning the search for new propers")
+
+        self.amActive = True
+
+        propers = self._getProperList()
+
+        if propers:
+            self._downloadPropers(propers)
+
+        self._set_lastProperSearch(datetime.datetime.today().toordinal())
+
+        run_at = ""
+        if None is sickbeard.properFinderScheduler.start_time:
+            run_in = sickbeard.properFinderScheduler.lastRun + sickbeard.properFinderScheduler.cycleTime - datetime.datetime.now()
+            hours, remainder = divmod(run_in.seconds, 3600)
+            minutes, seconds = divmod(remainder, 60)
+            run_at = u", next check in approx. " + (
+                "%dh, %dm" % (hours, minutes) if 0 < hours else "%dm, %ds" % (minutes, seconds))
+
+        logger.log(u"Completed the search for new propers%s" % run_at)
+
+        self.amActive = False
+
+    def _getProperList(self):
+        propers = {}
+
+        search_date = datetime.datetime.today() - datetime.timedelta(days=2)
+
+        # for each provider, get a list of the propers it found
+        origThreadName = threading.currentThread().name
+        providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive()]
+        for curProvider in providers:
+            threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
+
+            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
+
+            try:
+                curPropers = curProvider.findPropers(search_date)
+            except exceptions.AuthException, e:
+                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
+                continue
+            except Exception, e:
+                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
+                logger.log(traceback.format_exc(), logger.DEBUG)
+                continue
+            finally:
+                threading.currentThread().name = origThreadName
+
+            # if they haven't been added by a different provider then add the proper to the list
+            for x in curPropers:
+                name = self._genericName(x.name)
+                if name not in propers:
+                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
+                    x.provider = curProvider
+                    propers[name] = x
+
+        # take the list of unique propers and sort it by date, newest first
+        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
+        finalPropers = []
+
+        for curProper in sortedPropers:
+
+            try:
+                myParser = NameParser(False)
+                parse_result = myParser.parse(curProper.name)
+            except InvalidNameException:
+                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
+                continue
+            except InvalidShowException:
+                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid show", logger.DEBUG)
+                continue
+
+            if not parse_result.series_name:
+                continue
+
+            if not parse_result.episode_numbers:
+                logger.log(
+                    u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode",
+                    logger.DEBUG)
+                continue
+
+            logger.log(
+                u"Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name,
+                logger.DEBUG)
+
+            # set the indexerid in the db to the show's indexerid
+            curProper.indexerid = parse_result.show.indexerid
+
+            # set the indexer in the db to the show's indexer
+            curProper.indexer = parse_result.show.indexer
+
+            # populate our Proper instance
+            curProper.show = parse_result.show
+            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
+            curProper.episode = parse_result.episode_numbers[0]
+            curProper.release_group = parse_result.release_group
+            curProper.version = parse_result.version
+            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
+            curProper.content = None
+
+            # filter release
+            bestResult = pickBestResult(curProper, parse_result.show)
+            if not bestResult:
+                logger.log(u"Proper " + curProper.name + " were rejected by our release filters.", logger.DEBUG)
+                continue
+
+            # only get anime proper if it has release group and version
+            if bestResult.show.is_anime:
+                if not bestResult.release_group and bestResult.version == -1:
+                    logger.log(u"Proper " + bestResult.name + " doesn't have a release group and version, ignoring it",
+                               logger.DEBUG)
+                    continue
+
+            # check if we actually want this proper (if it's the right quality)
+            myDB = db.DBConnection()
+            sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
+                                     [bestResult.indexerid, bestResult.season, bestResult.episode])
+            if not sqlResults:
+                continue
+
+            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
+            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))
+            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
+                continue
+
+            # check if we actually want this proper (if it's the right release group and a higher version)
+            if bestResult.show.is_anime:
+                myDB = db.DBConnection()
+                sqlResults = myDB.select(
+                    "SELECT release_group, version FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
+                    [bestResult.indexerid, bestResult.season, bestResult.episode])
+
+                oldVersion = int(sqlResults[0]["version"])
+                oldRelease_group = (sqlResults[0]["release_group"])
+
+                if oldVersion > -1 and oldVersion < bestResult.version:
+                    logger.log("Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
+                else:
+                    continue
+
+                if oldRelease_group != bestResult.release_group:
+                    logger.log("Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
+                    continue
+
+            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
+            if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in map(
+                    operator.attrgetter('indexerid', 'season', 'episode'), finalPropers):
+                logger.log(u"Found a proper that we need: " + str(bestResult.name))
+                finalPropers.append(bestResult)
+
+        return finalPropers
+
+    def _downloadPropers(self, properList):
+
+        for curProper in properList:
+
+            historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)
+
+            # make sure the episode has been downloaded before
+            myDB = db.DBConnection()
+            historyResults = myDB.select(
+                "SELECT resource FROM history " +
+                "WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? " +
+                "AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED]) + ")",
+                [curProper.indexerid, curProper.season, curProper.episode, curProper.quality,
+                 historyLimit.strftime(history.dateFormat)])
+
+            # if we didn't download this episode in the first place we don't know what quality to use for the proper so we can't do it
+            if len(historyResults) == 0:
+                logger.log(
+                    u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
+                continue
+
+            else:
+
+                # make sure that none of the existing history downloads are the same proper we're trying to download
+                clean_proper_name = self._genericName(helpers.remove_non_release_groups(curProper.name))
+                isSame = False
+                for curResult in historyResults:
+                    # if the result exists in history already we need to skip it
+                    if self._genericName(helpers.remove_non_release_groups(curResult["resource"])) == clean_proper_name:
+                        isSame = True
+                        break
+                if isSame:
+                    logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
+                    continue
+
+                # get the episode object
+                epObj = curProper.show.getEpisode(curProper.season, curProper.episode)
+
+                # make the result object
+                result = curProper.provider.getResult([epObj])
+                result.show = curProper.show
+                result.url = curProper.url
+                result.name = curProper.name
+                result.quality = curProper.quality
+                result.release_group = curProper.release_group
+                result.version = curProper.version
+                result.content = curProper.content
+
+                # snatch it
+                search.snatchEpisode(result, SNATCHED_PROPER)
+
+    def _genericName(self, name):
+        return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()
+
+    def _set_lastProperSearch(self, when):
+
+        logger.log(u"Setting the last Proper search in the DB to " + str(when), logger.DEBUG)
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select("SELECT * FROM info")
+
+        if len(sqlResults) == 0:
+            myDB.action("INSERT INTO info (last_backlog, last_indexer, last_proper_search) VALUES (?,?,?)",
+                        [0, 0, str(when)])
+        else:
+            myDB.action("UPDATE info SET last_proper_search=" + str(when))
+
+    def _get_lastProperSearch(self):
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select("SELECT * FROM info")
+
+        try:
+            last_proper_search = datetime.date.fromordinal(int(sqlResults[0]["last_proper_search"]))
+        except:
+            return datetime.date.fromordinal(1)
+
+        return last_proper_search
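
Almost all of the properFinder.py hunk is a wholesale delete-and-re-add of identical lines (most likely a line-ending normalization); the only functional change visible in it is the new "result.content = curProper.content" assignment near the end of _downloadPropers. One piece of the surrounding logic worth a small sketch is the dedup step: propers reported by several providers are keyed by a normalized name so the same release is only considered once. A minimal illustration of that _genericName-style keying:

def generic_name(name):
    # Same normalization _genericName applies: separators become spaces.
    return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()

found = ["Show.S01E01.PROPER.720p-GRP",   # from provider A
         "Show S01E01 PROPER 720p GRP"]   # same release from provider B
propers = {}
for release in found:
    key = generic_name(release)
    if key not in propers:  # the first provider to report a release wins
        propers[key] = release

print(propers.values())  # a single entry survives the dedup
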
diff --git a/sickbeard/providers/__init__.py b/sickbeard/providers/__init__.py
index 17b8cd2e0c06140e1997d1ad9aa73c80d07de56a..de63e1d82d48d5ae36349b6a15587cd0757a9bab 100755
--- a/sickbeard/providers/__init__.py
+++ b/sickbeard/providers/__init__.py
@@ -33,10 +33,11 @@ __all__ = ['ezrss',
            'nextgen',
            'speedcd',
            'nyaatorrents',
-           'fanzub',
+           'animenzb',
            'torrentbytes',
            'animezb',
            'freshontv',
+           'morethantv',
            'bitsoup',
            't411',
            'tokyotoshokan',
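
The __all__ list doubles as the provider registry: renaming fanzub to animenzb and adding morethantv here is what makes the new modules get imported and instantiated. A generic sketch of that name-driven loading (illustrative only, not SickRage's exact loader; PROVIDER_NAMES stands in for the real __all__):

import importlib

PROVIDER_NAMES = ['animenzb', 'morethantv']  # excerpt of the real list

def make_provider_list(package='sickbeard.providers'):
    providers = []
    for name in PROVIDER_NAMES:
        # Each provider module is expected to expose a module-level
        # 'provider' instance, as animenzb.py does on its last line.
        module = importlib.import_module(package + '.' + name)
        providers.append(module.provider)
    return providers
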
diff --git a/sickbeard/providers/fanzub.py b/sickbeard/providers/animenzb.py
similarity index 91%
rename from sickbeard/providers/fanzub.py
rename to sickbeard/providers/animenzb.py
index ad38f1f0b2b6bf4f140ef179bd8ab32682cb37d8..33f12bbd7c104b8fb986341e199be008a6badabd 100644
--- a/sickbeard/providers/fanzub.py
+++ b/sickbeard/providers/animenzb.py
@@ -30,11 +30,11 @@ from sickbeard import tvcache
 from lib.dateutil.parser import parse as parseDate
 
 
-class Fanzub(generic.NZBProvider):
+class animenzb(generic.NZBProvider):
 
     def __init__(self):
 
-        generic.NZBProvider.__init__(self, "Fanzub")
+        generic.NZBProvider.__init__(self, "AnimeNZB")
 
         self.supportsBacklog = False
         self.supportsAbsoluteNumbering = True
@@ -42,9 +42,9 @@ class Fanzub(generic.NZBProvider):
 
         self.enabled = False
 
-        self.cache = FanzubCache(self)
+        self.cache = animenzbCache(self)
 
-        self.urls = {'base_url': 'https://fanzub.com/'}
+        self.urls = {'base_url': 'http://animenzb.com/'}
 
         self.url = self.urls['base_url']
 
@@ -52,7 +52,7 @@ class Fanzub(generic.NZBProvider):
         return self.enabled
 
     def imageName(self):
-        return 'fanzub.gif'
+        return 'animenzb.gif'
 
     def _get_season_search_strings(self, ep_obj):
         return [x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
@@ -111,13 +111,13 @@ class Fanzub(generic.NZBProvider):
         return results
 
 
-class FanzubCache(tvcache.TVCache):
+class animenzbCache(tvcache.TVCache):
 
     def __init__(self, provider):
 
         tvcache.TVCache.__init__(self, provider)
 
-        # only poll Fanzub every 20 minutes max
+        # only poll animenzb every 20 minutes max
         self.minTime = 20
 
     def _getRSSData(self):
@@ -133,4 +133,4 @@ class FanzubCache(tvcache.TVCache):
 
         return self.getRSSFeed(rss_url)
 
-provider = Fanzub()
+provider = animenzb()
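
Beyond the rename, the provider keeps the same polling guard: minTime = 20 means the cache will not hit the RSS feed more than once every 20 minutes. A tiny self-contained sketch of that throttle idea (this does not reproduce TVCache's internals):

import time

class ThrottledCache(object):
    def __init__(self, min_minutes):
        self.min_seconds = min_minutes * 60
        self.last_poll = 0.0

    def should_poll(self):
        # Only allow a new fetch once the minimum interval has elapsed.
        now = time.time()
        if now - self.last_poll >= self.min_seconds:
            self.last_poll = now
            return True
        return False

cache = ThrottledCache(20)   # mirrors minTime = 20 in animenzbCache
print(cache.should_poll())   # True on the first call
print(cache.should_poll())   # False until 20 minutes have passed
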
diff --git a/sickbeard/providers/bitsoup.py b/sickbeard/providers/bitsoup.py
index f1dd6635cb013555af4b7ab0f302343365b2f110..79f0f032867580e2554ca73e0895199ebd6412e2 100644
--- a/sickbeard/providers/bitsoup.py
+++ b/sickbeard/providers/bitsoup.py
@@ -23,6 +23,7 @@ import sickbeard
 import generic
 import requests
 import requests.exceptions
+import urllib
 
 from sickbeard.common import Quality
 from sickbeard import logger
@@ -163,7 +164,7 @@ class BitSoupProvider(generic.TorrentProvider):
                 if isinstance(search_string, unicode):
                     search_string = unidecode(search_string)
 
-                searchURL = self.urls['search'] % (search_string, self.categories)
+                searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories)
 
                 logger.log(u"Search string: " + searchURL, logger.DEBUG)
 
@@ -208,7 +209,7 @@ class BitSoupProvider(generic.TorrentProvider):
                                 continue
 
                             item = title, download_url, id, seeders, leechers
-                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
+                            logger.log(u"Found result: " + title.replace(' ','.') + " (" + searchURL + ")", logger.DEBUG)
 
                             items[mode].append(item)
 
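
The bitsoup.py change URL-encodes the search string before interpolating it into the query URL; without urllib.quote, spaces and other reserved characters would produce a malformed request. A quick before/after using Python 2's urllib.quote (matching this codebase) and an illustrative URL template:

import urllib

search_string = "Some Show S01E01"
url_template = "https://example.invalid/browse.php?search=%s&cat=0"  # illustrative

print(url_template % search_string)
# ...?search=Some Show S01E01&cat=0   <- raw spaces, broken query string

print(url_template % urllib.quote(search_string))
# ...?search=Some%20Show%20S01E01&cat=0
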
diff --git a/sickbeard/providers/btn.py b/sickbeard/providers/btn.py
index 5b6a86e10c8771d052be8f2b4db229b2f262dc05..3c1a44509eb086eba7829933b1adb7412e6d5ac5 100644
--- a/sickbeard/providers/btn.py
+++ b/sickbeard/providers/btn.py
@@ -1,523 +1,523 @@
-# coding=utf-8
-# Author: Daniel Heimans
-# URL: http://code.google.com/p/sickbeard
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import time
-import socket
-import math
-import sickbeard
-import generic
-import itertools
-
-from sickbeard import classes
-from sickbeard import scene_exceptions
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard.helpers import sanitizeSceneName
-from sickbeard.exceptions import ex, AuthException
-from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
-from sickbeard import db
-from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-from sickbeard.common import Quality
-
-from lib import jsonrpclib
-from datetime import datetime
-
-
-class BTNProvider(generic.TorrentProvider):
-    def __init__(self):
-        generic.TorrentProvider.__init__(self, "BTN")
-
-        self.supportsBacklog = True
-        self.supportsAbsoluteNumbering = True
-
-        self.enabled = False
-        self.api_key = None
-        self.ratio = None
-
-        self.cache = BTNCache(self)
-
-        self.urls = {'base_url': "http://api.btnapps.net"}
-
-
-        self.url = self.urls['base_url']
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'btn.png'
-
-    def _checkAuth(self):
-        if not self.api_key:
-            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
-
-        return True
-
-    def _checkAuthFromData(self, parsedJSON):
-
-        if parsedJSON is None:
-            return self._checkAuth()
-
-        if 'api-error' in parsedJSON:
-            logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['api-error'],
-                       logger.DEBUG)
-            raise AuthException(
-                "Your authentication credentials for " + self.name + " are incorrect, check your config.")
-
-        return True
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        self._checkAuth()
-
-        results = []
-        params = {}
-        apikey = self.api_key
-
-        # age in seconds
-        if age:
-            params['age'] = "<=" + str(int(age))
-
-        if search_params:
-            params.update(search_params)
-
-        parsedJSON = self._api_call(apikey, params)
-        if not parsedJSON:
-            logger.log(u"No data returned from " + self.name, logger.ERROR)
-            return results
-
-        if self._checkAuthFromData(parsedJSON):
-
-            if 'torrents' in parsedJSON:
-                found_torrents = parsedJSON['torrents']
-            else:
-                found_torrents = {}
-
-            # We got something, we know the API sends max 1000 results at a time.
-            # See if there are more than 1000 results for our query, if not we
-            # keep requesting until we've got everything.
-            # max 150 requests per hour so limit at that. Scan every 15 minutes. 60 / 15 = 4.
-            max_pages = 150
-            results_per_page = 1000
-
-            if 'results' in parsedJSON and int(parsedJSON['results']) >= results_per_page:
-                pages_needed = int(math.ceil(int(parsedJSON['results']) / results_per_page))
-                if pages_needed > max_pages:
-                    pages_needed = max_pages
-
-                # +1 because range(1,4) = 1, 2, 3
-                for page in range(1, pages_needed + 1):
-                    parsedJSON = self._api_call(apikey, params, results_per_page, page * results_per_page)
-                    # Note that this these are individual requests and might time out individually. This would result in 'gaps'
-                    # in the results. There is no way to fix this though.
-                    if 'torrents' in parsedJSON:
-                        found_torrents.update(parsedJSON['torrents'])
-
-            for torrentid, torrent_info in found_torrents.iteritems():
-                (title, url) = self._get_title_and_url(torrent_info)
-
-                if title and url:
-                    results.append(torrent_info)
-
-        return results
-
-    def _api_call(self, apikey, params={}, results_per_page=1000, offset=0):
-
-        server = jsonrpclib.Server(self.url)
-        parsedJSON = {}
-
-        try:
-            parsedJSON = server.getTorrents(apikey, params, int(results_per_page), int(offset))
-
-        except jsonrpclib.jsonrpc.ProtocolError, error:
-            logger.log(u"JSON-RPC protocol error while accessing " + self.name + ": " + ex(error), logger.ERROR)
-            parsedJSON = {'api-error': ex(error)}
-            return parsedJSON
-
-        except socket.timeout:
-            logger.log(u"Timeout while accessing " + self.name, logger.WARNING)
-
-        except socket.error, error:
-            # Note that sometimes timeouts are thrown as socket errors
-            logger.log(u"Socket error while accessing " + self.name + ": " + error[1], logger.ERROR)
-
-        except Exception, error:
-            errorstring = str(error)
-            if (errorstring.startswith('<') and errorstring.endswith('>')):
-                errorstring = errorstring[1:-1]
-            logger.log(u"Unknown error while accessing " + self.name + ": " + errorstring, logger.ERROR)
-
-        return parsedJSON
-
-    def _get_title_and_url(self, parsedJSON):
-
-        # The BTN API gives a lot of information in response,
-        # however SickRage is built mostly around Scene or
-        # release names, which is why we are using them here.
-
-        if 'ReleaseName' in parsedJSON and parsedJSON['ReleaseName']:
-            title = parsedJSON['ReleaseName']
-
-        else:
-            # If we don't have a release name we need to get creative
-            title = u''
-            if 'Series' in parsedJSON:
-                title += parsedJSON['Series']
-            if 'GroupName' in parsedJSON:
-                title += '.' + parsedJSON['GroupName'] if title else parsedJSON['GroupName']
-            if 'Resolution' in parsedJSON:
-                title += '.' + parsedJSON['Resolution'] if title else parsedJSON['Resolution']
-            if 'Source' in parsedJSON:
-                title += '.' + parsedJSON['Source'] if title else parsedJSON['Source']
-            if 'Codec' in parsedJSON:
-                title += '.' + parsedJSON['Codec'] if title else parsedJSON['Codec']
-            if title:
-                title = title.replace(' ', '.')
-
-        url = None
-        if 'DownloadURL' in parsedJSON:
-            url = parsedJSON['DownloadURL']
-            if url:
-                # unescaped / is valid in JSON, but it can be escaped
-                url = url.replace("\\/", "/")
-
-        return (title, url)
-
-    def _get_season_search_strings(self, ep_obj):
-        search_params = []
-        current_params = {'category': 'Season'}
-
-        # Search for entire seasons: no need to do special things for air by date or sports shows
-        if ep_obj.show.air_by_date or ep_obj.show.sports:
-            # Search for the year of the air by date show
-            current_params['name'] = str(ep_obj.airdate).split('-')[0]
-        elif ep_obj.show.is_anime:
-            current_params['name'] = "%d" % ep_obj.scene_absolute_number
-        else:
-            current_params['name'] = 'Season ' + str(ep_obj.scene_season)
-
-        # search
-        if ep_obj.show.indexer == 1:
-            current_params['tvdb'] = ep_obj.show.indexerid
-            search_params.append(current_params)
-        elif ep_obj.show.indexer == 2:
-            current_params['tvrage'] = ep_obj.show.indexerid
-            search_params.append(current_params)
-        else:
-            name_exceptions = list(
-                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
-            for name in name_exceptions:
-                # Search by name if we don't have tvdb or tvrage id
-                current_params['series'] = sanitizeSceneName(name)
-                search_params.append(current_params)
-
-        return search_params
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        if not ep_obj:
-            return [{}]
-
-        to_return = []
-        search_params = {'category': 'Episode'}
-
-        # episode
-        if ep_obj.show.air_by_date or ep_obj.show.sports:
-            date_str = str(ep_obj.airdate)
-
-            # BTN uses dots in dates, we just search for the date since that
-            # combined with the series identifier should result in just one episode
-            search_params['name'] = date_str.replace('-', '.')
-        elif ep_obj.show.anime:
-            search_params['name'] = "%i" % int(ep_obj.scene_absolute_number)
-        else:
-            # Do a general name search for the episode, formatted like SXXEYY
-            search_params['name'] = "S%02dE%02d" % (ep_obj.scene_season, ep_obj.scene_episode)
-
-        # search
-        if ep_obj.show.indexer == 1:
-            search_params['tvdb'] = ep_obj.show.indexerid
-            to_return.append(search_params)
-        elif ep_obj.show.indexer == 2:
-            search_params['tvrage'] = ep_obj.show.indexerid
-            to_return.append(search_params)
-        else:
-            # add new query string for every exception
-            name_exceptions = list(
-                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
-            for cur_exception in name_exceptions:
-                search_params['series'] = sanitizeSceneName(cur_exception)
-                to_return.append(search_params)
-
-        return to_return
-
-    def _doGeneralSearch(self, search_string):
-        # 'search' looks as broad is it can find. Can contain episode overview and title for example,
-        # use with caution!
-        return self._doSearch({'search': search_string})
-
-    def findPropers(self, search_date=None):
-        results = []
-
-        search_terms = ['%.proper.%', '%.repack.%']
-
-        for term in search_terms:
-            for item in self._doSearch({'release': term}, age=4 * 24 * 60 * 60):
-                if item['Time']:
-                    try:
-                        result_date = datetime.fromtimestamp(float(item['Time']))
-                    except TypeError:
-                        result_date = None
-
-                    if result_date:
-                        if not search_date or result_date > search_date:
-                            title, url = self._get_title_and_url(item)
-                            results.append(classes.Proper(title, url, result_date, self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False):
-
-        self._checkAuth()
-        self.show = show
-
-        results = {}
-        itemList = []
-
-        searched_scene_season = None
-        for epObj in episodes:
-            # search cache for episode result
-            cacheResult = self.cache.searchCache(epObj, manualSearch)
-            if cacheResult:
-                if epObj.episode not in results:
-                    results[epObj.episode] = cacheResult
-                else:
-                    results[epObj.episode].extend(cacheResult)
-
-                # found result, search next episode
-                continue
-
-            # skip if season already searched
-            if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
-                continue
-
-            # mark season searched for season pack searches so we can skip later on
-            searched_scene_season = epObj.scene_season
-
-            if search_mode == 'sponly':
-                # get season search results
-                for curString in self._get_season_search_strings(epObj):
-                    itemList += self._doSearch(curString, search_mode, len(episodes))
-            else:
-                # get single episode search results
-                for curString in self._get_episode_search_strings(epObj):
-                    itemList += self._doSearch(curString, search_mode, len(episodes))
-
-        # if we found what we needed already from cache then return results and exit
-        if len(results) == len(episodes):
-            return results
-
-        # sort list by quality
-        if len(itemList):
-            items = {}
-            itemsUnknown = []
-            for item in itemList:
-                quality = self.getQuality(item, anime=show.is_anime)
-                if quality == Quality.UNKNOWN:
-                    itemsUnknown += [item]
-                else:
-                    if quality not in items:
-                        items[quality] = [item]
-                    else:
-                        items[quality].append(item)
-
-            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
-            itemList += itemsUnknown if itemsUnknown else []
-
-        # filter results
-        cl = []
-        for item in itemList:
-            (title, url) = self._get_title_and_url(item)
-
-            # parse the file name
-            try:
-                myParser = NameParser(False, convert=True)
-                parse_result = myParser.parse(title)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)  # @UndefinedVariable
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
-                continue
-
-            showObj = parse_result.show
-            quality = parse_result.quality
-            release_group = parse_result.release_group
-            version = parse_result.version
-
-            addCacheEntry = False
-            if not (showObj.air_by_date or showObj.sports):
-                if search_mode == 'sponly': 
-                    if len(parse_result.episode_numbers):
-                        logger.log(
-                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    if len(parse_result.episode_numbers) and (
-                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
-                                                                                 ep.scene_episode in parse_result.episode_numbers]):
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                else:
-                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
-                                                                                                     episodes if
-                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
-                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    # we just use the existing info for normal searches
-                    actual_season = parse_result.season_number
-                    actual_episodes = parse_result.episode_numbers
-            else:
-                if not (parse_result.is_air_by_date):
-                    logger.log(
-                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
-                        logger.DEBUG)
-                    addCacheEntry = True
-                else:
-                    airdate = parse_result.air_date.toordinal()
-                    myDB = db.DBConnection()
-                    sql_results = myDB.select(
-                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
-                        [showObj.indexerid, airdate])
-
-                    if len(sql_results) != 1:
-                        logger.log(
-                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
-                            logger.WARNING)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    actual_season = int(sql_results[0]["season"])
-                    actual_episodes = [int(sql_results[0]["episode"])]
-
-            # add parsed result to cache for usage later on
-            if addCacheEntry:
-                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
-                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
-                if ci is not None:
-                    cl.append(ci)
-                continue
-
-            # make sure we want the episode
-            wantEp = True
-            for epNo in actual_episodes:
-                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch):
-                    wantEp = False
-                    break
-
-            if not wantEp:
-                logger.log(
-                    u"Ignoring result " + title + " because we don't want an episode that is " +
-                    Quality.qualityStrings[
-                        quality], logger.DEBUG)
-
-                continue
-
-            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
-
-            # make a result object
-            epObj = []
-            for curEp in actual_episodes:
-                epObj.append(showObj.getEpisode(actual_season, curEp))
-
-            result = self.getResult(epObj)
-            result.show = showObj
-            result.url = url
-            result.name = title
-            result.quality = quality
-            result.release_group = release_group
-            result.version = version
-            result.content = None
-
-            if len(epObj) == 1:
-                epNum = epObj[0].episode
-                logger.log(u"Single episode result.", logger.DEBUG)
-            elif len(epObj) > 1:
-                epNum = MULTI_EP_RESULT
-                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
-                    parse_result.episode_numbers), logger.DEBUG)
-            elif len(epObj) == 0:
-                epNum = SEASON_RESULT
-                logger.log(u"Separating full season result to check for later", logger.DEBUG)
-
-            if epNum not in results:
-                results[epNum] = [result]
-            else:
-                results[epNum].append(result)
-
-        # check if we have items to add to cache
-        if len(cl) > 0:
-            myDB = self.cache._getDB()
-            myDB.mass_action(cl)
-
-        return results
-
-
-class BTNCache(tvcache.TVCache):
-    def __init__(self, provider):
-        tvcache.TVCache.__init__(self, provider)
-
-        # At least 15 minutes between queries
-        self.minTime = 15
-
-    def _getRSSData(self):
-        # Get the torrents uploaded since last check.
-        seconds_since_last_update = math.ceil(time.time() - time.mktime(self._getLastUpdate().timetuple()))
-
-        # default to 15 minutes
-        seconds_minTime = self.minTime * 60
-        if seconds_since_last_update < seconds_minTime:
-            seconds_since_last_update = seconds_minTime
-
-        # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, older things will need to be done through backlog
-        if seconds_since_last_update > 86400:
-            logger.log(
-                u"The last known successful update on " + self.provider.name + " was more than 24 hours ago, only trying to fetch the last 24 hours!",
-                logger.WARNING)
-            seconds_since_last_update = 86400
-
-        return {'entries': self.provider._doSearch(search_params=None, age=seconds_since_last_update)}
-
-
-provider = BTNProvider()
+# coding=utf-8
+# Author: Daniel Heimans
+# URL: http://code.google.com/p/sickbeard
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import time
+import socket
+import math
+import sickbeard
+import generic
+import itertools
+
+from sickbeard import classes
+from sickbeard import scene_exceptions
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard.helpers import sanitizeSceneName
+from sickbeard.exceptions import ex, AuthException
+from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
+from sickbeard import db
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+from sickbeard.common import Quality
+
+from lib import jsonrpclib
+from datetime import datetime
+
+
+class BTNProvider(generic.TorrentProvider):
+    def __init__(self):
+        generic.TorrentProvider.__init__(self, "BTN")
+
+        self.supportsBacklog = True
+        self.supportsAbsoluteNumbering = True
+
+        self.enabled = False
+        self.api_key = None
+        self.ratio = None
+
+        self.cache = BTNCache(self)
+
+        self.urls = {'base_url': "http://api.btnapps.net"}
+
+        self.url = self.urls['base_url']
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'btn.png'
+
+    def _checkAuth(self):
+        if not self.api_key:
+            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
+
+        return True
+
+    def _checkAuthFromData(self, parsedJSON):
+
+        if parsedJSON is None:
+            return self._checkAuth()
+
+        if 'api-error' in parsedJSON:
+            logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['api-error'],
+                       logger.DEBUG)
+            raise AuthException(
+                "Your authentication credentials for " + self.name + " are incorrect, check your config.")
+
+        return True
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
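+        """Query BTN's getTorrents API and return the raw torrent dicts.
+
+        search_params is a dict of getTorrents filters (e.g. 'category',
+        'name', 'tvdb'); age, when set, restricts results to torrents
+        uploaded within the last `age` seconds. search_mode and epcount
+        are accepted for interface compatibility but unused here.
+        """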
+
+        self._checkAuth()
+
+        results = []
+        params = {}
+        apikey = self.api_key
+
+        # age in seconds
+        if age:
+            params['age'] = "<=" + str(int(age))
+
+        if search_params:
+            params.update(search_params)
+
+        parsedJSON = self._api_call(apikey, params)
+        if not parsedJSON:
+            logger.log(u"No data returned from " + self.name, logger.ERROR)
+            return results
+
+        if self._checkAuthFromData(parsedJSON):
+
+            if 'torrents' in parsedJSON:
+                found_torrents = parsedJSON['torrents']
+            else:
+                found_torrents = {}
+
+            # We got something; the API sends at most 1000 results per call.
+            # If there are more than 1000 results for our query, keep
+            # requesting pages until we've got everything.
+            # The API allows at most 150 requests per hour, so cap the page
+            # count there. We scan every 15 minutes (60 / 15 = 4 scans/hour).
+            max_pages = 150
+            results_per_page = 1000
+
+            if 'results' in parsedJSON and int(parsedJSON['results']) >= results_per_page:
+                # Force float division: Python 2's integer division floors,
+                # which would make math.ceil a no-op here.
+                pages_needed = int(math.ceil(int(parsedJSON['results']) / float(results_per_page)))
+                if pages_needed > max_pages:
+                    pages_needed = max_pages
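+                # Worked example: 2500 reported results at 1000 per page
+                # -> int(math.ceil(2500 / 1000.0)) == 3 pages in total.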
+
+                # The first call above already fetched page 0 (offset 0), so
+                # only the remaining pages 1 .. pages_needed - 1 are needed.
+                for page in range(1, pages_needed):
+                    parsedJSON = self._api_call(apikey, params, results_per_page, page * results_per_page)
+                    # Note that these are individual requests that might time
+                    # out individually, which would leave 'gaps' in the
+                    # results. There is no way to fix this though.
+                    if 'torrents' in parsedJSON:
+                        found_torrents.update(parsedJSON['torrents'])
+
+            for torrentid, torrent_info in found_torrents.iteritems():
+                (title, url) = self._get_title_and_url(torrent_info)
+
+                if title and url:
+                    results.append(torrent_info)
+
+        return results
+
+    def _api_call(self, apikey, params={}, results_per_page=1000, offset=0):
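+        """Perform a single getTorrents JSON-RPC call.
+
+        Returns the parsed response dict. On a JSON-RPC protocol error an
+        {'api-error': ...} dict is returned so _checkAuthFromData can
+        report it; on socket or other errors an empty dict is returned.
+        """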
+
+        server = jsonrpclib.Server(self.url)
+        parsedJSON = {}
+
+        try:
+            parsedJSON = server.getTorrents(apikey, params, int(results_per_page), int(offset))
+
+        except jsonrpclib.jsonrpc.ProtocolError as error:
+            logger.log(u"JSON-RPC protocol error while accessing " + self.name + ": " + ex(error), logger.ERROR)
+            parsedJSON = {'api-error': ex(error)}
+            return parsedJSON
+
+        except socket.timeout:
+            logger.log(u"Timeout while accessing " + self.name, logger.WARNING)
+
+        except socket.error as error:
+            # Note that sometimes timeouts are thrown as socket errors
+            logger.log(u"Socket error while accessing " + self.name + ": " + error[1], logger.ERROR)
+
+        except Exception as error:
+            errorstring = str(error)
+            if errorstring.startswith('<') and errorstring.endswith('>'):
+                errorstring = errorstring[1:-1]
+            logger.log(u"Unknown error while accessing " + self.name + ": " + errorstring, logger.ERROR)
+
+        return parsedJSON
+
+    def _get_title_and_url(self, parsedJSON):
+
+        # The BTN API gives a lot of information in response,
+        # however SickRage is built mostly around Scene or
+        # release names, which is why we are using them here.
+
+        if 'ReleaseName' in parsedJSON and parsedJSON['ReleaseName']:
+            title = parsedJSON['ReleaseName']
+
+        else:
+            # If we don't have a release name we need to get creative
+            title = u''
+            if 'Series' in parsedJSON:
+                title += parsedJSON['Series']
+            if 'GroupName' in parsedJSON:
+                title += '.' + parsedJSON['GroupName'] if title else parsedJSON['GroupName']
+            if 'Resolution' in parsedJSON:
+                title += '.' + parsedJSON['Resolution'] if title else parsedJSON['Resolution']
+            if 'Source' in parsedJSON:
+                title += '.' + parsedJSON['Source'] if title else parsedJSON['Source']
+            if 'Codec' in parsedJSON:
+                title += '.' + parsedJSON['Codec'] if title else parsedJSON['Codec']
+            if title:
+                title = title.replace(' ', '.')
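+            # e.g. {'Series': 'Show Name', 'GroupName': 'GRP',
+            #       'Resolution': '720p'} builds 'Show.Name.GRP.720p'.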
+
+        url = None
+        if 'DownloadURL' in parsedJSON:
+            url = parsedJSON['DownloadURL']
+            if url:
+                # unescaped / is valid in JSON, but it can be escaped
+                url = url.replace("\\/", "/")
+
+        return (title, url)
+
+    def _get_season_search_strings(self, ep_obj):
+        search_params = []
+        current_params = {'category': 'Season'}
+
+        # Search for entire seasons: no need to do special things for air by date or sports shows
+        if ep_obj.show.air_by_date or ep_obj.show.sports:
+            # Search for the year of the air by date show
+            current_params['name'] = str(ep_obj.airdate).split('-')[0]
+        elif ep_obj.show.is_anime:
+            current_params['name'] = "%d" % ep_obj.scene_absolute_number
+        else:
+            current_params['name'] = 'Season ' + str(ep_obj.scene_season)
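+        # i.e. the name filter is 'Season <n>' for normal shows, the airdate
+        # year for air-by-date/sports shows, or the scene absolute number
+        # for anime.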
+
+        # search
+        if ep_obj.show.indexer == 1:
+            current_params['tvdb'] = ep_obj.show.indexerid
+            search_params.append(current_params)
+        elif ep_obj.show.indexer == 2:
+            current_params['tvrage'] = ep_obj.show.indexerid
+            search_params.append(current_params)
+        else:
+            name_exceptions = list(
+                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
+            for name in name_exceptions:
+                # Search by name if we don't have tvdb or tvrage id.
+                # Append a copy: otherwise every entry would alias the same
+                # dict and end up with the last exception's name.
+                current_params['series'] = sanitizeSceneName(name)
+                search_params.append(dict(current_params))
+
+        return search_params
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        if not ep_obj:
+            return [{}]
+
+        to_return = []
+        search_params = {'category': 'Episode'}
+
+        # episode
+        if ep_obj.show.air_by_date or ep_obj.show.sports:
+            date_str = str(ep_obj.airdate)
+
+            # BTN uses dots in dates, we just search for the date since that
+            # combined with the series identifier should result in just one episode
+            search_params['name'] = date_str.replace('-', '.')
+        elif ep_obj.show.anime:
+            search_params['name'] = "%i" % int(ep_obj.scene_absolute_number)
+        else:
+            # Do a general name search for the episode, formatted like SXXEYY
+            search_params['name'] = "S%02dE%02d" % (ep_obj.scene_season, ep_obj.scene_episode)
+
+        # search
+        if ep_obj.show.indexer == 1:
+            search_params['tvdb'] = ep_obj.show.indexerid
+            to_return.append(search_params)
+        elif ep_obj.show.indexer == 2:
+            search_params['tvrage'] = ep_obj.show.indexerid
+            to_return.append(search_params)
+        else:
+            # add new query string for every exception
+            name_exceptions = list(
+                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
+            for cur_exception in name_exceptions:
+                search_params['series'] = sanitizeSceneName(cur_exception)
+                # Append a copy so each exception keeps its own 'series'.
+                to_return.append(dict(search_params))
+
+        return to_return
+
+    def _doGeneralSearch(self, search_string):
+        # 'search' matches as broadly as it can: it may hit the episode
+        # overview as well as the title, for example, so use with caution!
+        return self._doSearch({'search': search_string})
+
+    def findPropers(self, search_date=None):
+        results = []
+
+        search_terms = ['%.proper.%', '%.repack.%']
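+        # '%' appears to act as a wildcard in BTN's release filter, so these
+        # terms should match any release name containing '.proper.' or
+        # '.repack.'.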
+
+        for term in search_terms:
+            for item in self._doSearch({'release': term}, age=4 * 24 * 60 * 60):
+                if item['Time']:
+                    try:
+                        result_date = datetime.fromtimestamp(float(item['Time']))
+                    except TypeError:
+                        result_date = None
+
+                    if result_date:
+                        if not search_date or result_date > search_date:
+                            title, url = self._get_title_and_url(item)
+                            results.append(classes.Proper(title, url, result_date, self.show))
+
+        return results
+
+    def seedRatio(self):
+        return self.ratio
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+
+        self._checkAuth()
+        self.show = show
+
+        results = {}
+        itemList = []
+
+        searched_scene_season = None
+        for epObj in episodes:
+            # search cache for episode result
+            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
+            if cacheResult:
+                if epObj.episode not in results:
+                    results[epObj.episode] = cacheResult
+                else:
+                    results[epObj.episode].extend(cacheResult)
+
+                # found result, search next episode
+                continue
+
+            # skip if season already searched
+            if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
+                continue
+
+            # mark season searched for season pack searches so we can skip later on
+            searched_scene_season = epObj.scene_season
+
+            if search_mode == 'sponly':
+                # get season search results
+                for curString in self._get_season_search_strings(epObj):
+                    itemList += self._doSearch(curString, search_mode, len(episodes))
+            else:
+                # get single episode search results
+                for curString in self._get_episode_search_strings(epObj):
+                    itemList += self._doSearch(curString, search_mode, len(episodes))
+
+        # if we found what we needed already from cache then return results and exit
+        if len(results) == len(episodes):
+            return results
+
+        # sort list by quality
+        if len(itemList):
+            items = {}
+            itemsUnknown = []
+            for item in itemList:
+                quality = self.getQuality(item, anime=show.is_anime)
+                if quality == Quality.UNKNOWN:
+                    itemsUnknown += [item]
+                else:
+                    if quality not in items:
+                        items[quality] = [item]
+                    else:
+                        items[quality].append(item)
+
+            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
+            itemList += itemsUnknown if itemsUnknown else []
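+            # Highest-quality groups now come first; unknown-quality items
+            # are kept at the end of the list as a last resort.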
+
+        # filter results
+        cl = []
+        for item in itemList:
+            (title, url) = self._get_title_and_url(item)
+
+            # parse the file name
+            try:
+                myParser = NameParser(False, convert=True)
+                parse_result = myParser.parse(title)
+            except InvalidNameException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)  # @UndefinedVariable
+                continue
+            except InvalidShowException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
+                continue
+
+            showObj = parse_result.show
+            quality = parse_result.quality
+            release_group = parse_result.release_group
+            version = parse_result.version
+
+            addCacheEntry = False
+            if not (showObj.air_by_date or showObj.sports):
+                if search_mode == 'sponly':
+                    if len(parse_result.episode_numbers):
+                        logger.log(
+                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    if len(parse_result.episode_numbers) and (
+                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
+                                                                                 ep.scene_episode in parse_result.episode_numbers]):
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                else:
+                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
+                                                                                                     episodes if
+                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
+                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    # we just use the existing info for normal searches
+                    actual_season = parse_result.season_number
+                    actual_episodes = parse_result.episode_numbers
+            else:
+                if not (parse_result.is_air_by_date):
+                    logger.log(
+                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
+                        logger.DEBUG)
+                    addCacheEntry = True
+                else:
+                    airdate = parse_result.air_date.toordinal()
+                    myDB = db.DBConnection()
+                    sql_results = myDB.select(
+                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
+                        [showObj.indexerid, airdate])
+
+                    if len(sql_results) != 1:
+                        logger.log(
+                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
+                            logger.WARNING)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    actual_season = int(sql_results[0]["season"])
+                    actual_episodes = [int(sql_results[0]["episode"])]
+
+            # add parsed result to cache for usage later on
+            if addCacheEntry:
+                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
+                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
+                if ci is not None:
+                    cl.append(ci)
+                continue
+
+            # make sure we want the episode
+            wantEp = True
+            for epNo in actual_episodes:
+                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
+                    wantEp = False
+                    break
+
+            if not wantEp:
+                logger.log(
+                    u"Ignoring result " + title + " because we don't want an episode that is " +
+                    Quality.qualityStrings[
+                        quality], logger.DEBUG)
+
+                continue
+
+            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
+
+            # make a result object
+            epObj = []
+            for curEp in actual_episodes:
+                epObj.append(showObj.getEpisode(actual_season, curEp))
+
+            result = self.getResult(epObj)
+            result.show = showObj
+            result.url = url
+            result.name = title
+            result.quality = quality
+            result.release_group = release_group
+            result.version = version
+            result.content = None
+
+            if len(epObj) == 1:
+                epNum = epObj[0].episode
+                logger.log(u"Single episode result.", logger.DEBUG)
+            elif len(epObj) > 1:
+                epNum = MULTI_EP_RESULT
+                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
+                    parse_result.episode_numbers), logger.DEBUG)
+            elif len(epObj) == 0:
+                epNum = SEASON_RESULT
+                logger.log(u"Separating full season result to check for later", logger.DEBUG)
+
+            if epNum not in results:
+                results[epNum] = [result]
+            else:
+                results[epNum].append(result)
+
+        # check if we have items to add to cache
+        if len(cl) > 0:
+            myDB = self.cache._getDB()
+            myDB.mass_action(cl)
+
+        return results
+
+
+class BTNCache(tvcache.TVCache):
+    def __init__(self, provider):
+        tvcache.TVCache.__init__(self, provider)
+
+        # At least 15 minutes between queries
+        self.minTime = 15
+
+    def _getRSSData(self):
+        # Get the torrents uploaded since last check.
+        seconds_since_last_update = math.ceil(time.time() - time.mktime(self._getLastUpdate().timetuple()))
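+        # e.g. a last update 10 minutes ago gives 600 seconds, which the
+        # check below bumps up to the 900-second (15-minute) floor.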
+
+        # default to 15 minutes
+        seconds_minTime = self.minTime * 60
+        if seconds_since_last_update < seconds_minTime:
+            seconds_since_last_update = seconds_minTime
+
+        # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, older things will need to be done through backlog
+        if seconds_since_last_update > 86400:
+            logger.log(
+                u"The last known successful update on " + self.provider.name + " was more than 24 hours ago, only trying to fetch the last 24 hours!",
+                logger.WARNING)
+            seconds_since_last_update = 86400
+
+        return {'entries': self.provider._doSearch(search_params=None, age=seconds_since_last_update)}
+
+
+provider = BTNProvider()
diff --git a/sickbeard/providers/ezrss.py b/sickbeard/providers/ezrss.py
index f35a885c902766735095bdeef727c07613c05b4d..d0307c10c7dddbaa83251f9792df8b9e837d79b1 100644
--- a/sickbeard/providers/ezrss.py
+++ b/sickbeard/providers/ezrss.py
@@ -1,178 +1,178 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import urllib
-import re
-
-try:
-    import xml.etree.cElementTree as etree
-except ImportError:
-    import elementtree.ElementTree as etree
-
-import sickbeard
-import generic
-
-from sickbeard.common import Quality
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import helpers
-
-
-class EZRSSProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        self.urls = {'base_url': 'https://www.ezrss.it/'}
-
-        self.url = self.urls['base_url']
-
-        generic.TorrentProvider.__init__(self, "EZRSS")
-
-        self.supportsBacklog = True
-        self.enabled = False
-        self.ratio = None
-
-        self.cache = EZRSSCache(self)
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'ezrss.png'
-
-    def getQuality(self, item, anime=False):
-
-        try:
-            quality = Quality.sceneQuality(item.filename, anime)
-        except:
-            quality = Quality.UNKNOWN
-
-        return quality
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False):
-
-        self.show = show
-
-        results = {}
-
-        if show.air_by_date or show.sports:
-            logger.log(self.name + u" doesn't support air-by-date or sports backloging because of limitations on their RSS search.",
-                       logger.WARNING)
-            return results
-
-        results = generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch)
-
-        return results
-
-    def _get_season_search_strings(self, ep_obj):
-
-        params = {}
-
-        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
-
-        if ep_obj.show.air_by_date or ep_obj.show.sports:
-            params['season'] = str(ep_obj.airdate).split('-')[0]
-        elif ep_obj.show.anime:
-            params['season'] = "%d" % ep_obj.scene_absolute_number
-        else:
-            params['season'] = ep_obj.scene_season
-
-        return [params]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        params = {}
-
-        if not ep_obj:
-            return params
-
-        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
-
-        if self.show.air_by_date or self.show.sports:
-            params['date'] = str(ep_obj.airdate)
-        elif self.show.anime:
-            params['episode'] = "%i" % int(ep_obj.scene_absolute_number)
-        else:
-            params['season'] = ep_obj.scene_season
-            params['episode'] = ep_obj.scene_episode
-
-        return [params]
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        params = {"mode": "rss"}
-
-        if search_params:
-            params.update(search_params)
-
-        search_url = self.url + 'search/index.php?' + urllib.urlencode(params)
-
-        logger.log(u"Search string: " + search_url, logger.DEBUG)
-
-        results = []
-        for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
-
-            (title, url) = self._get_title_and_url(curItem)
-
-            if title and url:
-                logger.log(u"RSS Feed provider: [" + self.name + "] Attempting to add item to cache: " + title, logger.DEBUG)
-                results.append(curItem)
-
-        return results
-
-    def _get_title_and_url(self, item):
-        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
-
-        try:
-            new_title = self._extract_name_from_filename(item.filename)
-        except:
-            new_title = None
-
-        if new_title:
-            title = new_title
-            logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG)
-
-        return (title, url)
-
-    def _extract_name_from_filename(self, filename):
-        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
-        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
-        match = re.match(name_regex, filename, re.I)
-        if match:
-            return match.group(1)
-        return None
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class EZRSSCache(tvcache.TVCache):
-    def __init__(self, provider):
-
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll EZRSS every 15 minutes max
-        self.minTime = 15
-
-    def _getRSSData(self):
-
-        rss_url = self.provider.url + 'feed/'
-        logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
-
-        return self.getRSSFeed(rss_url)
-
-provider = EZRSSProvider()
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+import re
+
+try:
+    import xml.etree.cElementTree as etree
+except ImportError:
+    import elementtree.ElementTree as etree
+
+import sickbeard
+import generic
+
+from sickbeard.common import Quality
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import helpers
+
+
+class EZRSSProvider(generic.TorrentProvider):
+    def __init__(self):
+
+        self.urls = {'base_url': 'https://www.ezrss.it/'}
+
+        self.url = self.urls['base_url']
+
+        generic.TorrentProvider.__init__(self, "EZRSS")
+
+        self.supportsBacklog = True
+        self.enabled = False
+        self.ratio = None
+
+        self.cache = EZRSSCache(self)
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'ezrss.png'
+
+    def getQuality(self, item, anime=False):
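+        """Derive the quality from the torrent item's filename attribute,
+        falling back to Quality.UNKNOWN when it is missing or unparsable."""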
+
+        try:
+            quality = Quality.sceneQuality(item.filename, anime)
+        # Don't use a bare except so KeyboardInterrupt/SystemExit propagate.
+        except Exception:
+            quality = Quality.UNKNOWN
+
+        return quality
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+
+        self.show = show
+
+        results = {}
+
+        if show.air_by_date or show.sports:
+            logger.log(self.name + u" doesn't support air-by-date or sports backlogging because of limitations on their RSS search.",
+                       logger.WARNING)
+            return results
+
+        results = generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
+
+        return results
+
+    def _get_season_search_strings(self, ep_obj):
+
+        params = {}
+
+        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
+
+        if ep_obj.show.air_by_date or ep_obj.show.sports:
+            params['season'] = str(ep_obj.airdate).split('-')[0]
+        elif ep_obj.show.anime:
+            params['season'] = "%d" % ep_obj.scene_absolute_number
+        else:
+            params['season'] = ep_obj.scene_season
+
+        return [params]
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        params = {}
+
+        if not ep_obj:
+            return params
+
+        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
+
+        if self.show.air_by_date or self.show.sports:
+            params['date'] = str(ep_obj.airdate)
+        elif self.show.anime:
+            params['episode'] = "%i" % int(ep_obj.scene_absolute_number)
+        else:
+            params['season'] = ep_obj.scene_season
+            params['episode'] = ep_obj.scene_episode
+
+        return [params]
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        params = {"mode": "rss"}
+
+        if search_params:
+            params.update(search_params)
+
+        search_url = self.url + 'search/index.php?' + urllib.urlencode(params)
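+        # e.g. {'mode': 'rss', 'show_name': 'Show Name', 'season': 3} yields
+        # search/index.php?mode=rss&show_name=Show+Name&season=3 appended to
+        # the base URL (parameter order follows dict ordering and may vary).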
+
+        logger.log(u"Search string: " + search_url, logger.DEBUG)
+
+        results = []
+        for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
+
+            (title, url) = self._get_title_and_url(curItem)
+
+            if title and url:
+                logger.log(u"RSS Feed provider: [" + self.name + "] Attempting to add item to cache: " + title, logger.DEBUG)
+                results.append(curItem)
+
+        return results
+
+    def _get_title_and_url(self, item):
+        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
+
+        try:
+            new_title = self._extract_name_from_filename(item.filename)
+        except Exception:
+            new_title = None
+
+        if new_title:
+            title = new_title
+            logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG)
+
+        return (title, url)
+
+    def _extract_name_from_filename(self, filename):
+        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
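+        # e.g. 'Show.Name.S01E01.720p.[eztv].torrent' -> 'Show.Name.S01E01.720p'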
+        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
+        match = re.match(name_regex, filename, re.I)
+        if match:
+            return match.group(1)
+        return None
+
+    def seedRatio(self):
+        return self.ratio
+
+
+class EZRSSCache(tvcache.TVCache):
+    def __init__(self, provider):
+
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll EZRSS every 15 minutes max
+        self.minTime = 15
+
+    def _getRSSData(self):
+
+        rss_url = self.provider.url + 'feed/'
+        logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
+
+        return self.getRSSFeed(rss_url)
+
+provider = EZRSSProvider()
diff --git a/sickbeard/providers/generic.py b/sickbeard/providers/generic.py
index a835dad70095225e47b6fa55cb07bb62ab4a4fc1..6fbd54df8d210a5d92213940fbe0d6b3d86c2eab 100644
--- a/sickbeard/providers/generic.py
+++ b/sickbeard/providers/generic.py
@@ -1,520 +1,523 @@
-# coding=utf-8
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import datetime
-import os
-import re
-import itertools
-import urllib
-
-import sickbeard
-import requests
-
-from sickbeard import helpers, classes, logger, db
-from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
-from sickbeard import tvcache
-from sickbeard import encodingKludge as ek
-from sickbeard.exceptions import ex
-from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-from sickbeard.common import Quality
-
-from hachoir_parser import createParser
-from base64 import b16encode, b32decode
-
-class GenericProvider:
-    NZB = "nzb"
-    TORRENT = "torrent"
-
-    def __init__(self, name):
-        # these need to be set in the subclass
-        self.providerType = None
-        self.name = name
-
-        self.proxy = ProviderProxy()
-        self.urls = {}
-        self.url = ''
-
-        self.show = None
-
-        self.supportsBacklog = False
-        self.supportsAbsoluteNumbering = False
-        self.anime_only = False
-
-        self.search_mode = None
-        self.search_fallback = False
-        self.enable_daily = False
-        self.enable_backlog = False
-
-        self.cache = tvcache.TVCache(self)
-
-        self.session = requests.session()
-
-        self.headers = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': USER_AGENT}
-
-    def getID(self):
-        return GenericProvider.makeID(self.name)
-
-    @staticmethod
-    def makeID(name):
-        return re.sub("[^\w\d_]", "_", name.strip().lower())
-
-    def imageName(self):
-        return self.getID() + '.png'
-
-    def _checkAuth(self):
-        return True
-
-    def _doLogin(self):
-        return True
-
-    def isActive(self):
-        if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
-            return self.isEnabled()
-        elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
-            return self.isEnabled()
-        else:
-            return False
-
-    def isEnabled(self):
-        """
-        This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
-        """
-        return False
-
-    def getResult(self, episodes):
-        """
-        Returns a result of the correct type for this provider
-        """
-
-        if self.providerType == GenericProvider.NZB:
-            result = classes.NZBSearchResult(episodes)
-        elif self.providerType == GenericProvider.TORRENT:
-            result = classes.TorrentSearchResult(episodes)
-        else:
-            result = classes.SearchResult(episodes)
-
-        result.provider = self
-
-        return result
-
-    def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
-        """
-        By default this is just a simple urlopen call but this method should be overridden
-        for providers with special URL requirements (like cookies)
-        """
-
-        # check for auth
-        if not self._doLogin():
-            return
-
-        if self.proxy.isEnabled():
-            self.headers.update({'Referer': self.proxy.getProxyURL()})
-
-        return helpers.getURL(self.proxy._buildURL(url), post_data=post_data, params=params, headers=self.headers, timeout=timeout,
-                              session=self.session, json=json)
-
-    def downloadResult(self, result):
-        """
-        Save the result to disk.
-        """
-
-        # check for auth
-        if not self._doLogin():
-            return False
-
-        if self.providerType == GenericProvider.TORRENT:
-            try:
-                torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
-
-                if len(torrent_hash) == 32:
-                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()
-
-                if not torrent_hash:
-                    logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
-                    return False
-
-                urls = [
-                    'http://torcache.net/torrent/' + torrent_hash + '.torrent',
-                    'http://torrage.com/torrent/' + torrent_hash + '.torrent',
-                    'http://zoink.it/torrent/' + torrent_hash + '.torrent',
-                ]
-            except:
-                urls = [result.url]
-
-            filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
-                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
-        elif self.providerType == GenericProvider.NZB:
-            urls = [result.url]
-
-            filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
-                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
-        else:
-            return
-
-        for url in urls:
-            if helpers.download_file(url, filename, session=self.session):
-                logger.log(u"Downloading a result from " + self.name + " at " + url)
-
-                if self.providerType == GenericProvider.TORRENT:
-                    logger.log(u"Saved magnet link to " + filename, logger.INFO)
-                else:
-                    logger.log(u"Saved result to " + filename, logger.INFO)
-
-                if self._verify_download(filename):
-                    return True
-
-        logger.log(u"Failed to download result", logger.WARNING)
-        return False
-
-    def _verify_download(self, file_name=None):
-        """
-        Checks the saved file to see if it was actually valid, if not then consider the download a failure.
-        """
-
-        # primitive verification of torrents, just make sure we didn't get a text file or something
-        if self.providerType == GenericProvider.TORRENT:
-            try:
-                parser = createParser(file_name)
-                if parser:
-                    mime_type = parser._getMimeType()
-                    try:
-                        parser.stream._input.close()
-                    except:
-                        pass
-                    if mime_type == 'application/x-bittorrent':
-                        return True
-            except Exception as e:
-                logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
-
-            logger.log(u"Result is not a valid torrent file", logger.WARNING)
-            return False
-
-        return True
-
-    def searchRSS(self, episodes):
-        return self.cache.findNeededEpisodes(episodes)
-
-    def getQuality(self, item, anime=False):
-        """
-        Figures out the quality of the given RSS item node
-        
-        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
-        
-        Returns a Quality value obtained from the node's data 
-        """
-        (title, url) = self._get_title_and_url(item)
-        quality = Quality.sceneQuality(title, anime)
-        return quality
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-        return []
-
-    def _get_season_search_strings(self, episode):
-        return []
-
-    def _get_episode_search_strings(self, eb_obj, add_string=''):
-        return []
-
-    def _get_title_and_url(self, item):
-        """
-        Retrieves the title and URL data from the item XML node
-
-        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
-
-        Returns: A tuple containing two strings representing title and URL respectively
-        """
-
-        title = item.get('title')
-        if title:
-            title = u'' + title.replace(' ', '.')
-
-        url = item.get('link')
-        if url:
-            url = url.replace('&amp;', '&')
-
-        return title, url
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False):
-
-        self._checkAuth()
-        self.show = show
-
-        results = {}
-        itemList = []
-
-        searched_scene_season = None
-        for epObj in episodes:
-            # search cache for episode result
-            cacheResult = self.cache.searchCache(epObj, manualSearch)
-            if cacheResult:
-                if epObj.episode not in results:
-                    results[epObj.episode] = cacheResult
-                else:
-                    results[epObj.episode].extend(cacheResult)
-
-                # found result, search next episode
-                continue
-
-            # skip if season already searched
-            if len(episodes) > 1 and searched_scene_season == epObj.scene_season:
-                continue
-
-            # mark season searched for season pack searches so we can skip later on
-            searched_scene_season = epObj.scene_season
-
-            if len(episodes) > 1:
-                # get season search results
-                for curString in self._get_season_search_strings(epObj):
-                    itemList += self._doSearch(curString, search_mode, len(episodes))
-            else:
-                # get single episode search results
-                for curString in self._get_episode_search_strings(epObj):
-                    itemList += self._doSearch(curString, 'eponly', len(episodes))
-
-        # if we found what we needed already from cache then return results and exit
-        if len(results) == len(episodes):
-            return results
-
-        # sort list by quality
-        if len(itemList):
-            items = {}
-            itemsUnknown = []
-            for item in itemList:
-                quality = self.getQuality(item, anime=show.is_anime)
-                if quality == Quality.UNKNOWN:
-                    itemsUnknown += [item]
-                else:
-                    if quality not in items:
-                        items[quality] = [item]
-                    else:
-                        items[quality].append(item)
-
-            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
-            itemList += itemsUnknown if itemsUnknown else []
-
-        # filter results
-        cl = []
-        for item in itemList:
-            (title, url) = self._get_title_and_url(item)
-
-            # parse the file name
-            try:
-                myParser = NameParser(False, convert=True)
-                parse_result = myParser.parse(title)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
-                continue
-
-            showObj = parse_result.show
-            quality = parse_result.quality
-            release_group = parse_result.release_group
-            version = parse_result.version
-
-            addCacheEntry = False
-            if not (showObj.air_by_date or showObj.sports):
-                if search_mode == 'sponly': 
-                    if len(parse_result.episode_numbers):
-                        logger.log(
-                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    if len(parse_result.episode_numbers) and (
-                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
-                                                                                 ep.scene_episode in parse_result.episode_numbers]):
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                else:
-                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
-                                                                                                     episodes if
-                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
-                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    # we just use the existing info for normal searches
-                    actual_season = parse_result.season_number
-                    actual_episodes = parse_result.episode_numbers
-            else:
-                if not (parse_result.is_air_by_date):
-                    logger.log(
-                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
-                        logger.DEBUG)
-                    addCacheEntry = True
-                else:
-                    airdate = parse_result.air_date.toordinal()
-                    myDB = db.DBConnection()
-                    sql_results = myDB.select(
-                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
-                        [showObj.indexerid, airdate])
-
-                    if len(sql_results) != 1:
-                        logger.log(
-                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
-                            logger.WARNING)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    actual_season = int(sql_results[0]["season"])
-                    actual_episodes = [int(sql_results[0]["episode"])]
-
-            # add parsed result to cache for usage later on
-            if addCacheEntry:
-                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
-                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
-                if ci is not None:
-                    cl.append(ci)
-                continue
-
-            # make sure we want the episode
-            wantEp = True
-            for epNo in actual_episodes:
-                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch):
-                    wantEp = False
-                    break
-
-            if not wantEp:
-                logger.log(
-                    u"Ignoring result " + title + " because we don't want an episode that is " +
-                    Quality.qualityStrings[
-                        quality], logger.DEBUG)
-
-                continue
-
-            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
-
-            # make a result object
-            epObj = []
-            for curEp in actual_episodes:
-                epObj.append(showObj.getEpisode(actual_season, curEp))
-
-            result = self.getResult(epObj)
-            result.show = showObj
-            result.url = url
-            result.name = title
-            result.quality = quality
-            result.release_group = release_group
-            result.version = version
-            result.content = None
-
-            if len(epObj) == 1:
-                epNum = epObj[0].episode
-                logger.log(u"Single episode result.", logger.DEBUG)
-            elif len(epObj) > 1:
-                epNum = MULTI_EP_RESULT
-                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
-                    parse_result.episode_numbers), logger.DEBUG)
-            elif len(epObj) == 0:
-                epNum = SEASON_RESULT
-                logger.log(u"Separating full season result to check for later", logger.DEBUG)
-
-            if epNum not in results:
-                results[epNum] = [result]
-            else:
-                results[epNum].append(result)
-
-        # check if we have items to add to cache
-        if len(cl) > 0:
-            myDB = self.cache._getDB()
-            myDB.mass_action(cl)
-
-        return results
-
-    def findPropers(self, search_date=None):
-
-        results = self.cache.listPropers(search_date)
-
-        return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
-                results]
-
-    def seedRatio(self):
-        '''
-        Provider should override this value if custom seed ratio enabled
-        It should return the value of the provider seed ratio
-        '''
-        return ''
-
-
-class NZBProvider(GenericProvider):
-    def __init__(self, name):
-        GenericProvider.__init__(self, name)
-
-        self.providerType = GenericProvider.NZB
-
-
-class TorrentProvider(GenericProvider):
-    def __init__(self, name):
-        GenericProvider.__init__(self, name)
-
-        self.providerType = GenericProvider.TORRENT
-
-class ProviderProxy:
-    def __init__(self):
-        self.Type = 'GlypeProxy'
-        self.param = 'browse.php?u='
-        self.option = '&b=32&f=norefer'
-        self.enabled = False
-        self.url = None
-
-        self.urls = {
-            'getprivate.eu (NL)': 'http://getprivate.eu/',
-            'hideme.nl (NL)': 'http://hideme.nl/',
-            'proxite.eu (DE)': 'http://proxite.eu/',
-            'interproxy.net (EU)': 'http://interproxy.net/',
-        }
-
-    def isEnabled(self):
-        """ Return True if we Choose to call TPB via Proxy """
-        return self.enabled
-
-    def getProxyURL(self):
-        """ Return the Proxy URL Choosen via Provider Setting """
-        return str(self.url)
-
-    def _buildURL(self, url):
-        """ Return the Proxyfied URL of the page """
-        if self.isEnabled():
-            url = self.getProxyURL() + self.param + urllib.quote_plus(url.encode('UTF-8')) + self.option
-            logger.log(u"Proxified URL: " + url, logger.DEBUG)
-
-        return url
-
-    def _buildRE(self, regx):
-        """ Return the Proxyfied RE string """
-        if self.isEnabled():
-            regx = re.sub('//1', self.option, regx).replace('&', '&amp;')
-            logger.log(u"Proxified REGEX: " + regx, logger.DEBUG)
-        else:
-            regx = re.sub('//1', '', regx)
-
-        return regx
+# coding=utf-8
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import datetime
+import os
+import re
+import itertools
+import urllib
+
+import sickbeard
+import requests
+
+from sickbeard import helpers, classes, logger, db
+from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
+from sickbeard import tvcache
+from sickbeard import encodingKludge as ek
+from sickbeard.exceptions import ex
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+from sickbeard.common import Quality
+
+from hachoir_parser import createParser
+from base64 import b16encode, b32decode
+
+class GenericProvider:
+    NZB = "nzb"
+    TORRENT = "torrent"
+
+    def __init__(self, name):
+        # these need to be set in the subclass
+        self.providerType = None
+        self.name = name
+
+        self.proxy = ProviderProxy()
+        self.proxyGlypeProxySSLwarning = None
+        self.urls = {}
+        self.url = ''
+
+        self.show = None
+
+        self.supportsBacklog = False
+        self.supportsAbsoluteNumbering = False
+        self.anime_only = False
+
+        self.search_mode = None
+        self.search_fallback = False
+        self.enable_daily = False
+        self.enable_backlog = False
+
+        self.cache = tvcache.TVCache(self)
+
+        self.session = requests.session()
+
+        self.headers = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': USER_AGENT}
+
+    def getID(self):
+        return GenericProvider.makeID(self.name)
+
+    @staticmethod
+    def makeID(name):
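+        # lowercase the name and collapse every non-word character to '_' so it
+        # is safe to use as a config key or image filename stem (see imageName)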
+        return re.sub("[^\w\d_]", "_", name.strip().lower())
+
+    def imageName(self):
+        return self.getID() + '.png'
+
+    def _checkAuth(self):
+        return True
+
+    def _doLogin(self):
+        return True
+
+    def isActive(self):
+        if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
+            return self.isEnabled()
+        elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
+            return self.isEnabled()
+        else:
+            return False
+
+    def isEnabled(self):
+        """
+        This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
+        """
+        return False
+
+    def getResult(self, episodes):
+        """
+        Returns a result of the correct type for this provider
+        """
+
+        if self.providerType == GenericProvider.NZB:
+            result = classes.NZBSearchResult(episodes)
+        elif self.providerType == GenericProvider.TORRENT:
+            result = classes.TorrentSearchResult(episodes)
+        else:
+            result = classes.SearchResult(episodes)
+
+        result.provider = self
+
+        return result
+
+    def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
+        """
+        By default this is just a simple urlopen call but this method should be overridden
+        for providers with special URL requirements (like cookies)
+        """
+
+        # check for auth
+        if not self._doLogin():
+            return
+
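+        # when a Glype web proxy is configured, every request is rewritten to go
+        # through it, and the proxy's SSL warning page is acknowledged up front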
+        if self.proxy.isEnabled():
+            self.headers.update({'Referer': self.proxy.getProxyURL()})
+            # GlypeProxy SSL warning message
+            self.proxyGlypeProxySSLwarning = self.proxy.getProxyURL() + 'includes/process.php?action=sslagree&submit=Continue anyway...'
+
+        return helpers.getURL(self.proxy._buildURL(url), post_data=post_data, params=params, headers=self.headers, timeout=timeout,
+                              session=self.session, json=json, proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning)
+
+    def downloadResult(self, result):
+        """
+        Save the result to disk.
+        """
+
+        # check for auth
+        if not self._doLogin():
+            return False
+
+        if self.providerType == GenericProvider.TORRENT:
+            try:
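+                # magnet links carry the infohash as 40 hex chars or as a 32-char
+                # base32 string; normalize base32 to hex, which is the form the
+                # cache URLs below are built from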
+                torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
+
+                if len(torrent_hash) == 32:
+                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()
+
+                if not torrent_hash:
+                    logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
+                    return False
+
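+                # public .torrent caches that can serve a file for a bare infohash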
+                urls = [
+                    'http://torcache.net/torrent/' + torrent_hash + '.torrent',
+                    'http://torrage.com/torrent/' + torrent_hash + '.torrent',
+                    'http://zoink.it/torrent/' + torrent_hash + '.torrent',
+                ]
+            except:
+                urls = [result.url]
+
+            filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
+                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
+        elif self.providerType == GenericProvider.NZB:
+            urls = [result.url]
+
+            filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
+                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
+        else:
+            return
+
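+        # try each candidate URL in turn; keep the first download that verifies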
+        for url in urls:
+            if helpers.download_file(url, filename, session=self.session):
+                logger.log(u"Downloading a result from " + self.name + " at " + url)
+
+                if self.providerType == GenericProvider.TORRENT:
+                    logger.log(u"Saved magnet link to " + filename, logger.INFO)
+                else:
+                    logger.log(u"Saved result to " + filename, logger.INFO)
+
+                if self._verify_download(filename):
+                    return True
+
+        logger.log(u"Failed to download result", logger.WARNING)
+        return False
+
+    def _verify_download(self, file_name=None):
+        """
+        Checks the saved file to see if it was actually valid, if not then consider the download a failure.
+        """
+
+        # primitive verification of torrents, just make sure we didn't get a text file or something
+        if self.providerType == GenericProvider.TORRENT:
+            try:
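+                # let hachoir sniff the real file type; an HTML error page saved
+                # with a .torrent extension won't report the bittorrent MIME type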
+                parser = createParser(file_name)
+                if parser:
+                    mime_type = parser._getMimeType()
+                    try:
+                        parser.stream._input.close()
+                    except:
+                        pass
+                    if mime_type == 'application/x-bittorrent':
+                        return True
+            except Exception as e:
+                logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
+
+            logger.log(u"Result is not a valid torrent file", logger.WARNING)
+            return False
+
+        return True
+
+    def searchRSS(self, episodes):
+        return self.cache.findNeededEpisodes(episodes)
+
+    def getQuality(self, item, anime=False):
+        """
+        Figures out the quality of the given RSS item node
+        
+        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
+        
+        Returns a Quality value obtained from the node's data 
+        """
+        (title, url) = self._get_title_and_url(item)
+        quality = Quality.sceneQuality(title, anime)
+        return quality
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+        return []
+
+    def _get_season_search_strings(self, episode):
+        return []
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+        return []
+
+    def _get_title_and_url(self, item):
+        """
+        Retrieves the title and URL data from the item XML node
+
+        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
+
+        Returns: A tuple containing two strings representing title and URL respectively
+        """
+
+        title = item.get('title')
+        if title:
+            title = u'' + title.replace(' ', '.')
+
+        url = item.get('link')
+        if url:
+            url = url.replace('&amp;', '&')
+
+        return title, url
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+
+        self._checkAuth()
+        self.show = show
+
+        results = {}
+        itemList = []
+
+        searched_scene_season = None
+        for epObj in episodes:
+            # search cache for episode result
+            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
+            if cacheResult:
+                if epObj.episode not in results:
+                    results[epObj.episode] = cacheResult
+                else:
+                    results[epObj.episode].extend(cacheResult)
+
+                # found result, search next episode
+                continue
+
+            # skip if season already searched
+            if len(episodes) > 1 and searched_scene_season == epObj.scene_season:
+                continue
+
+            # mark season searched for season pack searches so we can skip later on
+            searched_scene_season = epObj.scene_season
+
+            if len(episodes) > 1:
+                # get season search results
+                for curString in self._get_season_search_strings(epObj):
+                    itemList += self._doSearch(curString, search_mode, len(episodes))
+            else:
+                # get single episode search results
+                for curString in self._get_episode_search_strings(epObj):
+                    itemList += self._doSearch(curString, 'eponly', len(episodes))
+
+        # if we found what we needed already from cache then return results and exit
+        if len(results) == len(episodes):
+            return results
+
+        # sort list by quality
+        if len(itemList):
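+            # bucket items by detected quality, flatten best-first, and append
+            # unknown-quality items last as a fallback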
+            items = {}
+            itemsUnknown = []
+            for item in itemList:
+                quality = self.getQuality(item, anime=show.is_anime)
+                if quality == Quality.UNKNOWN:
+                    itemsUnknown += [item]
+                else:
+                    if quality not in items:
+                        items[quality] = [item]
+                    else:
+                        items[quality].append(item)
+
+            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
+            itemList += itemsUnknown if itemsUnknown else []
+
+        # filter results
+        cl = []
+        for item in itemList:
+            (title, url) = self._get_title_and_url(item)
+
+            # parse the file name
+            try:
+                myParser = NameParser(False, convert=True)
+                parse_result = myParser.parse(title)
+            except InvalidNameException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
+                continue
+            except InvalidShowException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
+                continue
+
+            showObj = parse_result.show
+            quality = parse_result.quality
+            release_group = parse_result.release_group
+            version = parse_result.version
+
+            addCacheEntry = False
+            if not (showObj.air_by_date or showObj.sports):
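+                # numbered shows: sanity-check the parsed season/episode numbers
+                # against the episodes we are actually searching for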
+                if search_mode == 'sponly':
+                    if len(parse_result.episode_numbers):
+                        logger.log(
+                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    if len(parse_result.episode_numbers) and (
+                            parse_result.season_number not in set([ep.season for ep in episodes])
+                            or not [ep for ep in episodes if ep.scene_episode in parse_result.episode_numbers]):
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                else:
+                    if (not len(parse_result.episode_numbers) and parse_result.season_number
+                            and not [ep for ep in episodes
+                                     if ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]):
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
+                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    # we just use the existing info for normal searches
+                    actual_season = parse_result.season_number
+                    actual_episodes = parse_result.episode_numbers
+            else:
+                if not (parse_result.is_air_by_date):
+                    logger.log(
+                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
+                        logger.DEBUG)
+                    addCacheEntry = True
+                else:
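+                    # date-based result: map the parsed air date back to a real
+                    # season/episode via the local episode database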
+                    airdate = parse_result.air_date.toordinal()
+                    myDB = db.DBConnection()
+                    sql_results = myDB.select(
+                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
+                        [showObj.indexerid, airdate])
+
+                    if len(sql_results) != 1:
+                        logger.log(
+                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
+                            logger.WARNING)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    actual_season = int(sql_results[0]["season"])
+                    actual_episodes = [int(sql_results[0]["episode"])]
+
+            # add parsed result to cache for usage later on
+            if addCacheEntry:
+                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
+                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
+                if ci is not None:
+                    cl.append(ci)
+                continue
+
+            # make sure we want the episode
+            wantEp = True
+            for epNo in actual_episodes:
+                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
+                    wantEp = False
+                    break
+
+            if not wantEp:
+                logger.log(
+                    u"Ignoring result " + title + " because we don't want an episode that is " +
+                    Quality.qualityStrings[
+                        quality], logger.DEBUG)
+
+                continue
+
+            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
+
+            # make a result object
+            epObj = []
+            for curEp in actual_episodes:
+                epObj.append(showObj.getEpisode(actual_season, curEp))
+
+            result = self.getResult(epObj)
+            result.show = showObj
+            result.url = url
+            result.name = title
+            result.quality = quality
+            result.release_group = release_group
+            result.version = version
+            result.content = None
+
+            if len(epObj) == 1:
+                epNum = epObj[0].episode
+                logger.log(u"Single episode result.", logger.DEBUG)
+            elif len(epObj) > 1:
+                epNum = MULTI_EP_RESULT
+                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
+                    parse_result.episode_numbers), logger.DEBUG)
+            elif len(epObj) == 0:
+                epNum = SEASON_RESULT
+                logger.log(u"Separating full season result to check for later", logger.DEBUG)
+
+            if epNum not in results:
+                results[epNum] = [result]
+            else:
+                results[epNum].append(result)
+
+        # check if we have items to add to cache
+        if len(cl) > 0:
+            myDB = self.cache._getDB()
+            myDB.mass_action(cl)
+
+        return results
+
+    def findPropers(self, search_date=None):
+
+        results = self.cache.listPropers(search_date)
+
+        return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
+                results]
+
+    def seedRatio(self):
+        '''
+        Providers should override this if a custom seed ratio is enabled.
+        It should return the provider's seed ratio value.
+        '''
+        return ''
+
+
+class NZBProvider(GenericProvider):
+    def __init__(self, name):
+        GenericProvider.__init__(self, name)
+
+        self.providerType = GenericProvider.NZB
+
+
+class TorrentProvider(GenericProvider):
+    def __init__(self, name):
+        GenericProvider.__init__(self, name)
+
+        self.providerType = GenericProvider.TORRENT
+
+
+class ProviderProxy:
+    def __init__(self):
+        self.Type = 'GlypeProxy'
+        self.param = 'browse.php?u='
+        self.option = '&b=32&f=norefer'
+        self.enabled = False
+        self.url = None
+
+        self.urls = {
+            'getprivate.eu (NL)': 'http://getprivate.eu/',
+            'hideme.nl (NL)': 'http://hideme.nl/',
+            'proxite.eu (DE)': 'http://proxite.eu/',
+            'interproxy.net (EU)': 'http://interproxy.net/',
+        }
+
+    def isEnabled(self):
+        """ Return True if we Choose to call TPB via Proxy """
+        return self.enabled
+
+    def getProxyURL(self):
+        """ Return the Proxy URL Choosen via Provider Setting """
+        return str(self.url)
+
+    def _buildURL(self, url):
+        """ Return the Proxyfied URL of the page """
+        if self.isEnabled():
+            url = self.getProxyURL() + self.param + urllib.quote_plus(url.encode('UTF-8')) + self.option
+            logger.log(u"Proxified URL: " + url, logger.DEBUG)
+
+        return url
+
+    def _buildRE(self, regx):
+        """ Return the Proxyfied RE string """
+        if self.isEnabled():
+            regx = re.sub('//1', self.option, regx).replace('&', '&amp;')
+            logger.log(u"Proxified REGEX: " + regx, logger.DEBUG)
+        else:
+            regx = re.sub('//1', '', regx)
+
+        return regx
diff --git a/sickbeard/providers/hdtorrents.py b/sickbeard/providers/hdtorrents.py
index de2972d16b4a95a5503754f6747386af00f17b98..b5d9857ba015c3fb617e6249d2c5f9750eb7fb0c 100644
--- a/sickbeard/providers/hdtorrents.py
+++ b/sickbeard/providers/hdtorrents.py
@@ -23,6 +23,7 @@ import datetime
 import urlparse
 import sickbeard
 import generic
+import urllib
 from sickbeard.common import Quality, cpu_presets
 from sickbeard import logger
 from sickbeard import tvcache
@@ -190,7 +191,7 @@ class HDTorrentsProvider(generic.TorrentProvider):
                 if search_string == '':
                     continue
                 search_string = str(search_string).replace('.', ' ')
-                searchURL = self.urls['search'] % (search_string, self.categories)
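+                # URL-encode the search string so spaces and special characters
+                # survive the query interpolation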
+                searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories)
 
                 logger.log(u"Search string: " + searchURL, logger.DEBUG)
 
@@ -233,7 +234,7 @@ class HDTorrentsProvider(generic.TorrentProvider):
                             continue
 
                         item = title, download_url, id, seeders, leechers
-                        logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
+                        logger.log(u"Found result: " + title.replace(' ','.') + " (" + searchURL + ")", logger.DEBUG)
 
                         items[mode].append(item)
 
@@ -272,7 +273,7 @@ class HDTorrentsProvider(generic.TorrentProvider):
                                 continue
 
                             item = title, download_url, id, seeders, leechers
-                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
+                            logger.log(u"Found result: " + title.replace(' ','.') + " (" + searchURL + ")", logger.DEBUG)
 
                             items[mode].append(item)
 
@@ -320,9 +321,15 @@ class HDTorrentsProvider(generic.TorrentProvider):
             if not self.show: continue
             curEp = curshow.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
 
-            searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
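+            # run PROPER and REPACK as two separate searches rather than as a
+            # single 'PROPER|REPACK' alternation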
+            proper_searchString = self._get_episode_search_strings(curEp, add_string='PROPER')
 
-            for item in self._doSearch(searchString[0]):
+            for item in self._doSearch(proper_searchString[0]):
+                title, url = self._get_title_and_url(item)
+                results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
+
+            repack_searchString = self._get_episode_search_strings(curEp, add_string='REPACK')
+
+            for item in self._doSearch(repack_searchString[0]):
                 title, url = self._get_title_and_url(item)
                 results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
 
diff --git a/sickbeard/providers/hounddawgs.py b/sickbeard/providers/hounddawgs.py
index ac51d2d8081dd0cd240b452dfebe40f68728a061..8a7f0328cd149d195a7f943217d8bc9b9667a35f 100644
--- a/sickbeard/providers/hounddawgs.py
+++ b/sickbeard/providers/hounddawgs.py
@@ -22,6 +22,7 @@ import datetime
 import urlparse
 import sickbeard
 import generic
+import urllib
 from sickbeard.common import Quality, cpu_presets
 from sickbeard import logger
 from sickbeard import tvcache
@@ -166,7 +167,7 @@ class HoundDawgsProvider(generic.TorrentProvider):
                 #if mode == 'RSS':
                     #searchURL = self.urls['index'] % self.categories
                 #else:
-                searchURL = self.urls['search'] % (search_string, self.categories)
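+                # URL-encode the search string before interpolating it into the query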
+                searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories)
 
                 logger.log(u"Search string: " + searchURL, logger.DEBUG)
 
@@ -223,7 +224,7 @@ class HoundDawgsProvider(generic.TorrentProvider):
                                 continue
 
                             item = title, download_url
-                            logger.log(u"Found result: " + title + "(" + download_url + ")", logger.DEBUG)
+                            logger.log(u"Found result: " + title.replace(' ','.') + " (" + download_url + ")", logger.DEBUG)
 
                             items[mode].append(item)
 
diff --git a/sickbeard/providers/iptorrents.py b/sickbeard/providers/iptorrents.py
index 21da6aa58cc757f6a37d074198c980aa99e8a428..e9e999bd066e530c1cfa56df2808975b987e07a9 100644
--- a/sickbeard/providers/iptorrents.py
+++ b/sickbeard/providers/iptorrents.py
@@ -1,463 +1,463 @@
-# Author: seedboy
-# URL: https://github.com/seedboy
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import re
-import traceback
-import datetime
-import urlparse
-import itertools
-
-import sickbeard
-import generic
-from sickbeard.common import Quality
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import db
-from sickbeard import classes
-from sickbeard import helpers
-from sickbeard import show_name_helpers
-from sickbeard.exceptions import ex, AuthException
-from sickbeard import clients
-from lib import requests
-from lib.requests import exceptions
-from sickbeard.bs4_parser import BS4Parser
-from lib.unidecode import unidecode
-from sickbeard.helpers import sanitizeSceneName
-from sickbeard.show_name_helpers import allPossibleShowNames
-from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-
-
-class IPTorrentsProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "IPTorrents")
-
-        self.supportsBacklog = True
-
-        self.enabled = False
-        self.username = None
-        self.password = None
-        self.ratio = None
-        self.freeleech = False
-
-        self.cache = IPTorrentsCache(self)
-
-        self.urls = {'base_url': 'https://iptorrents.eu',
-                'login': 'https://iptorrents.eu/torrents/',
-                'search': 'https://iptorrents.eu/torrents/?%s%s&q=%s&qf=ti',
-        }
-
-        self.url = self.urls['base_url']
-
-        self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1'
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'iptorrents.png'
-
-    def getQuality(self, item, anime=False):
-
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def _checkAuth(self):
-
-        if not self.username or not self.password:
-            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
-
-        return True
-
-    def _doLogin(self):
-
-        login_params = {'username': self.username,
-                        'password': self.password,
-                        'login': 'submit',
-        }
-
-        try:
-            response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
-        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
-            logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
-            return False
-
-        if re.search('tries left', response.text) \
-                or re.search('<title>IPT</title>', response.text) \
-                or response.status_code == 401:
-            logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
-            return False
-
-        return True
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        search_string = {'Episode': []}
-
-        if not ep_obj:
-            return []
-
-        if self.show.air_by_date:
-            for show_name in set(allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|')
-                search_string['Episode'].append(ep_string)
-        elif self.show.sports:
-            for show_name in set(allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|') + '|' + \
-                            ep_obj.airdate.strftime('%b')
-                search_string['Episode'].append(ep_string)
-        elif self.show.anime:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            "%i" % int(ep_obj.scene_absolute_number)
-                search_string['Episode'].append(ep_string)
-        else:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
-                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
-                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
-
-                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
-
-        return [search_string]
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False):
-
-        self._checkAuth()
-        self.show = show
-
-        results = {}
-        itemList = []
-
-        if search_mode == 'sponly':
-            logger.log(u"This provider doesn't support season pack. Consider setting Season search mode to episodes only and unchecked Season search fallback", logger.WARNING)
-            search_mode = 'eponly'
-
-        for epObj in episodes:
-            # search cache for episode result
-            cacheResult = self.cache.searchCache(epObj, manualSearch)
-            if cacheResult:
-                if epObj.episode not in results:
-                    results[epObj.episode] = cacheResult
-                else:
-                    results[epObj.episode].extend(cacheResult)
-
-                # found result, search next episode
-                continue
-
-            for curString in self._get_episode_search_strings(epObj):
-                itemList += self._doSearch(curString, 'eponly', len(episodes))
-
-        # if we found what we needed already from cache then return results and exit
-        if len(results) == len(episodes):
-            return results
-
-        # sort list by quality
-        if len(itemList):
-            items = {}
-            itemsUnknown = []
-            for item in itemList:
-                quality = self.getQuality(item, anime=show.is_anime)
-                if quality == Quality.UNKNOWN:
-                    itemsUnknown += [item]
-                else:
-                    if quality not in items:
-                        items[quality] = [item]
-                    else:
-                        items[quality].append(item)
-
-            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
-            itemList += itemsUnknown if itemsUnknown else []
-
-        # filter results
-        cl = []
-        for item in itemList:
-            (title, url) = self._get_title_and_url(item)
-
-            # parse the file name
-            try:
-                myParser = NameParser(False, convert=True)
-                parse_result = myParser.parse(title)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
-                continue
-
-            showObj = parse_result.show
-            quality = parse_result.quality
-            release_group = parse_result.release_group
-            version = parse_result.version
-
-            addCacheEntry = False
-            if not (showObj.air_by_date or showObj.sports):
-                if search_mode == 'sponly': 
-                    if len(parse_result.episode_numbers):
-                        logger.log(
-                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    if len(parse_result.episode_numbers) and (
-                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
-                                                                                 ep.scene_episode in parse_result.episode_numbers]):
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                else:
-                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
-                                                                                                     episodes if
-                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
-                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    # we just use the existing info for normal searches
-                    actual_season = parse_result.season_number
-                    actual_episodes = parse_result.episode_numbers
-            else:
-                if not (parse_result.is_air_by_date):
-                    logger.log(
-                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
-                        logger.DEBUG)
-                    addCacheEntry = True
-                else:
-                    airdate = parse_result.air_date.toordinal()
-                    myDB = db.DBConnection()
-                    sql_results = myDB.select(
-                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
-                        [showObj.indexerid, airdate])
-
-                    if len(sql_results) != 1:
-                        logger.log(
-                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
-                            logger.WARNING)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    actual_season = int(sql_results[0]["season"])
-                    actual_episodes = [int(sql_results[0]["episode"])]
-
-            # add parsed result to cache for usage later on
-            if addCacheEntry:
-                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
-                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
-                if ci is not None:
-                    cl.append(ci)
-                continue
-
-            # make sure we want the episode
-            wantEp = True
-            for epNo in actual_episodes:
-                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch):
-                    wantEp = False
-                    break
-
-            if not wantEp:
-                logger.log(
-                    u"Ignoring result " + title + " because we don't want an episode that is " +
-                    Quality.qualityStrings[
-                        quality], logger.DEBUG)
-
-                continue
-
-            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
-
-            # make a result object
-            epObj = []
-            for curEp in actual_episodes:
-                epObj.append(showObj.getEpisode(actual_season, curEp))
-
-            result = self.getResult(epObj)
-            result.show = showObj
-            result.url = url
-            result.name = title
-            result.quality = quality
-            result.release_group = release_group
-            result.version = version
-            result.content = None
-
-            if len(epObj) == 1:
-                epNum = epObj[0].episode
-                logger.log(u"Single episode result.", logger.DEBUG)
-            elif len(epObj) > 1:
-                epNum = MULTI_EP_RESULT
-                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
-                    parse_result.episode_numbers), logger.DEBUG)
-            elif len(epObj) == 0:
-                epNum = SEASON_RESULT
-                logger.log(u"Separating full season result to check for later", logger.DEBUG)
-
-            if epNum not in results:
-                results[epNum] = [result]
-            else:
-                results[epNum].append(result)
-
-        # check if we have items to add to cache
-        if len(cl) > 0:
-            myDB = self.cache._getDB()
-            myDB.mass_action(cl)
-
-        return results
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        results = []
-        items = {'Season': [], 'Episode': [], 'RSS': []}
-
-        freeleech = '&free=on' if self.freeleech else ''
-
-        if not self._doLogin():
-            return results
-
-        for mode in search_params.keys():
-            for search_string in search_params[mode]:
-                if isinstance(search_string, unicode):
-                    search_string = unidecode(search_string)
-
-                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
-                searchURL = self.urls['search'] % (self.categorie, freeleech, search_string)
-                searchURL += ';o=seeders' if mode != 'RSS' else ''
-
-                logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
-
-                data = self.getURL(searchURL)
-                if not data:
-                    continue
-
-                try:
-                    data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
-                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
-                        if not html:
-                            logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG)
-                            continue
-
-                        if html.find(text='No Torrents Found!'):
-                            logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG)
-                            continue
-
-                        torrent_table = html.find('table', attrs={'class': 'torrents'})
-                        torrents = torrent_table.find_all('tr') if torrent_table else []
-
-                        #Continue only if one Release is found
-                        if len(torrents) < 2:
-                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
-                                       logger.WARNING)
-                            continue
-
-                        for result in torrents[1:]:
-
-                            try:
-                                torrent = result.find_all('td')[1].find('a')
-                                torrent_name = torrent.string
-                                torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href']
-                                torrent_details_url = self.urls['base_url'] + torrent['href']
-                                torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).string)
-                                ## Not used, perhaps in the future ##
-                                #torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
-                                #torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
-                            except (AttributeError, TypeError):
-                                continue
-
-                            # Filter unseeded torrent and torrents with no name/url
-                            if mode != 'RSS' and torrent_seeders == 0:
-                                continue
-
-                            if not torrent_name or not torrent_download_url:
-                                continue
-
-                            item = torrent_name, torrent_download_url
-                            logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")", logger.DEBUG)
-                            items[mode].append(item)
-
-                except Exception, e:
-                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
-
-            results += items[mode]
-
-        return results
-
-    def _get_title_and_url(self, item):
-
-        title, url = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = str(url).replace('&amp;', '&')
-
-        return (title, url)
-
-    def findPropers(self, search_date=datetime.datetime.today()):
-
-        results = []
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select(
-            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
-            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
-            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
-            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
-            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
-        )
-
-        if not sqlResults:
-            return []
-
-        for sqlshow in sqlResults:
-            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
-            if self.show:
-                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
-                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
-
-                for item in self._doSearch(searchString[0]):
-                    title, url = self._get_title_and_url(item)
-                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-class IPTorrentsCache(tvcache.TVCache):
-    def __init__(self, provider):
-
-        tvcache.TVCache.__init__(self, provider)
-
-        # Only poll IPTorrents every 10 minutes max
-        self.minTime = 10
-
-    def _getRSSData(self):
-        search_params = {'RSS': ['']}
-        return {'entries': self.provider._doSearch(search_params)}
-
-
-provider = IPTorrentsProvider()
+# Author: seedboy
+# URL: https://github.com/seedboy
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import traceback
+import datetime
+import urlparse
+import itertools
+
+import sickbeard
+import generic
+from sickbeard.common import Quality
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import db
+from sickbeard import classes
+from sickbeard import helpers
+from sickbeard import show_name_helpers
+from sickbeard.exceptions import ex, AuthException
+from sickbeard import clients
+from lib import requests
+from lib.requests import exceptions
+from sickbeard.bs4_parser import BS4Parser
+from lib.unidecode import unidecode
+from sickbeard.helpers import sanitizeSceneName
+from sickbeard.show_name_helpers import allPossibleShowNames
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+
+
+class IPTorrentsProvider(generic.TorrentProvider):
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "IPTorrents")
+
+        self.supportsBacklog = True
+
+        self.enabled = False
+        self.username = None
+        self.password = None
+        self.ratio = None
+        self.freeleech = False
+
+        self.cache = IPTorrentsCache(self)
+
+        self.urls = {'base_url': 'https://iptorrents.eu',
+                'login': 'https://iptorrents.eu/torrents/',
+                'search': 'https://iptorrents.eu/torrents/?%s%s&q=%s&qf=ti',
+        }
+
+        self.url = self.urls['base_url']
+
+        self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1'
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'iptorrents.png'
+
+    def getQuality(self, item, anime=False):
+
+        quality = Quality.sceneQuality(item[0], anime)
+        return quality
+
+    def _checkAuth(self):
+
+        if not self.username or not self.password:
+            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
+
+        return True
+
+    def _doLogin(self):
+
+        login_params = {'username': self.username,
+                        'password': self.password,
+                        'login': 'submit',
+        }
+
+        try:
+            response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
+        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
+            logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
+            return False
+
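+        # a rejected login either re-renders the login page (the 'tries left'
+        # counter or the bare '<title>IPT</title>') or returns HTTP 401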
+        if re.search('tries left', response.text) \
+                or re.search('<title>IPT</title>', response.text) \
+                or response.status_code == 401:
+            logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
+            return False
+
+        return True
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        search_string = {'Episode': []}
+
+        if not ep_obj:
+            return []
+
+        if self.show.air_by_date:
+            for show_name in set(allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|')
+                search_string['Episode'].append(ep_string)
+        elif self.show.sports:
+            for show_name in set(allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|') + '|' + \
+                            ep_obj.airdate.strftime('%b')
+                search_string['Episode'].append(ep_string)
+        elif self.show.anime:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            "%i" % int(ep_obj.scene_absolute_number)
+                search_string['Episode'].append(ep_string)
+        else:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
+                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
+                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
+
+                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
+
+        return [search_string]
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+
+        self._checkAuth()
+        self.show = show
+
+        results = {}
+        itemList = []
+
+        if search_mode == 'sponly':
+            logger.log(u"This provider doesn't support season pack. Consider setting Season search mode to episodes only and unchecked Season search fallback", logger.WARNING)
+            search_mode = 'eponly'
+
+        for epObj in episodes:
+            # search cache for episode result
+            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
+            if cacheResult:
+                if epObj.episode not in results:
+                    results[epObj.episode] = cacheResult
+                else:
+                    results[epObj.episode].extend(cacheResult)
+
+                # found result, search next episode
+                continue
+
+            for curString in self._get_episode_search_strings(epObj):
+                itemList += self._doSearch(curString, 'eponly', len(episodes))
+
+        # if we found what we needed already from cache then return results and exit
+        if len(results) == len(episodes):
+            return results
+
+        # sort list by quality
+        if len(itemList):
+            items = {}
+            itemsUnknown = []
+            for item in itemList:
+                quality = self.getQuality(item, anime=show.is_anime)
+                if quality == Quality.UNKNOWN:
+                    itemsUnknown += [item]
+                else:
+                    if quality not in items:
+                        items[quality] = [item]
+                    else:
+                        items[quality].append(item)
+
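+            # Flatten the per-quality buckets back into a single list, best
+            # quality first, with unknown-quality items appended at the end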
+            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
+            itemList += itemsUnknown if itemsUnknown else []
+
+        # filter results
+        cl = []
+        for item in itemList:
+            (title, url) = self._get_title_and_url(item)
+
+            # parse the file name
+            try:
+                myParser = NameParser(False, convert=True)
+                parse_result = myParser.parse(title)
+            except InvalidNameException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
+                continue
+            except InvalidShowException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
+                continue
+
+            showObj = parse_result.show
+            quality = parse_result.quality
+            release_group = parse_result.release_group
+            version = parse_result.version
+
+            addCacheEntry = False
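+            # Results that don't match this search are still cached below so a
+            # later search can use them without hitting the provider again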
+            if not (showObj.air_by_date or showObj.sports):
+                if search_mode == 'sponly':
+                    if len(parse_result.episode_numbers):
+                        logger.log(
+                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    if len(parse_result.episode_numbers) and (
+                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
+                                                                                 ep.scene_episode in parse_result.episode_numbers]):
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                else:
+                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
+                                                                                                     episodes if
+                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
+                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    # we just use the existing info for normal searches
+                    actual_season = parse_result.season_number
+                    actual_episodes = parse_result.episode_numbers
+            else:
+                if not (parse_result.is_air_by_date):
+                    logger.log(
+                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
+                        logger.DEBUG)
+                    addCacheEntry = True
+                else:
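+                    # Air-by-date result: map the parsed air date back to a
+                    # season/episode pair via the local DB; anything but exactly
+                    # one match is ambiguous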
+                    airdate = parse_result.air_date.toordinal()
+                    myDB = db.DBConnection()
+                    sql_results = myDB.select(
+                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
+                        [showObj.indexerid, airdate])
+
+                    if len(sql_results) != 1:
+                        logger.log(
+                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
+                            logger.WARNING)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    actual_season = int(sql_results[0]["season"])
+                    actual_episodes = [int(sql_results[0]["episode"])]
+
+            # add parsed result to cache for usage later on
+            if addCacheEntry:
+                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
+                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
+                if ci is not None:
+                    cl.append(ci)
+                continue
+
+            # make sure we want the episode
+            wantEp = True
+            for epNo in actual_episodes:
+                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
+                    wantEp = False
+                    break
+
+            if not wantEp:
+                logger.log(
+                    u"Ignoring result " + title + " because we don't want an episode that is " +
+                    Quality.qualityStrings[
+                        quality], logger.DEBUG)
+
+                continue
+
+            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
+
+            # make a result object
+            epObj = []
+            for curEp in actual_episodes:
+                epObj.append(showObj.getEpisode(actual_season, curEp))
+
+            result = self.getResult(epObj)
+            result.show = showObj
+            result.url = url
+            result.name = title
+            result.quality = quality
+            result.release_group = release_group
+            result.version = version
+            result.content = None
+
+            if len(epObj) == 1:
+                epNum = epObj[0].episode
+                logger.log(u"Single episode result.", logger.DEBUG)
+            elif len(epObj) > 1:
+                epNum = MULTI_EP_RESULT
+                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
+                    parse_result.episode_numbers), logger.DEBUG)
+            elif len(epObj) == 0:
+                epNum = SEASON_RESULT
+                logger.log(u"Separating full season result to check for later", logger.DEBUG)
+
+            if epNum not in results:
+                results[epNum] = [result]
+            else:
+                results[epNum].append(result)
+
+        # check if we have items to add to cache
+        if len(cl) > 0:
+            myDB = self.cache._getDB()
+            myDB.mass_action(cl)
+
+        return results
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        results = []
+        items = {'Season': [], 'Episode': [], 'RSS': []}
+
+        freeleech = '&free=on' if self.freeleech else ''
+
+        if not self._doLogin():
+            return results
+
+        for mode in search_params.keys():
+            for search_string in search_params[mode]:
+                if isinstance(search_string, unicode):
+                    search_string = unidecode(search_string)
+
+                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
+                searchURL = self.urls['search'] % (self.categorie, freeleech, search_string)
+                searchURL += ';o=seeders' if mode != 'RSS' else ''
+
+                logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
+
+                data = self.getURL(searchURL)
+                if not data:
+                    continue
+
+                try:
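+                    # Strip <button> elements from the markup before handing it
+                    # to the parser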
+                    data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
+                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
+                        if not html:
+                            logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG)
+                            continue
+
+                        if html.find(text='No Torrents Found!'):
+                            logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG)
+                            continue
+
+                        torrent_table = html.find('table', attrs={'class': 'torrents'})
+                        torrents = torrent_table.find_all('tr') if torrent_table else []
+
+                        # Continue only if at least one release is found
+                        if len(torrents) < 2:
+                            logger.log(u"The data returned from " + self.name + " does not contain any torrents",
+                                       logger.WARNING)
+                            continue
+
+                        for result in torrents[1:]:
+
+                            try:
+                                torrent = result.find_all('td')[1].find('a')
+                                torrent_name = torrent.string
+                                torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href']
+                                torrent_details_url = self.urls['base_url'] + torrent['href']
+                                torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).string)
+                                ## Not used, perhaps in the future ##
+                                #torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
+                                #torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
+                            except (AttributeError, TypeError):
+                                continue
+
+                            # Filter out unseeded torrents and torrents with no name/url
+                            if mode != 'RSS' and torrent_seeders == 0:
+                                continue
+
+                            if not torrent_name or not torrent_download_url:
+                                continue
+
+                            item = torrent_name, torrent_download_url
+                            logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")", logger.DEBUG)
+                            items[mode].append(item)
+
+                except Exception, e:
+                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+
+            results += items[mode]
+
+        return results
+
+    def _get_title_and_url(self, item):
+
+        title, url = item
+
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        if url:
+            url = str(url).replace('&amp;', '&')
+
+        return (title, url)
+
+    def findPropers(self, search_date=datetime.datetime.today()):
+
+        results = []
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select(
+            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
+            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
+            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
+            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
+            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
+        )
+
+        if not sqlResults:
+            return []
+
+        for sqlshow in sqlResults:
+            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
+            if self.show:
+                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
+                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
+
+                for item in self._doSearch(searchString[0]):
+                    title, url = self._get_title_and_url(item)
+                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
+
+        return results
+
+    def seedRatio(self):
+        return self.ratio
+
+class IPTorrentsCache(tvcache.TVCache):
+    def __init__(self, provider):
+
+        tvcache.TVCache.__init__(self, provider)
+
+        # Only poll IPTorrents every 10 minutes max
+        self.minTime = 10
+
+    def _getRSSData(self):
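+        # An empty query is assumed to return the newest torrents, which is all
+        # the cache update needs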
+        search_params = {'RSS': ['']}
+        return {'entries': self.provider._doSearch(search_params)}
+
+
+provider = IPTorrentsProvider()
diff --git a/sickbeard/providers/morethantv.py b/sickbeard/providers/morethantv.py
new file mode 100755
index 0000000000000000000000000000000000000000..fa48f69efb427c900c27c171d62e220aace13d6f
--- /dev/null
+++ b/sickbeard/providers/morethantv.py
@@ -0,0 +1,321 @@
+# Author: Seamus Wassman
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+# This file was adapted for MoreThanTV from the freshontv scraper by
+# Sparhawk76, this is my first foray into python, so there most likely
+# are some mistakes or things I could have done better.
+
+import re
+import traceback
+import datetime
+import urlparse
+import sickbeard
+import generic
+from sickbeard.common import Quality, cpu_presets
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import db
+from sickbeard import classes
+from sickbeard import helpers
+from sickbeard import show_name_helpers
+from sickbeard.exceptions import ex, AuthException
+from sickbeard import clients
+from lib import requests
+from lib.requests import exceptions
+from sickbeard.bs4_parser import BS4Parser
+from lib.unidecode import unidecode
+from sickbeard.helpers import sanitizeSceneName
+
+
+class MoreThanTVProvider(generic.TorrentProvider):
+
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "MoreThanTV")
+
+        self.supportsBacklog = True
+
+        self.enabled = False
+        self._uid = None
+        self._hash = None
+        self.username = None
+        self.password = None
+        self.ratio = None
+        self.minseed = None
+        self.minleech = None
+        self.freeleech = False
+
+        self.cache = MoreThanTVCache(self)
+
+        self.urls = {'base_url': 'http://www.morethan.tv/',
+                'login': 'http://www.morethan.tv/login.php',
+                'detail': 'http://www.morethan.tv/torrents.php?id=%s',
+                'search': 'http://www.morethan.tv/torrents.php?tags_type=1&order_by=time&order_way=desc&action=basic&searchsubmit=1&searchstr=%s',
+                'download': 'http://www.morethan.tv/torrents.php?action=download&id=%s',
+                }
+
+        self.url = self.urls['base_url']
+
+        self.cookies = None
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'morethantv.png'
+
+    def getQuality(self, item, anime=False):
+
+        quality = Quality.sceneQuality(item[0], anime)
+        return quality
+
+    def _checkAuth(self):
+
+        if not self.username or not self.password:
+            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
+
+        return True
+
+    def _doLogin(self):
+        if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
+            return True
+
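+        # _uid/_hash would allow reusing saved session cookies, but they are never
+        # populated in this adaptation, so the POST login below always runs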
+        if self._uid and self._hash:
+            requests.utils.add_dict_to_cookiejar(self.session.cookies, self.cookies)
+        else:
+            login_params = {'username': self.username,
+                            'password': self.password,
+                            'login': 'submit'
+            }
+
+            if not self.session:
+                self.session = requests.Session()
+
+            try:
+                response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
+            except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
+                logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
+                return False
+
+            if re.search('Your username or password was incorrect.', response.text):
+                logger.log(u'Invalid username or password for ' + self.name + ', check your settings', logger.ERROR)
+                return False
+
+        return True
+
+    def _get_season_search_strings(self, ep_obj):
+
+        search_string = {'Season': []}
+        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+            if ep_obj.show.air_by_date or ep_obj.show.sports:
+                ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
+            elif ep_obj.show.anime:
+                ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
+            else:
+                ep_string = show_name + '.S%02d*' % int(ep_obj.scene_season)  #1) showName SXX
+
+            search_string['Season'].append(re.sub('\.', '+', ep_string))
+
+        return [search_string]
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        search_string = {'Episode': []}
+
+        if not ep_obj:
+            return []
+
+        if self.show.air_by_date:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|')
+                search_string['Episode'].append(ep_string)
+        elif self.show.sports:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|') + '|' + \
+                            ep_obj.airdate.strftime('%b')
+                search_string['Episode'].append(ep_string)
+        elif self.show.anime:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            "%i" % int(ep_obj.scene_absolute_number)
+                search_string['Episode'].append(ep_string)
+        else:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
+                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
+                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
+
+                search_string['Episode'].append(re.sub('\s+', '+', ep_string))
+
+        return [search_string]
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        results = []
+        items = {'Season': [], 'Episode': [], 'RSS': []}
+
+        freeleech = '3' if self.freeleech else '0'
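+        # NOTE: 'freeleech' is computed here but not yet applied to the search URL below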
+
+        if not self._doLogin():
+            return results
+
+        for mode in search_params.keys():
+            for search_string in search_params[mode]:
+
+                if isinstance(search_string, unicode):
+                    search_string = unidecode(search_string)
+
+                searchURL = self.urls['search'] % (search_string)
+
+                logger.log(u"Search string: " + searchURL, logger.DEBUG)
+
+                # returns top 15 results by default, expandable in user profile to 100
+                data = self.getURL(searchURL)
+                if not data:
+                    continue
+
+                try:
+                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
+                        torrent_table = html.find('table', attrs={'class': 'torrent_table'})
+                        torrent_rows = torrent_table.findChildren('tr') if torrent_table else []
+
+                        # Continue only if at least one release is found
+                        if len(torrent_rows) < 2:
+                            logger.log(u"The data returned from " + self.name + " does not contain any torrents",
+                                       logger.DEBUG)
+                            continue
+
+                        # skip colheader
+                        for result in torrent_rows[1:]:
+                            cells = result.findChildren('td')
+
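+                            # Assumed column layout: cells[1] holds the name/download
+                            # links, cells[6]/cells[7] the seeder/leecher counts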
+                            link = cells[1].find('a', attrs={'title': 'Download'})
+                            link_str = str(link['href'])
+
+                            logger.log(u"link=" + link_str, logger.DEBUG)
+
+                            # skip results nuked for poor quality
+                            if cells[1].find('img', alt='Nuked') is not None:
+                                continue
+
+                            torrent_id_long = link['href'].replace('torrents.php?action=download&id=', '')
+                            torrent_id = torrent_id_long.split('&', 1)[0]
+
+                            try:
+                                if link.has_attr('title'):
+                                    title = cells[1].find('a', {'title': 'View torrent'}).contents[0].strip()
+                                else:
+                                    title = link.contents[0]
+                                download_url = self.urls['download'] % (torrent_id_long)
+
+                                # seeder/leecher counts come back as strings; convert
+                                # them so the numeric filter below actually works
+                                seeders = int(cells[6].contents[0])
+                                leechers = int(cells[7].contents[0])
+
+                            except (AttributeError, TypeError, ValueError):
+                                continue
+
+                            # Filter out unseeded torrents
+                            if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
+                                continue
+
+                            if not title or not download_url:
+                                continue
+
+                            item = title, download_url, torrent_id, seeders, leechers
+                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
+
+                            items[mode].append(item)
+
+                except Exception, e:
+                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+
+            # For each search mode, sort all the items by seeders
+            items[mode].sort(key=lambda tup: tup[3], reverse=True)
+
+            results += items[mode]
+
+        return results
+
+    def _get_title_and_url(self, item):
+
+        title, url, id, seeders, leechers = item
+
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        if url:
+            url = str(url).replace('&amp;', '&')
+
+        return (title, url)
+
+    def findPropers(self, search_date=datetime.datetime.today()):
+
+        results = []
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select(
+            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
+            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
+            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
+            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
+            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
+        )
+
+        if not sqlResults:
+            return []
+
+        for sqlshow in sqlResults:
+            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
+            if self.show:
+                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
+
+                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
+
+                for item in self._doSearch(searchString[0]):
+                    title, url = self._get_title_and_url(item)
+                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
+
+        return results
+
+    def seedRatio(self):
+        return self.ratio
+
+
+class MoreThanTVCache(tvcache.TVCache):
+    def __init__(self, provider):
+
+        tvcache.TVCache.__init__(self, provider)
+
+        # poll delay in minutes
+        self.minTime = 20
+
+    def _getRSSData(self):
+        search_params = {'RSS': ['']}
+        return {'entries': self.provider._doSearch(search_params)}
+
+provider = MoreThanTVProvider()
diff --git a/sickbeard/providers/nextgen.py b/sickbeard/providers/nextgen.py
index a8db7dc0414866d69b699c8828ebc9562f8ec49c..c23b93f0034e0841c3735b657958ea602f7a4c5e 100644
--- a/sickbeard/providers/nextgen.py
+++ b/sickbeard/providers/nextgen.py
@@ -197,7 +197,7 @@ class NextGenProvider(generic.TorrentProvider):
 
             for search_string in search_params[mode]:
 
-                searchURL = self.urls['search'] % (search_string, self.categories)
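+                # urllib.quote percent-encodes the search string so spaces and
+                # special characters survive in the URL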
+                searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories)
                 logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
 
                 data = self.getURL(searchURL)
@@ -249,7 +249,7 @@ class NextGenProvider(generic.TorrentProvider):
                                     continue
 
                                 item = torrent_name, torrent_download_url
-                                logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")",
+                                logger.log(u"Found result: " + torrent_name.replace(' ','.') + " (" + torrent_details_url + ")",
                                            logger.DEBUG)
                                 items[mode].append(item)
 
diff --git a/sickbeard/providers/nyaatorrents.py b/sickbeard/providers/nyaatorrents.py
index 464686288cf91666560772d6054714e3b4f86190..5699abe607c3bd3dbbff053e13a93b3b4e0081c7 100644
--- a/sickbeard/providers/nyaatorrents.py
+++ b/sickbeard/providers/nyaatorrents.py
@@ -1,132 +1,132 @@
-# Author: Mr_Orange
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import urllib
-import re
-
-import sickbeard
-import generic
-
-from sickbeard import show_name_helpers
-from sickbeard import logger
-from sickbeard.common import Quality
-from sickbeard import tvcache
-from sickbeard import show_name_helpers
-
-
-class NyaaProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "NyaaTorrents")
-
-        self.supportsBacklog = True
-        self.supportsAbsoluteNumbering = True
-        self.anime_only = True
-        self.enabled = False
-        self.ratio = None
-
-        self.cache = NyaaCache(self)
-
-        self.urls = {'base_url': 'http://www.nyaa.se/'}
-
-        self.url = self.urls['base_url']
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'nyaatorrents.png'
-
-    def getQuality(self, item, anime=False):
-        title = item.get('title')
-        quality = Quality.sceneQuality(title, anime)
-        return quality
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False):
-        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch)
-
-    def _get_season_search_strings(self, ep_obj):
-        return show_name_helpers.makeSceneShowSearchStrings(self.show, anime=True)
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-        return self._get_season_search_strings(ep_obj)
-
-    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
-        if self.show and not self.show.is_anime:
-            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
-            return []
-
-        params = {
-            "term": search_string.encode('utf-8'),
-            "cats": '1_37',  # Limit to English-translated Anime (for now)
-            "sort": '2',     # Sort Descending By Seeders
-        }
-
-        searchURL = self.url + '?page=rss&' + urllib.urlencode(params)
-
-        logger.log(u"Search string: " + searchURL, logger.DEBUG)
-
-        results = []
-        for curItem in self.cache.getRSSFeed(searchURL, items=['entries'])['entries'] or []:
-            (title, url) = self._get_title_and_url(curItem)
-
-            if title and url:
-                results.append(curItem)
-            else:
-                logger.log(
-                    u"The data returned from the " + self.name + " is incomplete, this result is unusable",
-                    logger.DEBUG)
-
-        return results
-
-    def _get_title_and_url(self, item):
-        return generic.TorrentProvider._get_title_and_url(self, item)
-
-    def _extract_name_from_filename(self, filename):
-        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
-        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
-        match = re.match(name_regex, filename, re.I)
-        if match:
-            return match.group(1)
-        return None
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class NyaaCache(tvcache.TVCache):
-    def __init__(self, provider):
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll NyaaTorrents every 15 minutes max
-        self.minTime = 15
-
-    def _getRSSData(self):
-        params = {
-            "page": 'rss',   # Use RSS page
-            "order": '1',    # Sort Descending By Date
-            "cats": '1_37',  # Limit to English-translated Anime (for now)
-        }
-
-        url = self.provider.url + '?' + urllib.urlencode(params)
-
-        logger.log(u"NyaaTorrents cache update URL: " + url, logger.DEBUG)
-
-        return self.getRSSFeed(url)
-
-provider = NyaaProvider()
+# Author: Mr_Orange
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+import re
+
+import sickbeard
+import generic
+
+from sickbeard import show_name_helpers
+from sickbeard import logger
+from sickbeard.common import Quality
+from sickbeard import tvcache
+from sickbeard import show_name_helpers
+
+
+class NyaaProvider(generic.TorrentProvider):
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "NyaaTorrents")
+
+        self.supportsBacklog = True
+        self.supportsAbsoluteNumbering = True
+        self.anime_only = True
+        self.enabled = False
+        self.ratio = None
+
+        self.cache = NyaaCache(self)
+
+        self.urls = {'base_url': 'http://www.nyaa.se/'}
+
+        self.url = self.urls['base_url']
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'nyaatorrents.png'
+
+    def getQuality(self, item, anime=False):
+        title = item.get('title')
+        quality = Quality.sceneQuality(title, anime)
+        return quality
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
+
+    def _get_season_search_strings(self, ep_obj):
+        return show_name_helpers.makeSceneShowSearchStrings(self.show, anime=True)
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+        return self._get_season_search_strings(ep_obj)
+
+    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
+        if self.show and not self.show.is_anime:
+            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
+            return []
+
+        params = {
+            "term": search_string.encode('utf-8'),
+            "cats": '1_37',  # Limit to English-translated Anime (for now)
+            "sort": '2',     # Sort Descending By Seeders
+        }
+
+        searchURL = self.url + '?page=rss&' + urllib.urlencode(params)
+
+        logger.log(u"Search string: " + searchURL, logger.DEBUG)
+
+        results = []
+        for curItem in self.cache.getRSSFeed(searchURL, items=['entries'])['entries'] or []:
+            (title, url) = self._get_title_and_url(curItem)
+
+            if title and url:
+                results.append(curItem)
+            else:
+                logger.log(
+                    u"The data returned from the " + self.name + " is incomplete, this result is unusable",
+                    logger.DEBUG)
+
+        return results
+
+    def _get_title_and_url(self, item):
+        return generic.TorrentProvider._get_title_and_url(self, item)
+
+    def _extract_name_from_filename(self, filename):
+        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
+        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
+        match = re.match(name_regex, filename, re.I)
+        if match:
+            return match.group(1)
+        return None
+
+    def seedRatio(self):
+        return self.ratio
+
+
+class NyaaCache(tvcache.TVCache):
+    def __init__(self, provider):
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll NyaaTorrents every 15 minutes max
+        self.minTime = 15
+
+    def _getRSSData(self):
+        params = {
+            "page": 'rss',   # Use RSS page
+            "order": '1',    # Sort Descending By Date
+            "cats": '1_37',  # Limit to English-translated Anime (for now)
+        }
+
+        url = self.provider.url + '?' + urllib.urlencode(params)
+
+        logger.log(u"NyaaTorrents cache update URL: " + url, logger.DEBUG)
+
+        return self.getRSSFeed(url)
+
+provider = NyaaProvider()
diff --git a/sickbeard/providers/scc.py b/sickbeard/providers/scc.py
index 2e236441211c588153080445c147678435174fb3..ec6a992e7a00d8623d23fcb0e9074b80513eb800 100644
--- a/sickbeard/providers/scc.py
+++ b/sickbeard/providers/scc.py
@@ -23,6 +23,7 @@ import datetime
 import urlparse
 import sickbeard
 import generic
+import urllib
 from sickbeard.common import Quality
 from sickbeard import logger
 from sickbeard import tvcache
@@ -177,11 +178,11 @@ class SCCProvider(generic.TorrentProvider):
                     search_string = unidecode(search_string)
 
                 if mode == 'Season' and search_mode == 'sponly':
-                    searchURLS += [self.urls['archive'] % (search_string)]
+                    searchURLS += [self.urls['archive'] % (urllib.quote(search_string))]
                 else:
-                    searchURLS += [self.urls['search'] % (search_string, self.categories)]
-                    searchURLS += [self.urls['nonscene'] % (search_string)]
-                    searchURLS += [self.urls['foreign'] % (search_string)]
+                    searchURLS += [self.urls['search'] % (urllib.quote(search_string), self.categories)]
+                    searchURLS += [self.urls['nonscene'] % (urllib.quote(search_string))]
+                    searchURLS += [self.urls['foreign'] % (urllib.quote(search_string))]
 
                 for searchURL in searchURLS:
                     logger.log(u"Search string: " + searchURL, logger.DEBUG)
@@ -239,7 +240,7 @@ class SCCProvider(generic.TorrentProvider):
                                 continue
 
                             item = title, download_url, id, seeders, leechers
-                            #logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
+                            logger.log(u"Found result: " + title.replace(' ','.') + " (" + searchURL + ")", logger.DEBUG)
 
                             items[mode].append(item)
 
diff --git a/sickbeard/providers/t411.py b/sickbeard/providers/t411.py
index 1ed51f0a0ad0cda365abfaefdfd0086824ce4487..cce3bf944b207e79e51fd3159f884a7dbb138fa0 100644
--- a/sickbeard/providers/t411.py
+++ b/sickbeard/providers/t411.py
@@ -59,7 +59,7 @@ class T411Provider(generic.TorrentProvider):
 
         self.url = self.urls['base_url']
 
-        self.subcategories = [433, 637, 455]
+        self.subcategories = [433, 637, 455, 639]
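+        # Subcategory ids are site-defined (presumably the site's TV-related categories)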
 
     def isEnabled(self):
         return self.enabled
diff --git a/sickbeard/providers/tokyotoshokan.py b/sickbeard/providers/tokyotoshokan.py
index e5ed69d9b74632cc03bae7fb65a4ffe9da48eff0..3f3089457167fc0a43a248bd29f607d4fc011cdd 100644
--- a/sickbeard/providers/tokyotoshokan.py
+++ b/sickbeard/providers/tokyotoshokan.py
@@ -1,171 +1,171 @@
-# Author: Mr_Orange
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import urllib
-import re
-import traceback
-
-import sickbeard
-import generic
-
-from sickbeard import show_name_helpers
-from sickbeard import logger
-from sickbeard.common import Quality
-from sickbeard import tvcache
-from sickbeard import show_name_helpers, helpers
-from sickbeard.bs4_parser import BS4Parser
-
-
-class TokyoToshokanProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "TokyoToshokan")
-
-        self.supportsBacklog = True
-        self.supportsAbsoluteNumbering = True
-        self.anime_only = True
-        self.enabled = False
-        self.ratio = None
-
-        self.cache = TokyoToshokanCache(self)
-
-        self.urls = {'base_url': 'http://tokyotosho.info/'}
-        self.url = self.urls['base_url']
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'tokyotoshokan.png'
-
-    def _get_title_and_url(self, item):
-
-        title, url = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = url.replace('&amp;', '&')
-
-        return (title, url)
-
-    def seedRatio(self):
-        return self.ratio
-
-    def getQuality(self, item, anime=False):
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False):
-        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch)
-
-    def _get_season_search_strings(self, ep_obj):
-        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
-
-    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
-        if self.show and not self.show.is_anime:
-            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
-            return []
-
-        params = {
-            "terms": search_string.encode('utf-8'),
-            "type": 1, # get anime types
-        }
-
-        searchURL = self.url + 'search.php?' + urllib.urlencode(params)
-
-        data = self.getURL(searchURL)
-
-        logger.log(u"Search string: " + searchURL, logger.DEBUG)
-
-        if not data:
-            return []
-
-        results = []
-        try:
-            with BS4Parser(data, features=["html5lib", "permissive"]) as soup:
-                torrent_table = soup.find('table', attrs={'class': 'listing'})
-                torrent_rows = torrent_table.find_all('tr') if torrent_table else []
-                if torrent_rows: 
-                    if torrent_rows[0].find('td', attrs={'class': 'centertext'}):
-                        a = 1
-                    else:
-                        a = 0
-    
-                    for top, bottom in zip(torrent_rows[a::2], torrent_rows[a::2]):
-                        title = top.find('td', attrs={'class': 'desc-top'}).text
-                        url = top.find('td', attrs={'class': 'desc-top'}).find('a')['href']
-    
-                        if not title or not url:
-                            continue
-    
-                        item = title.lstrip(), url
-                        results.append(item)
-
-        except Exception, e:
-            logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
-
-
-        return results
-
-
-class TokyoToshokanCache(tvcache.TVCache):
-    def __init__(self, provider):
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll NyaaTorrents every 15 minutes max
-        self.minTime = 15
-
-    def _get_title_and_url(self, item):
-        """
-        Retrieves the title and URL data from the item XML node
-
-        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
-
-        Returns: A tuple containing two strings representing title and URL respectively
-        """
-
-        title = item.title if item.title else None
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        url = item.link if item.link else None
-        if url:
-            url = url.replace('&amp;', '&')
-
-        return (title, url)
-
-    def _getRSSData(self):
-        params = {
-            "filter": '1',
-        }
-
-        url = self.provider.url + 'rss.php?' + urllib.urlencode(params)
-
-        logger.log(u"TokyoToshokan cache update URL: " + url, logger.DEBUG)
-
-        return self.getRSSFeed(url)
-
-
-provider = TokyoToshokanProvider()
+# Author: Mr_Orange
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+import re
+import traceback
+
+import sickbeard
+import generic
+
+from sickbeard import show_name_helpers
+from sickbeard import logger
+from sickbeard.common import Quality
+from sickbeard import tvcache
+from sickbeard import show_name_helpers, helpers
+from sickbeard.bs4_parser import BS4Parser
+
+
+class TokyoToshokanProvider(generic.TorrentProvider):
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "TokyoToshokan")
+
+        self.supportsBacklog = True
+        self.supportsAbsoluteNumbering = True
+        self.anime_only = True
+        self.enabled = False
+        self.ratio = None
+
+        self.cache = TokyoToshokanCache(self)
+
+        self.urls = {'base_url': 'http://tokyotosho.info/'}
+        self.url = self.urls['base_url']
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'tokyotoshokan.png'
+
+    def _get_title_and_url(self, item):
+
+        title, url = item
+
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        if url:
+            url = url.replace('&amp;', '&')
+
+        return (title, url)
+
+    def seedRatio(self):
+        return self.ratio
+
+    def getQuality(self, item, anime=False):
+        quality = Quality.sceneQuality(item[0], anime)
+        return quality
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
+
+    def _get_season_search_strings(self, ep_obj):
+        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
+
+    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
+        if self.show and not self.show.is_anime:
+            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
+            return []
+
+        params = {
+            "terms": search_string.encode('utf-8'),
+            "type": 1, # get anime types
+        }
+
+        searchURL = self.url + 'search.php?' + urllib.urlencode(params)
+
+        data = self.getURL(searchURL)
+
+        logger.log(u"Search string: " + searchURL, logger.DEBUG)
+
+        if not data:
+            return []
+
+        results = []
+        try:
+            with BS4Parser(data, features=["html5lib", "permissive"]) as soup:
+                torrent_table = soup.find('table', attrs={'class': 'listing'})
+                torrent_rows = torrent_table.find_all('tr') if torrent_table else []
+                if torrent_rows: 
+                    if torrent_rows[0].find('td', attrs={'class': 'centertext'}):
+                        a = 1
+                    else:
+                        a = 0
+    
+                    # desc-top and desc-bot rows alternate; pair each top row
+                    # with the bottom row that follows it
+                    for top, bottom in zip(torrent_rows[a::2], torrent_rows[a+1::2]):
+                        title = top.find('td', attrs={'class': 'desc-top'}).text
+                        url = top.find('td', attrs={'class': 'desc-top'}).find('a')['href']
+    
+                        if not title or not url:
+                            continue
+    
+                        item = title.lstrip(), url
+                        results.append(item)
+
+        except Exception, e:
+            logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+
+
+        return results
+
+
+class TokyoToshokanCache(tvcache.TVCache):
+    def __init__(self, provider):
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll NyaaTorrents every 15 minutes max
+        self.minTime = 15
+
+    def _get_title_and_url(self, item):
+        """
+        Retrieves the title and URL data from the item XML node
+
+        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
+
+        Returns: A tuple containing two strings representing title and URL respectively
+        """
+
+        title = item.title if item.title else None
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        url = item.link if item.link else None
+        if url:
+            url = url.replace('&amp;', '&')
+
+        return (title, url)
+
+    def _getRSSData(self):
+        params = {
+            "filter": '1',
+        }
+
+        url = self.provider.url + 'rss.php?' + urllib.urlencode(params)
+
+        logger.log(u"TokyoToshokan cache update URL: " + url, logger.DEBUG)
+
+        return self.getRSSFeed(url)
+
+
+provider = TokyoToshokanProvider()
diff --git a/sickbeard/providers/torrentbytes.py b/sickbeard/providers/torrentbytes.py
index e62a91fbe953e5bf678fcef04989d431f22b6164..807097a3dba364d1f4ef192176af7d12f0f8c209 100644
--- a/sickbeard/providers/torrentbytes.py
+++ b/sickbeard/providers/torrentbytes.py
@@ -22,6 +22,7 @@ import datetime
 import urlparse
 import sickbeard
 import generic
+import urllib
 from sickbeard.common import Quality
 from sickbeard import logger
 from sickbeard import tvcache
@@ -160,7 +161,7 @@ class TorrentBytesProvider(generic.TorrentProvider):
                 if isinstance(search_string, unicode):
                     search_string = unidecode(search_string)
 
-                searchURL = self.urls['search'] % (search_string, self.categories)
+                searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories)
 
                 logger.log(u"Search string: " + searchURL, logger.DEBUG)
 
@@ -207,7 +208,7 @@ class TorrentBytesProvider(generic.TorrentProvider):
                                 continue
 
                             item = title, download_url, id, seeders, leechers
-                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
+                            logger.log(u"Found result: " + title.replace(' ','.') + " (" + searchURL + ")", logger.DEBUG)
 
                             items[mode].append(item)
 
diff --git a/sickbeard/providers/torrentleech.py b/sickbeard/providers/torrentleech.py
index 5f4a428d1d56a92f92294ca505d7b375754ba96d..dbd5e4688be9ada61364a31d304f77bec577777d 100644
--- a/sickbeard/providers/torrentleech.py
+++ b/sickbeard/providers/torrentleech.py
@@ -20,7 +20,7 @@ import re
 import traceback
 import datetime
 import urlparse
-
+import urllib
 import sickbeard
 import generic
 
@@ -170,7 +170,7 @@ class TorrentLeechProvider(generic.TorrentProvider):
                 if mode == 'RSS':
                     searchURL = self.urls['index'] % self.categories
                 else:
-                    searchURL = self.urls['search'] % (search_string, self.categories)
+                    searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories)
 
                 logger.log(u"Search string: " + searchURL, logger.DEBUG)
 
@@ -210,7 +210,7 @@ class TorrentLeechProvider(generic.TorrentProvider):
                                 continue
 
                             item = title, download_url, id, seeders, leechers
-                            logger.log(u"Found result: " + title + "(" + download_url + ")", logger.DEBUG)
+                            logger.log(u"Found result: " + title.replace(' ','.') + " (" + download_url + ")", logger.DEBUG)
 
                             items[mode].append(item)
 
diff --git a/sickbeard/providers/womble.py b/sickbeard/providers/womble.py
index e3afaa23e64acb7dd0a699b1cd4d864001ebb862..0349d3e878728eb41d7cbc4d3281e7e10fe36a13 100644
--- a/sickbeard/providers/womble.py
+++ b/sickbeard/providers/womble.py
@@ -55,8 +55,11 @@ class WombleCache(tvcache.TVCache):
         self.setLastUpdate()
 
         cl = []
-        for url in [self.provider.url + 'rss/?sec=tv-sd&fr=false', self.provider.url + 'rss/?sec=tv-hd&fr=false']:
-            logger.log(u"Womble's Index cache update URL: " + url, logger.DEBUG)
+        for url in [self.provider.url + 'rss/?sec=tv-x264&fr=false',
+                    self.provider.url + 'rss/?sec=tv-sd&fr=false',
+                    self.provider.url + 'rss/?sec=tv-dvd&fr=false',
+                    self.provider.url + 'rss/?sec=tv-hd&fr=false']:
+            logger.log(u'Womble\'s Index cache update URL: ' + url, logger.DEBUG)
 
             for item in self.getRSSFeed(url)['entries'] or []:
                 ci = self._parseItem(item)
diff --git a/sickbeard/sab.py b/sickbeard/sab.py
index 078e92f06403878b476656ad48fef1d88ff043c2..9464fe0c8604dff504f267261dede18e149f47f8 100644
--- a/sickbeard/sab.py
+++ b/sickbeard/sab.py
@@ -59,7 +59,10 @@ def sendNZB(nzb):
 
     # use high priority if specified (recently aired episode)
     if nzb.priority == 1:
-        params['priority'] = 1
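+        # SABnzbd priorities: 1 = High, 2 = Force (starts even when the queue is paused)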
+        if sickbeard.SAB_FORCED == 1:
+            params['priority'] = 2
+        else:
+            params['priority'] = 1
 
     # if it's a normal result we just pass SAB the URL
     if nzb.resultType == "nzb":
diff --git a/sickbeard/search.py b/sickbeard/search.py
index e1759f413b25cb21f5f506f6aa8daeffb254d0bf..2576ea6fb1009e3fe72a683d235e10bed46b7a17 100644
--- a/sickbeard/search.py
+++ b/sickbeard/search.py
@@ -1,711 +1,714 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import os
-import re
-import threading
-import datetime
-import traceback
-
-import sickbeard
-
-from common import SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, Quality, SEASON_RESULT, MULTI_EP_RESULT
-
-from sickbeard import logger, db, show_name_helpers, exceptions, helpers
-from sickbeard import sab
-from sickbeard import nzbget
-from sickbeard import clients
-from sickbeard import history
-from sickbeard import notifiers
-from sickbeard import nzbSplitter
-from sickbeard import ui
-from sickbeard import encodingKludge as ek
-from sickbeard import failed_history
-from sickbeard.exceptions import ex
-from sickbeard.providers.generic import GenericProvider
-from sickbeard.blackandwhitelist import BlackAndWhiteList
-from sickbeard import common
-
-def _downloadResult(result):
-    """
-    Downloads a result to the appropriate black hole folder.
-
-    Returns a bool representing success.
-
-    result: SearchResult instance to download.
-    """
-
-    resProvider = result.provider
-    if resProvider == None:
-        logger.log(u"Invalid provider name - this is a coding error, report it please", logger.ERROR)
-        return False
-
-    # nzbs with an URL can just be downloaded from the provider
-    if result.resultType == "nzb":
-        newResult = resProvider.downloadResult(result)
-    # if it's an nzb data result
-    elif result.resultType == "nzbdata":
-
-        # get the final file path to the nzb
-        fileName = ek.ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
-
-        logger.log(u"Saving NZB to " + fileName)
-
-        newResult = True
-
-        # save the data to disk
-        try:
-            with ek.ek(open, fileName, 'w') as fileOut:
-                fileOut.write(result.extraInfo[0])
-
-            helpers.chmodAsParent(fileName)
-
-        except EnvironmentError, e:
-            logger.log(u"Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
-            newResult = False
-    elif resProvider.providerType == "torrent":
-        newResult = resProvider.downloadResult(result)
-    else:
-        logger.log(u"Invalid provider type - this is a coding error, report it please", logger.ERROR)
-        newResult = False
-
-    return newResult
-
-def snatchEpisode(result, endStatus=SNATCHED):
-    """
-    Contains the internal logic necessary to actually "snatch" a result that
-    has been found.
-
-    Returns a bool representing success.
-
-    result: SearchResult instance to be snatched.
-    endStatus: the episode status that should be used for the episode object once it's snatched.
-    """
-
-    if result is None:
-        return False
-
-    result.priority = 0  # -1 = low, 0 = normal, 1 = high
-    if sickbeard.ALLOW_HIGH_PRIORITY:
-        # if it aired recently make it high priority
-        for curEp in result.episodes:
-            if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
-                result.priority = 1
-    if re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', result.name, re.I) != None:
-        endStatus = SNATCHED_PROPER
-
-    # NZBs can be sent straight to SAB or saved to disk
-    if result.resultType in ("nzb", "nzbdata"):
-        if sickbeard.NZB_METHOD == "blackhole":
-            dlResult = _downloadResult(result)
-        elif sickbeard.NZB_METHOD == "sabnzbd":
-            dlResult = sab.sendNZB(result)
-        elif sickbeard.NZB_METHOD == "nzbget":
-            is_proper = True if endStatus == SNATCHED_PROPER else False
-            dlResult = nzbget.sendNZB(result, is_proper)
-        else:
-            logger.log(u"Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
-            dlResult = False
-
-    # TORRENTs can be sent to clients or saved to disk
-    elif result.resultType == "torrent":
-        # torrents are saved to disk when blackhole mode
-        if sickbeard.TORRENT_METHOD == "blackhole":
-            dlResult = _downloadResult(result)
-        else:
-            #result.content = result.provider.getURL(result.url) if not result.url.startswith('magnet') else None
-            client = clients.getClientIstance(sickbeard.TORRENT_METHOD)()
-            dlResult = client.sendTORRENT(result)
-    else:
-        logger.log(u"Unknown result type, unable to download it", logger.ERROR)
-        dlResult = False
-
-    if not dlResult:
-        return False
-
-    if sickbeard.USE_FAILED_DOWNLOADS:
-        failed_history.logSnatch(result)
-
-    ui.notifications.message('Episode snatched', result.name)
-
-    history.logSnatch(result)
-
-    # don't notify when we re-download an episode
-    sql_l = []
-    trakt_data = []
-    for curEpObj in result.episodes:
-        with curEpObj.lock:
-            if isFirstBestMatch(result):
-                curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
-            else:
-                curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
-
-            sql_l.append(curEpObj.get_sql())
-
-        if curEpObj.status not in Quality.DOWNLOADED:
-            notifiers.notify_snatch(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
-
-            trakt_data.append((curEpObj.season, curEpObj.episode))
-
-    data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
-
-    if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
-        logger.log(u"Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
-        if data:
-            notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
-
-    if len(sql_l) > 0:
-        myDB = db.DBConnection()
-        myDB.mass_action(sql_l)
-
-    if sickbeard.UPDATE_SHOWS_ON_SNATCH and not sickbeard.showQueueScheduler.action.isBeingUpdated(result.show) and result.show.status == "Continuing":
-        sickbeard.showQueueScheduler.action.updateShow(result.show, True)
-
-    return True
-
-
-def pickBestResult(results, show, quality_list=None):
-    results = results if isinstance(results, list) else [results]
-
-    logger.log(u"Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
-
-    bwl = None
-    bestResult = None
-
-    # find the best result for the current episode
-    for cur_result in results:
-        if show and cur_result.show is not show:
-            continue
-
-        # filter out possible bad torrents from providers such as ezrss
-        if isinstance(cur_result, sickbeard.classes.SearchResult):
-            if cur_result.resultType == "torrent" and sickbeard.TORRENT_METHOD != "blackhole":
-                if not cur_result.url.startswith('magnet'):
-                    cur_result.content = cur_result.provider.getURL(cur_result.url)
-                    if not cur_result.content:
-                        continue
-        else:
-            if not cur_result.url.startswith('magnet'):
-                cur_result.content = cur_result.provider.getURL(cur_result.url)
-                if not cur_result.content:
-                    continue
-
-        # build the black And white list
-        if cur_result.show.is_anime:
-            if not bwl:
-                bwl = BlackAndWhiteList(cur_result.show.indexerid)
-            if not bwl.is_valid(cur_result):
-                logger.log(cur_result.name+" does not match the blacklist or the whitelist, rejecting it. Result: " + bwl.get_last_result_msg(), logger.INFO)
-                continue
-
-        logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
-
-        if quality_list and cur_result.quality not in quality_list:
-            logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
-            continue
-
-        if show.rls_ignore_words and show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_ignore_words):
-            logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words,
-                       logger.INFO)
-            continue
-
-        if show.rls_require_words and not show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_require_words):
-            logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words,
-                       logger.INFO)
-            continue
-
-        if not show_name_helpers.filterBadReleases(cur_result.name, parse=False):
-            logger.log(u"Ignoring " + cur_result.name + " because its not a valid scene release that we want, ignoring it",
-                       logger.INFO)
-            continue
-
-        if hasattr(cur_result, 'size'):
-            if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
-                                                                           cur_result.provider.name):
-                logger.log(cur_result.name + u" has previously failed, rejecting it")
-                continue
-
-        if not bestResult or bestResult.quality < cur_result.quality and cur_result.quality != Quality.UNKNOWN:
-            bestResult = cur_result
-
-        elif bestResult.quality == cur_result.quality:
-            if "proper" in cur_result.name.lower() or "repack" in cur_result.name.lower():
-                bestResult = cur_result
-            elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
-                bestResult = cur_result
-            elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
-                logger.log(u"Preferring " + cur_result.name + " (x264 over xvid)")
-                bestResult = cur_result
-
-    if bestResult:
-        logger.log(u"Picked " + bestResult.name + " as the best", logger.DEBUG)
-    else:
-        logger.log(u"No result picked.", logger.DEBUG)
-
-    return bestResult
-
-
-def isFinalResult(result):
-    """
-    Checks if the given result is good enough quality that we can stop searching for other ones.
-
-    If the result is the highest quality in both the any/best quality lists then this function
-    returns True, if not then it's False
-
-    """
-
-    logger.log(u"Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
-
-    show_obj = result.episodes[0].show
-
-    bwl = None
-    if show_obj.is_anime:
-        bwl = BlackAndWhiteList(show_obj.indexerid)
-
-    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
-
-    # if there is a redownload that's higher than this then we definitely need to keep looking
-    if best_qualities and result.quality < max(best_qualities):
-        return False
-
-    # if it does not match the shows black and white list its no good
-    elif bwl and not bwl.is_valid(result):
-        return False
-
-    # if there's no redownload that's higher (above) and this is the highest initial download then we're good
-    elif any_qualities and result.quality in any_qualities:
-        return True
-
-    elif best_qualities and result.quality == max(best_qualities):
-
-        # if this is the best redownload but we have a higher initial download then keep looking
-        if any_qualities and result.quality < max(any_qualities):
-            return False
-
-        # if this is the best redownload and we don't have a higher initial download then we're done
-        else:
-            return True
-
-    # if we got here than it's either not on the lists, they're empty, or it's lower than the highest required
-    else:
-        return False
-
-
-def isFirstBestMatch(result):
-    """
-    Checks if the given result is a best quality match and if we want to archive the episode on first match.
-    """
-
-    logger.log(u"Checking if we should archive our first best quality match for for episode " + result.name,
-               logger.DEBUG)
-
-    show_obj = result.episodes[0].show
-
-    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
-
-    # if there is a redownload that's a match to one of our best qualities and we want to archive the episode then we are done
-    if best_qualities and show_obj.archive_firstmatch and result.quality in best_qualities:
-        return True
-
-    return False
-
-def wantedEpisodes(show, fromDate):
-    anyQualities, bestQualities = common.Quality.splitQuality(show.quality) # @UnusedVariable
-    allQualities = list(set(anyQualities + bestQualities))
-
-    logger.log(u"Seeing if we need anything from " + show.name)
-    myDB = db.DBConnection()
-
-    if show.air_by_date:
-        sqlResults = myDB.select(
-            "SELECT ep.status, ep.season, ep.episode FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.indexer_id AND show.paused = 0 AND ep.airdate > ? AND ep.showid = ? AND show.air_by_date = 1",
-        [fromDate.toordinal(), show.indexerid])
-    else:
-        sqlResults = myDB.select(
-            "SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
-            [show.indexerid, fromDate.toordinal()])
-
-    # check through the list of statuses to see if we want any
-    wanted = []
-    for result in sqlResults:
-        curCompositeStatus = int(result["status"] or -1)
-        curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
-
-        if bestQualities:
-            highestBestQuality = max(allQualities)
-        else:
-            highestBestQuality = 0
-
-        # if we need a better one then say yes
-        if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER,
-            common.SNATCHED_BEST) and curQuality < highestBestQuality) or curStatus == common.WANTED:
-
-            epObj = show.getEpisode(int(result["season"]), int(result["episode"]))
-            epObj.wantedQuality = [i for i in allQualities if (i > curQuality and i != common.Quality.UNKNOWN)]
-            wanted.append(epObj)
-
-    return wanted
-
-def searchForNeededEpisodes():
-    foundResults = {}
-
-    didSearch = False
-
-    origThreadName = threading.currentThread().name
-    threads = []
-
-    show_list = sickbeard.showList
-    fromDate = datetime.date.fromordinal(1)
-    episodes = []
-
-    for curShow in show_list:
-        if not curShow.paused:
-            episodes.extend(wantedEpisodes(curShow, fromDate))
-
-    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_daily]
-    for curProvider in providers:
-        threads += [threading.Thread(target=curProvider.cache.updateCache, name=origThreadName + " :: [" + curProvider.name + "]")]
-
-    # start the thread we just created
-    for t in threads:
-        t.start()
-
-    # wait for all threads to finish
-    for t in threads:
-        t.join()
-
-    for curProvider in providers:
-        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
-        curFoundResults = curProvider.searchRSS(episodes)
-        didSearch = True
-
-        # pick a single result for each episode, respecting existing results
-        for curEp in curFoundResults:
-            bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
-
-            # if all results were rejected move on to the next episode
-            if not bestResult:
-                logger.log(u"All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
-                continue
-
-            # if it's already in the list (from another provider) and the newly found quality is no better then skip it
-            if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
-                continue
-
-            foundResults[curEp] = bestResult
-
-    threading.currentThread().name = origThreadName
-
-    if not didSearch:
-        logger.log(
-            u"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
-            logger.ERROR)
-
-    return foundResults.values()
-
-
-def searchProviders(show, episodes, manualSearch=False):
-    foundResults = {}
-    finalResults = []
-
-    didSearch = False
-    threads = []
-
-    # build name cache for show
-    sickbeard.name_cache.buildNameCache(show)
-
-    origThreadName = threading.currentThread().name
-
-    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_backlog]
-    for curProvider in providers:
-        threads += [threading.Thread(target=curProvider.cache.updateCache,
-                                     name=origThreadName + " :: [" + curProvider.name + "]")]
-
-    # start the thread we just created
-    for t in threads:
-        t.start()
-
-    # wait for all threads to finish
-    for t in threads:
-        t.join()
-
-    for providerNum, curProvider in enumerate(providers):
-        if curProvider.anime_only and not show.is_anime:
-            logger.log(u"" + str(show.name) + " is not an anime, skiping", logger.DEBUG)
-            continue
-
-        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
-
-        foundResults[curProvider.name] = {}
-
-        searchCount = 0
-        search_mode = curProvider.search_mode
-
-        # Always search for episode when manually searching when in sponly and fallback false
-        if search_mode == 'sponly' and manualSearch == True and curProvider.search_fallback == False:
-            search_mode = 'eponly'
-
-        while(True):
-            searchCount += 1
-
-            if search_mode == 'eponly':
-                logger.log(u"Performing episode search for " + show.name)
-            else:
-                logger.log(u"Performing season pack search for " + show.name)
-
-            try:
-                searchResults = curProvider.findSearchResults(show, episodes, search_mode, manualSearch)
-            except exceptions.AuthException, e:
-                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
-                break
-            except Exception, e:
-                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
-                logger.log(traceback.format_exc(), logger.DEBUG)
-                break
-            finally:
-                threading.currentThread().name = origThreadName
-
-            didSearch = True
-
-            if len(searchResults):
-                # make a list of all the results for this provider
-                for curEp in searchResults:
-                    if curEp in foundResults:
-                        foundResults[curProvider.name][curEp] += searchResults[curEp]
-                    else:
-                        foundResults[curProvider.name][curEp] = searchResults[curEp]
-
-                break
-            elif not curProvider.search_fallback or searchCount == 2:
-                break
-
-            if search_mode == 'sponly':
-                logger.log(u"FALLBACK EPISODE SEARCH INITIATED ...")
-                search_mode = 'eponly'
-            else:
-                logger.log(u"FALLBACK SEASON PACK SEARCH INITIATED ...")
-                search_mode = 'sponly'
-
-        # skip to next provider if we have no results to process
-        if not len(foundResults[curProvider.name]):
-            continue
-
-        anyQualities, bestQualities = Quality.splitQuality(show.quality)
-
-        # pick the best season NZB
-        bestSeasonResult = None
-        if SEASON_RESULT in foundResults[curProvider.name]:
-            bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show,
-                                           anyQualities + bestQualities)
-
-        highest_quality_overall = 0
-        for cur_episode in foundResults[curProvider.name]:
-            for cur_result in foundResults[curProvider.name][cur_episode]:
-                if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
-                    highest_quality_overall = cur_result.quality
-        logger.log(u"The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
-                   logger.DEBUG)
-
-        # see if every episode is wanted
-        if bestSeasonResult:
-            searchedSeasons = [str(x.season) for x in episodes]
-
-            # get the quality of the season nzb
-            seasonQual = bestSeasonResult.quality
-            logger.log(
-                u"The quality of the season " + bestSeasonResult.provider.providerType + " is " + Quality.qualityStrings[
-                    seasonQual], logger.DEBUG)
-
-            myDB = db.DBConnection()
-            allEps = [int(x["episode"])
-                      for x in myDB.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
-                                           [show.indexerid])]
-
-            logger.log(u"Executed query: [SELECT episode FROM tv_episodes WHERE showid = %s AND season in  %s]" % (show.indexerid, ','.join(searchedSeasons)))
-            logger.log(u"Episode list: " + str(allEps), logger.DEBUG)
-
-            allWanted = True
-            anyWanted = False
-            for curEpNum in allEps:
-                for season in set([x.season for x in episodes]):
-                    if not show.wantEpisode(season, curEpNum, seasonQual):
-                        allWanted = False
-                    else:
-                        anyWanted = True
-
-            # if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
-            if allWanted and bestSeasonResult.quality == highest_quality_overall:
-                logger.log(
-                    u"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.providerType + " " + bestSeasonResult.name)
-                epObjs = []
-                for curEpNum in allEps:
-                    for season in set([x.season for x in episodes]):
-                        epObjs.append(show.getEpisode(season, curEpNum))
-                bestSeasonResult.episodes = epObjs
-
-                return [bestSeasonResult]
-
-            elif not anyWanted:
-                logger.log(
-                    u"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
-                    logger.DEBUG)
-
-            else:
-
-                if bestSeasonResult.provider.providerType == GenericProvider.NZB:
-                    logger.log(u"Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
-
-                    # if not, break it apart and add them as the lowest priority results
-                    individualResults = nzbSplitter.splitResult(bestSeasonResult)
-                    for curResult in individualResults:
-                        if len(curResult.episodes) == 1:
-                            epNum = curResult.episodes[0].episode
-                        elif len(curResult.episodes) > 1:
-                            epNum = MULTI_EP_RESULT
-
-                        if epNum in foundResults[curProvider.name]:
-                            foundResults[curProvider.name][epNum].append(curResult)
-                        else:
-                            foundResults[curProvider.name][epNum] = [curResult]
-
-                # If this is a torrent all we can do is leech the entire torrent, user will have to select which eps not do download in his torrent client
-                else:
-
-                    # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
-                    logger.log(
-                        u"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
-                    epObjs = []
-                    for curEpNum in allEps:
-                        for season in set([x.season for x in episodes]):
-                            epObjs.append(show.getEpisode(season, curEpNum))
-                    bestSeasonResult.episodes = epObjs
-
-                    epNum = MULTI_EP_RESULT
-                    if epNum in foundResults[curProvider.name]:
-                        foundResults[curProvider.name][epNum].append(bestSeasonResult)
-                    else:
-                        foundResults[curProvider.name][epNum] = [bestSeasonResult]
-
-        # go through multi-ep results and see if we really want them or not, get rid of the rest
-        multiResults = {}
-        if MULTI_EP_RESULT in foundResults[curProvider.name]:
-            for multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
-
-                logger.log(u"Seeing if we want to bother with multi-episode result " + multiResult.name, logger.DEBUG)
-
-                if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(multiResult.name, multiResult.size,
-                                                                               multiResult.provider.name):
-                    logger.log(multiResult.name + u" has previously failed, rejecting this multi-ep result")
-                    continue
-
-                # see how many of the eps that this result covers aren't covered by single results
-                neededEps = []
-                notNeededEps = []
-                for epObj in multiResult.episodes:
-                    epNum = epObj.episode
-                    # if we have results for the episode
-                    if epNum in foundResults[curProvider.name] and len(foundResults[curProvider.name][epNum]) > 0:
-                        neededEps.append(epNum)
-                    else:
-                        notNeededEps.append(epNum)
-
-                logger.log(
-                    u"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
-                    logger.DEBUG)
-
-                if not notNeededEps:
-                    logger.log(u"All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
-                    continue
-
-                # check if these eps are already covered by another multi-result
-                multiNeededEps = []
-                multiNotNeededEps = []
-                for epObj in multiResult.episodes:
-                    epNum = epObj.episode
-                    if epNum in multiResults:
-                        multiNotNeededEps.append(epNum)
-                    else:
-                        multiNeededEps.append(epNum)
-
-                logger.log(
-                    u"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
-                        multiNotNeededEps), logger.DEBUG)
-
-                if not multiNeededEps:
-                    logger.log(
-                        u"All of these episodes were covered by another multi-episode nzbs, ignoring this multi-ep result",
-                        logger.DEBUG)
-                    continue
-
-                # if we're keeping this multi-result then remember it
-                for epObj in multiResult.episodes:
-                    multiResults[epObj.episode] = multiResult
-
-                # don't bother with the single result if we're going to get it with a multi result
-                for epObj in multiResult.episodes:
-                    epNum = epObj.episode
-                    if epNum in foundResults[curProvider.name]:
-                        logger.log(
-                            u"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
-                                epNum) + ", removing the single-episode results from the list", logger.DEBUG)
-                        del foundResults[curProvider.name][epNum]
-
-        # of all the single ep results narrow it down to the best one for each episode
-        finalResults += set(multiResults.values())
-        for curEp in foundResults[curProvider.name]:
-            if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
-                continue
-
-            if not len(foundResults[curProvider.name][curEp]) > 0:
-                continue
-
-            # if all results were rejected move on to the next episode
-            bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
-            if not bestResult:
-                continue
-
-            # add result if its not a duplicate and
-            found = False
-            for i, result in enumerate(finalResults):
-                for bestResultEp in bestResult.episodes:
-                    if bestResultEp in result.episodes:
-                        if result.quality < bestResult.quality:
-                            finalResults.pop(i)
-                        else:
-                            found = True
-            if not found:
-                finalResults += [bestResult]
-
-        # check that we got all the episodes we wanted first before doing a match and snatch
-        wantedEpCount = 0
-        for wantedEp in episodes:
-            for result in finalResults:
-                if wantedEp in result.episodes and isFinalResult(result):
-                    wantedEpCount += 1
-
-        # make sure we search every provider for results unless we found everything we wanted
-        if wantedEpCount == len(episodes):
-            break
-
-    if not didSearch:
-        logger.log(u"No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
-                   logger.ERROR)
-
-    return finalResults
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import os
+import re
+import threading
+import datetime
+import traceback
+
+import sickbeard
+
+from common import SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, Quality, SEASON_RESULT, MULTI_EP_RESULT
+
+from sickbeard import logger, db, show_name_helpers, exceptions, helpers
+from sickbeard import sab
+from sickbeard import nzbget
+from sickbeard import clients
+from sickbeard import history
+from sickbeard import notifiers
+from sickbeard import nzbSplitter
+from sickbeard import ui
+from sickbeard import encodingKludge as ek
+from sickbeard import failed_history
+from sickbeard.exceptions import ex
+from sickbeard.providers.generic import GenericProvider
+from sickbeard.blackandwhitelist import BlackAndWhiteList
+from sickbeard import common
+
+def _downloadResult(result):
+    """
+    Downloads a result to the appropriate black hole folder.
+
+    Returns a bool representing success.
+
+    result: SearchResult instance to download.
+    """
+
+    resProvider = result.provider
+    if resProvider is None:
+        logger.log(u"Invalid provider name - this is a coding error, report it please", logger.ERROR)
+        return False
+
+    # nzbs with a URL can just be downloaded from the provider
+    if result.resultType == "nzb":
+        newResult = resProvider.downloadResult(result)
+    # if it's an nzb data result
+    elif result.resultType == "nzbdata":
+
+        # get the final file path to the nzb
+        fileName = ek.ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
+
+        logger.log(u"Saving NZB to " + fileName)
+
+        newResult = True
+
+        # save the data to disk
+        try:
+            with ek.ek(open, fileName, 'w') as fileOut:
+                fileOut.write(result.extraInfo[0])
+
+            helpers.chmodAsParent(fileName)
+
+        except EnvironmentError, e:
+            logger.log(u"Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
+            newResult = False
+    elif resProvider.providerType == "torrent":
+        newResult = resProvider.downloadResult(result)
+    else:
+        logger.log(u"Invalid provider type - this is a coding error, report it please", logger.ERROR)
+        newResult = False
+
+    return newResult
+
+def snatchEpisode(result, endStatus=SNATCHED):
+    """
+    Contains the internal logic necessary to actually "snatch" a result that
+    has been found.
+
+    Returns a bool representing success.
+
+    result: SearchResult instance to be snatched.
+    endStatus: the episode status that should be used for the episode object once it's snatched.
+    """
+
+    if result is None:
+        return False
+
+    result.priority = 0  # -1 = low, 0 = normal, 1 = high
+    if sickbeard.ALLOW_HIGH_PRIORITY:
+        # if it aired recently make it high priority
+        for curEp in result.episodes:
+            if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
+                result.priority = 1
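+    # releases tagged proper/repack are recorded with the SNATCHED_PROPER status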
+    if re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', result.name, re.I) is not None:
+        endStatus = SNATCHED_PROPER
+
+    # NZBs can be sent straight to SAB or saved to disk
+    if result.resultType in ("nzb", "nzbdata"):
+        if sickbeard.NZB_METHOD == "blackhole":
+            dlResult = _downloadResult(result)
+        elif sickbeard.NZB_METHOD == "sabnzbd":
+            dlResult = sab.sendNZB(result)
+        elif sickbeard.NZB_METHOD == "nzbget":
+            is_proper = endStatus == SNATCHED_PROPER
+            dlResult = nzbget.sendNZB(result, is_proper)
+        else:
+            logger.log(u"Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
+            dlResult = False
+
+    # TORRENTs can be sent to clients or saved to disk
+    elif result.resultType == "torrent":
+        # torrents are saved to disk when using blackhole mode
+        if sickbeard.TORRENT_METHOD == "blackhole":
+            dlResult = _downloadResult(result)
+        else:
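+            # magnet links carry no .torrent payload and pickBestResult leaves their content unset,
+            # so an empty content only counts as a failure for regular torrent files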
+            if result.content or result.url.startswith('magnet'):
+                client = clients.getClientIstance(sickbeard.TORRENT_METHOD)()
+                dlResult = client.sendTORRENT(result)
+            else:
+                logger.log(u"Torrent file content is empty", logger.ERROR)
+                dlResult = False
+    else:
+        logger.log(u"Unknown result type, unable to download it", logger.ERROR)
+        dlResult = False
+
+    if not dlResult:
+        return False
+
+    if sickbeard.USE_FAILED_DOWNLOADS:
+        failed_history.logSnatch(result)
+
+    ui.notifications.message('Episode snatched', result.name)
+
+    history.logSnatch(result)
+
+    # don't notify when we re-download an episode
+    sql_l = []
+    trakt_data = []
+    for curEpObj in result.episodes:
+        with curEpObj.lock:
+            if isFirstBestMatch(result):
+                curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
+            else:
+                curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
+
+            sql_l.append(curEpObj.get_sql())
+
+        if curEpObj.status not in Quality.DOWNLOADED:
+            notifiers.notify_snatch(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
+
+            trakt_data.append((curEpObj.season, curEpObj.episode))
+
+    data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
+
+    if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
+        logger.log(u"Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
+        if data:
+            notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
+
+    if len(sql_l) > 0:
+        myDB = db.DBConnection()
+        myDB.mass_action(sql_l)
+
+    if sickbeard.UPDATE_SHOWS_ON_SNATCH and not sickbeard.showQueueScheduler.action.isBeingUpdated(result.show) and result.show.status == "Continuing":
+        sickbeard.showQueueScheduler.action.updateShow(result.show, True)
+
+    return True
+
+
+def pickBestResult(results, show, quality_list=None):
+    results = results if isinstance(results, list) else [results]
+
+    logger.log(u"Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
+
+    bwl = None
+    bestResult = None
+
+    # find the best result for the current episode
+    for cur_result in results:
+        if show and cur_result.show is not show:
+            continue
+
+        # filter out possible bad torrents from providers such as ezrss
+        if isinstance(cur_result, sickbeard.classes.SearchResult):
+            if cur_result.resultType == "torrent" and sickbeard.TORRENT_METHOD != "blackhole":
+                if not cur_result.url.startswith('magnet'):
+                    cur_result.content = cur_result.provider.getURL(cur_result.url)
+                    if not cur_result.content:
+                        continue
+        else:
+            if not cur_result.url.startswith('magnet'):
+                cur_result.content = cur_result.provider.getURL(cur_result.url)
+                if not cur_result.content:
+                    continue
+
+        # build the black and white list
+        if cur_result.show.is_anime:
+            if not bwl:
+                bwl = BlackAndWhiteList(cur_result.show.indexerid)
+            if not bwl.is_valid(cur_result):
+                logger.log(cur_result.name+" does not match the blacklist or the whitelist, rejecting it. Result: " + bwl.get_last_result_msg(), logger.INFO)
+                continue
+
+        logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
+
+        if quality_list and cur_result.quality not in quality_list:
+            logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
+            continue
+
+        if show.rls_ignore_words and show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_ignore_words):
+            logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words,
+                       logger.INFO)
+            continue
+
+        if show.rls_require_words and not show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_require_words):
+            logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words,
+                       logger.INFO)
+            continue
+
+        if not show_name_helpers.filterBadReleases(cur_result.name, parse=False):
+            logger.log(u"Ignoring " + cur_result.name + " because its not a valid scene release that we want, ignoring it",
+                       logger.INFO)
+            continue
+
+        if hasattr(cur_result, 'size'):
+            if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
+                                                                           cur_result.provider.name):
+                logger.log(cur_result.name + u" has previously failed, rejecting it")
+                continue
+
+        if not bestResult or (bestResult.quality < cur_result.quality and cur_result.quality != Quality.UNKNOWN):
+            bestResult = cur_result
+
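+        # on a quality tie prefer proper/repack releases, non-internal releases, and x264 over xvid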
+        elif bestResult.quality == cur_result.quality:
+            if "proper" in cur_result.name.lower() or "repack" in cur_result.name.lower():
+                bestResult = cur_result
+            elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
+                bestResult = cur_result
+            elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
+                logger.log(u"Preferring " + cur_result.name + " (x264 over xvid)")
+                bestResult = cur_result
+
+    if bestResult:
+        logger.log(u"Picked " + bestResult.name + " as the best", logger.DEBUG)
+    else:
+        logger.log(u"No result picked.", logger.DEBUG)
+
+    return bestResult
+
+
+def isFinalResult(result):
+    """
+    Checks if the given result is good enough quality that we can stop searching for other ones.
+
+    If the result is the highest quality in both the any/best quality lists then this function
+    returns True, if not then it's False
+
+    """
+
+    logger.log(u"Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
+
+    show_obj = result.episodes[0].show
+
+    bwl = None
+    if show_obj.is_anime:
+        bwl = BlackAndWhiteList(show_obj.indexerid)
+
+    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
+
+    # if there is a redownload that's higher than this then we definitely need to keep looking
+    if best_qualities and result.quality < max(best_qualities):
+        return False
+
+    # if it does not match the show's black and white list it's no good
+    elif bwl and not bwl.is_valid(result):
+        return False
+
+    # if there's no redownload that's higher (above) and this is the highest initial download then we're good
+    elif any_qualities and result.quality in any_qualities:
+        return True
+
+    elif best_qualities and result.quality == max(best_qualities):
+
+        # if this is the best redownload but we have a higher initial download then keep looking
+        if any_qualities and result.quality < max(any_qualities):
+            return False
+
+        # if this is the best redownload and we don't have a higher initial download then we're done
+        else:
+            return True
+
+    # if we got here then it's either not on the lists, they're empty, or it's lower than the highest required
+    else:
+        return False
+
+
+def isFirstBestMatch(result):
+    """
+    Checks if the given result is a best quality match and if we want to archive the episode on first match.
+    """
+
+    logger.log(u"Checking if we should archive our first best quality match for for episode " + result.name,
+               logger.DEBUG)
+
+    show_obj = result.episodes[0].show
+
+    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
+
+    # if there is a redownload that's a match to one of our best qualities and we want to archive the episode then we are done
+    if best_qualities and show_obj.archive_firstmatch and result.quality in best_qualities:
+        return True
+
+    return False
+
+def wantedEpisodes(show, fromDate):
+    anyQualities, bestQualities = common.Quality.splitQuality(show.quality) # @UnusedVariable
+    allQualities = list(set(anyQualities + bestQualities))
+
+    logger.log(u"Seeing if we need anything from " + show.name)
+    myDB = db.DBConnection()
+
+    if show.air_by_date:
+        sqlResults = myDB.select(
+            "SELECT ep.status, ep.season, ep.episode FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.indexer_id AND show.paused = 0 AND ep.airdate > ? AND ep.showid = ? AND show.air_by_date = 1",
+        [fromDate.toordinal(), show.indexerid])
+    else:
+        sqlResults = myDB.select(
+            "SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
+            [show.indexerid, fromDate.toordinal()])
+
+    # check through the list of statuses to see if we want any
+    wanted = []
+    for result in sqlResults:
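+        # the status column packs status and quality into a single composite value; split it back apart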
+        curCompositeStatus = int(result["status"] or -1)
+        curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
+
+        if bestQualities:
+            highestBestQuality = max(allQualities)
+        else:
+            highestBestQuality = 0
+
+        # if we need a better one then say yes
+        if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER,
+            common.SNATCHED_BEST) and curQuality < highestBestQuality) or curStatus == common.WANTED:
+
+            epObj = show.getEpisode(int(result["season"]), int(result["episode"]))
+            epObj.wantedQuality = [i for i in allQualities if (i > curQuality and i != common.Quality.UNKNOWN)]
+            wanted.append(epObj)
+
+    return wanted
+
+def searchForNeededEpisodes():
+    foundResults = {}
+
+    didSearch = False
+
+    origThreadName = threading.currentThread().name
+    threads = []
+
+    show_list = sickbeard.showList
+    fromDate = datetime.date.fromordinal(1)
+    episodes = []
+
+    for curShow in show_list:
+        if not curShow.paused:
+            episodes.extend(wantedEpisodes(curShow, fromDate))
+
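+    # only providers that are active and enabled for daily searches take part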
+    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_daily]
+    for curProvider in providers:
+        threads += [threading.Thread(target=curProvider.cache.updateCache, name=origThreadName + " :: [" + curProvider.name + "]")]
+
+    # start the threads we just created
+    for t in threads:
+        t.start()
+
+    # wait for all threads to finish
+    for t in threads:
+        t.join()
+
+    for curProvider in providers:
+        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
+        curFoundResults = curProvider.searchRSS(episodes)
+        didSearch = True
+
+        # pick a single result for each episode, respecting existing results
+        for curEp in curFoundResults:
+            bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
+
+            # if all results were rejected move on to the next episode
+            if not bestResult:
+                logger.log(u"All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
+                continue
+
+            # if it's already in the list (from another provider) and the newly found quality is no better then skip it
+            if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
+                continue
+
+            foundResults[curEp] = bestResult
+
+    threading.currentThread().name = origThreadName
+
+    if not didSearch:
+        logger.log(
+            u"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
+            logger.ERROR)
+
+    return foundResults.values()
+
+
+def searchProviders(show, episodes, manualSearch=False, downCurQuality=False):
+    foundResults = {}
+    finalResults = []
+
+    didSearch = False
+    threads = []
+
+    # build name cache for show
+    sickbeard.name_cache.buildNameCache(show)
+
+    origThreadName = threading.currentThread().name
+
+    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_backlog]
+    for curProvider in providers:
+        threads += [threading.Thread(target=curProvider.cache.updateCache,
+                                     name=origThreadName + " :: [" + curProvider.name + "]")]
+
+    # start the threads we just created
+    for t in threads:
+        t.start()
+
+    # wait for all threads to finish
+    for t in threads:
+        t.join()
+
+    for providerNum, curProvider in enumerate(providers):
+        if curProvider.anime_only and not show.is_anime:
+            logger.log(u"" + str(show.name) + " is not an anime, skiping", logger.DEBUG)
+            continue
+
+        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
+
+        foundResults[curProvider.name] = {}
+
+        searchCount = 0
+        search_mode = curProvider.search_mode
+
+        # Always search per-episode when manually searching in sponly mode with fallback disabled
+        if search_mode == 'sponly' and manualSearch and not curProvider.search_fallback:
+            search_mode = 'eponly'
+
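+        # run up to two passes: the provider's preferred search mode first, then the fallback mode if nothing turned up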
+        while True:
+            searchCount += 1
+
+            if search_mode == 'eponly':
+                logger.log(u"Performing episode search for " + show.name)
+            else:
+                logger.log(u"Performing season pack search for " + show.name)
+
+            try:
+                searchResults = curProvider.findSearchResults(show, episodes, search_mode, manualSearch, downCurQuality)
+            except exceptions.AuthException, e:
+                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
+                break
+            except Exception, e:
+                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
+                logger.log(traceback.format_exc(), logger.DEBUG)
+                break
+            finally:
+                threading.currentThread().name = origThreadName
+
+            didSearch = True
+
+            if searchResults:
+                # make a list of all the results for this provider
+                for curEp in searchResults:
+                    if curEp in foundResults:
+                        foundResults[curProvider.name][curEp] += searchResults[curEp]
+                    else:
+                        foundResults[curProvider.name][curEp] = searchResults[curEp]
+
+                break
+            elif not curProvider.search_fallback or searchCount == 2:
+                break
+
+            if search_mode == 'sponly':
+                logger.log(u"FALLBACK EPISODE SEARCH INITIATED ...")
+                search_mode = 'eponly'
+            else:
+                logger.log(u"FALLBACK SEASON PACK SEARCH INITIATED ...")
+                search_mode = 'sponly'
+
+        # skip to next provider if we have no results to process
+        if not foundResults[curProvider.name]:
+            continue
+
+        anyQualities, bestQualities = Quality.splitQuality(show.quality)
+
+        # pick the best season NZB
+        bestSeasonResult = None
+        if SEASON_RESULT in foundResults[curProvider.name]:
+            bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show,
+                                           anyQualities + bestQualities)
+
+        highest_quality_overall = 0
+        for cur_episode in foundResults[curProvider.name]:
+            for cur_result in foundResults[curProvider.name][cur_episode]:
+                if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
+                    highest_quality_overall = cur_result.quality
+        logger.log(u"The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
+                   logger.DEBUG)
+
+        # see if every episode is wanted
+        if bestSeasonResult:
+            searchedSeasons = [str(x.season) for x in episodes]
+
+            # get the quality of the season nzb
+            seasonQual = bestSeasonResult.quality
+            logger.log(
+                u"The quality of the season " + bestSeasonResult.provider.providerType + " is " + Quality.qualityStrings[
+                    seasonQual], logger.DEBUG)
+
+            myDB = db.DBConnection()
+            allEps = [int(x["episode"])
+                      for x in myDB.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
+                                           [show.indexerid])]
+
+            logger.log(u"Executed query: [SELECT episode FROM tv_episodes WHERE showid = %s AND season in  %s]" % (show.indexerid, ','.join(searchedSeasons)))
+            logger.log(u"Episode list: " + str(allEps), logger.DEBUG)
+
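+            # work out whether the season pack covers every wanted episode, only some of them, or none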
+            allWanted = True
+            anyWanted = False
+            for curEpNum in allEps:
+                for season in set([x.season for x in episodes]):
+                    if not show.wantEpisode(season, curEpNum, seasonQual, downCurQuality):
+                        allWanted = False
+                    else:
+                        anyWanted = True
+
+            # if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
+            if allWanted and bestSeasonResult.quality == highest_quality_overall:
+                logger.log(
+                    u"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.providerType + " " + bestSeasonResult.name)
+                epObjs = []
+                for curEpNum in allEps:
+                    for season in set([x.season for x in episodes]):
+                        epObjs.append(show.getEpisode(season, curEpNum))
+                bestSeasonResult.episodes = epObjs
+
+                return [bestSeasonResult]
+
+            elif not anyWanted:
+                logger.log(
+                    u"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
+                    logger.DEBUG)
+
+            else:
+
+                if bestSeasonResult.provider.providerType == GenericProvider.NZB:
+                    logger.log(u"Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
+
+                    # if not, break it apart and add them as the lowest priority results
+                    individualResults = nzbSplitter.splitResult(bestSeasonResult)
+                    for curResult in individualResults:
+                        if len(curResult.episodes) == 1:
+                            epNum = curResult.episodes[0].episode
+                        elif len(curResult.episodes) > 1:
+                            epNum = MULTI_EP_RESULT
+
+                        if epNum in foundResults[curProvider.name]:
+                            foundResults[curProvider.name][epNum].append(curResult)
+                        else:
+                            foundResults[curProvider.name][epNum] = [curResult]
+
+                # If this is a torrent all we can do is leech the entire torrent, the user will have to select which eps not to download in their torrent client
+                else:
+
+                    # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
+                    logger.log(
+                        u"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
+                    epObjs = []
+                    for curEpNum in allEps:
+                        for season in set([x.season for x in episodes]):
+                            epObjs.append(show.getEpisode(season, curEpNum))
+                    bestSeasonResult.episodes = epObjs
+
+                    epNum = MULTI_EP_RESULT
+                    if epNum in foundResults[curProvider.name]:
+                        foundResults[curProvider.name][epNum].append(bestSeasonResult)
+                    else:
+                        foundResults[curProvider.name][epNum] = [bestSeasonResult]
+
+        # go through multi-ep results and see if we really want them or not, get rid of the rest
+        multiResults = {}
+        if MULTI_EP_RESULT in foundResults[curProvider.name]:
+            for multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
+
+                logger.log(u"Seeing if we want to bother with multi-episode result " + multiResult.name, logger.DEBUG)
+
+                if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(multiResult.name, multiResult.size,
+                                                                               multiResult.provider.name):
+                    logger.log(multiResult.name + u" has previously failed, rejecting this multi-ep result")
+                    continue
+
+                # see how many of the eps that this result covers aren't covered by single results
+                neededEps = []
+                notNeededEps = []
+                for epObj in multiResult.episodes:
+                    epNum = epObj.episode
+                    # if we have results for the episode
+                    if epNum in foundResults[curProvider.name] and len(foundResults[curProvider.name][epNum]) > 0:
+                        neededEps.append(epNum)
+                    else:
+                        notNeededEps.append(epNum)
+
+                logger.log(
+                    u"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
+                    logger.DEBUG)
+
+                if not notNeededEps:
+                    logger.log(u"All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
+                    continue
+
+                # check if these eps are already covered by another multi-result
+                multiNeededEps = []
+                multiNotNeededEps = []
+                for epObj in multiResult.episodes:
+                    epNum = epObj.episode
+                    if epNum in multiResults:
+                        multiNotNeededEps.append(epNum)
+                    else:
+                        multiNeededEps.append(epNum)
+
+                logger.log(
+                    u"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
+                        multiNotNeededEps), logger.DEBUG)
+
+                if not multiNeededEps:
+                    logger.log(
+                        u"All of these episodes were covered by another multi-episode nzbs, ignoring this multi-ep result",
+                        logger.DEBUG)
+                    continue
+
+                # if we're keeping this multi-result then remember it
+                for epObj in multiResult.episodes:
+                    multiResults[epObj.episode] = multiResult
+
+                # don't bother with the single result if we're going to get it with a multi result
+                for epObj in multiResult.episodes:
+                    epNum = epObj.episode
+                    if epNum in foundResults[curProvider.name]:
+                        logger.log(
+                            u"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
+                                epNum) + ", removing the single-episode results from the list", logger.DEBUG)
+                        del foundResults[curProvider.name][epNum]
+
+        # of all the single ep results narrow it down to the best one for each episode
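+        # (multiResults keys the same multi-ep result under each episode it covers, so set() dedupes before adding)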
+        finalResults += set(multiResults.values())
+        for curEp in foundResults[curProvider.name]:
+            if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
+                continue
+
+            if not foundResults[curProvider.name][curEp]:
+                continue
+
+            # pickBestResult returns None if every result was rejected; if so, move on to the next episode
+            bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
+            if not bestResult:
+                continue
+
+            # add the result if it's not a duplicate; drop any lower-quality result that overlaps it.
+            # iterate in reverse so pop(i) doesn't shift the indices still to be visited
+            found = False
+            for i, result in reversed(list(enumerate(finalResults))):
+                for bestResultEp in bestResult.episodes:
+                    if bestResultEp in result.episodes:
+                        if result.quality < bestResult.quality:
+                            finalResults.pop(i)
+                        else:
+                            found = True
+                        break
+            if not found:
+                finalResults += [bestResult]
+
+        # check that we got all the episodes we wanted first before doing a match and snatch
+        wantedEpCount = 0
+        for wantedEp in episodes:
+            for result in finalResults:
+                if wantedEp in result.episodes and isFinalResult(result):
+                    wantedEpCount += 1
+                    break  # count each wanted episode at most once
+
+        # make sure we search every provider for results unless we found everything we wanted
+        if wantedEpCount == len(episodes):
+            break
+
+    if not didSearch:
+        logger.log(u"No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
+                   logger.ERROR)
+
+    return finalResults
diff --git a/sickbeard/search_queue.py b/sickbeard/search_queue.py
index a0ecc4ee3963fcabd71ad77ddc87ec5cccdf53c7..a2c87602e5ffc5ff3cd5149e1eaa95c0044aeb06 100644
--- a/sickbeard/search_queue.py
+++ b/sickbeard/search_queue.py
@@ -1,294 +1,296 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import time
-import traceback
-import threading
-
-import sickbeard
-from sickbeard import db, logger, common, exceptions, helpers
-from sickbeard import generic_queue, scheduler
-from sickbeard import search, failed_history, history
-from sickbeard import ui
-from sickbeard.exceptions import ex
-from sickbeard.search import pickBestResult
-
-search_queue_lock = threading.Lock()
-
-BACKLOG_SEARCH = 10
-DAILY_SEARCH = 20
-FAILED_SEARCH = 30
-MANUAL_SEARCH = 40
-
-MANUAL_SEARCH_HISTORY = []
-MANUAL_SEARCH_HISTORY_SIZE = 100
-
-class SearchQueue(generic_queue.GenericQueue):
-    def __init__(self):
-        generic_queue.GenericQueue.__init__(self)
-        self.queue_name = "SEARCHQUEUE"
-
-    def is_in_queue(self, show, segment):
-        for cur_item in self.queue:
-            if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
-                return True
-        return False
-
-    def is_ep_in_queue(self, segment):
-        for cur_item in self.queue:
-            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.segment == segment:
-                return True
-        return False
-    
-    def is_show_in_queue(self, show):
-        for cur_item in self.queue:
-            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.show.indexerid == show:
-                return True
-        return False
-    
-    def get_all_ep_from_queue(self, show):
-        ep_obj_list = []
-        for cur_item in self.queue:
-            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and str(cur_item.show.indexerid) == show:
-                ep_obj_list.append(cur_item)
-        return ep_obj_list
-    
-    def pause_backlog(self):
-        self.min_priority = generic_queue.QueuePriorities.HIGH
-
-    def unpause_backlog(self):
-        self.min_priority = 0
-
-    def is_backlog_paused(self):
-        # backlog priorities are NORMAL, this should be done properly somewhere
-        return self.min_priority >= generic_queue.QueuePriorities.NORMAL
-
-    def is_manualsearch_in_progress(self):
-        # Only referenced in webserve.py, only current running manualsearch or failedsearch is needed!!
-        if isinstance(self.currentItem, (ManualSearchQueueItem, FailedQueueItem)):
-            return True
-        return False
-    
-    def is_backlog_in_progress(self):
-        for cur_item in self.queue + [self.currentItem]:
-            if isinstance(cur_item, BacklogQueueItem):
-                return True
-        return False
-
-    def is_dailysearch_in_progress(self):
-        for cur_item in self.queue + [self.currentItem]:
-            if isinstance(cur_item, DailySearchQueueItem):
-                return True
-        return False
-
-    def queue_length(self):
-        length = {'backlog': 0, 'daily': 0, 'manual': 0, 'failed': 0}
-        for cur_item in self.queue:
-            if isinstance(cur_item, DailySearchQueueItem):
-                length['daily'] += 1
-            elif isinstance(cur_item, BacklogQueueItem):
-                length['backlog'] += 1
-            elif isinstance(cur_item, ManualSearchQueueItem):
-                length['manual'] += 1
-            elif isinstance(cur_item, FailedQueueItem):
-                length['failed'] += 1
-        return length
-
-
-    def add_item(self, item):
-        if isinstance(item, DailySearchQueueItem):
-            # daily searches
-            generic_queue.GenericQueue.add_item(self, item)
-        elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
-            # backlog searches
-            generic_queue.GenericQueue.add_item(self, item)
-        elif isinstance(item, (ManualSearchQueueItem, FailedQueueItem)) and not self.is_ep_in_queue(item.segment):
-            # manual and failed searches
-            generic_queue.GenericQueue.add_item(self, item)
-        else:
-            logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
-
-class DailySearchQueueItem(generic_queue.QueueItem):
-    def __init__(self):
-        self.success = None
-        generic_queue.QueueItem.__init__(self, 'Daily Search', DAILY_SEARCH)
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-
-        try:
-            logger.log("Beginning daily search for new episodes")
-            foundResults = search.searchForNeededEpisodes()
-
-            if not len(foundResults):
-                logger.log(u"No needed episodes found")
-            else:
-                for result in foundResults:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    self.success = search.snatchEpisode(result)
-
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-
-            generic_queue.QueueItem.finish(self)
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-
-        if self.success is None:
-            self.success = False
-
-        self.finish()
-
-
-class ManualSearchQueueItem(generic_queue.QueueItem):
-    def __init__(self, show, segment):
-        generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
-        self.priority = generic_queue.QueuePriorities.HIGH
-        self.name = 'MANUAL-' + str(show.indexerid)
-        self.success = None
-        self.show = show
-        self.segment = segment
-        self.started = None
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-
-        try:
-            logger.log("Beginning manual search for: [" + self.segment.prettyName() + "]")
-            self.started = True
-            
-            searchResult = search.searchProviders(self.show, [self.segment], True)
-
-            if searchResult:
-                # just use the first result for now
-                logger.log(u"Downloading " + searchResult[0].name + " from " + searchResult[0].provider.name)
-                self.success = search.snatchEpisode(searchResult[0])
-
-                # give the CPU a break
-                time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-
-            else:
-                ui.notifications.message('No downloads were found',
-                                         "Couldn't find a download for <i>%s</i>" % self.segment.prettyName())
-
-                logger.log(u"Unable to find a download for: [" + self.segment.prettyName() + "]")
-
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-        
-        ### Keep a list with the 100 last executed searches
-        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
-        
-        if self.success is None:
-            self.success = False
-
-        self.finish()
-
-
-class BacklogQueueItem(generic_queue.QueueItem):
-    def __init__(self, show, segment):
-        generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
-        self.priority = generic_queue.QueuePriorities.LOW
-        self.name = 'BACKLOG-' + str(show.indexerid)
-        self.success = None
-        self.show = show
-        self.segment = segment
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-
-        try:
-            logger.log("Beginning backlog search for: [" + self.show.name + "]")
-            searchResult = search.searchProviders(self.show, self.segment, False)
-
-            if searchResult:
-                for result in searchResult:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    search.snatchEpisode(result)
-
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-            else:
-                logger.log(u"No needed episodes found during backlog search for: [" + self.show.name + "]")
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-
-        self.finish()
-
-
-class FailedQueueItem(generic_queue.QueueItem):
-    def __init__(self, show, segment):
-        generic_queue.QueueItem.__init__(self, 'Retry', FAILED_SEARCH)
-        self.priority = generic_queue.QueuePriorities.HIGH
-        self.name = 'RETRY-' + str(show.indexerid)
-        self.show = show
-        self.segment = segment
-        self.success = None
-        self.started = None
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-        self.started = True
-        
-        try:
-            for epObj in self.segment:
-            
-                logger.log(u"Marking episode as bad: [" + epObj.prettyName() + "]")
-                
-                failed_history.markFailed(epObj)
-    
-                (release, provider) = failed_history.findRelease(epObj)
-                if release:
-                    failed_history.logFailed(release)
-                    history.logFailed(epObj, release, provider)
-    
-                failed_history.revertEpisode(epObj)
-                logger.log("Beginning failed download search for: [" + epObj.prettyName() + "]")
-
-            searchResult = search.searchProviders(self.show, self.segment, True)
-
-            if searchResult:
-                for result in searchResult:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    search.snatchEpisode(result)
-
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-            else:
-                pass
-                #logger.log(u"No valid episode found to retry for: [" + self.segment.prettyName() + "]")
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-            
-        ### Keep a list with the 100 last executed searches
-        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
-
-        if self.success is None:
-            self.success = False
-
-        self.finish()
-        
-def fifo(myList, item, maxSize = 100):
-    if len(myList) >= maxSize:
-        myList.pop(0)
-    myList.append(item)
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import time
+import traceback
+import threading
+
+import sickbeard
+from sickbeard import db, logger, common, exceptions, helpers
+from sickbeard import generic_queue, scheduler
+from sickbeard import search, failed_history, history
+from sickbeard import ui
+from sickbeard.exceptions import ex
+from sickbeard.search import pickBestResult
+
+search_queue_lock = threading.Lock()
+
+BACKLOG_SEARCH = 10
+DAILY_SEARCH = 20
+FAILED_SEARCH = 30
+MANUAL_SEARCH = 40
+
+MANUAL_SEARCH_HISTORY = []
+MANUAL_SEARCH_HISTORY_SIZE = 100
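+# fifo() below trims MANUAL_SEARCH_HISTORY to the newest MANUAL_SEARCH_HISTORY_SIZE items so recently finished searches can be reported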
+
+class SearchQueue(generic_queue.GenericQueue):
+    def __init__(self):
+        generic_queue.GenericQueue.__init__(self)
+        self.queue_name = "SEARCHQUEUE"
+
+    def is_in_queue(self, show, segment):
+        for cur_item in self.queue:
+            if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
+                return True
+        return False
+
+    def is_ep_in_queue(self, segment):
+        for cur_item in self.queue:
+            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.segment == segment:
+                return True
+        return False
+    
+    def is_show_in_queue(self, show):
+        for cur_item in self.queue:
+            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.show.indexerid == show:
+                return True
+        return False
+    
+    def get_all_ep_from_queue(self, show):
+        ep_obj_list = []
+        for cur_item in self.queue:
+            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and str(cur_item.show.indexerid) == show:
+                ep_obj_list.append(cur_item)
+        return ep_obj_list
+    
+    def pause_backlog(self):
+        self.min_priority = generic_queue.QueuePriorities.HIGH
+
+    def unpause_backlog(self):
+        self.min_priority = 0
+
+    def is_backlog_paused(self):
+        # backlog priorities are NORMAL, this should be done properly somewhere
+        return self.min_priority >= generic_queue.QueuePriorities.NORMAL
+
+    def is_manualsearch_in_progress(self):
+        # Only referenced in webserve.py; only the currently running manual or failed search is needed
+        if isinstance(self.currentItem, (ManualSearchQueueItem, FailedQueueItem)):
+            return True
+        return False
+    
+    def is_backlog_in_progress(self):
+        for cur_item in self.queue + [self.currentItem]:
+            if isinstance(cur_item, BacklogQueueItem):
+                return True
+        return False
+
+    def is_dailysearch_in_progress(self):
+        for cur_item in self.queue + [self.currentItem]:
+            if isinstance(cur_item, DailySearchQueueItem):
+                return True
+        return False
+
+    def queue_length(self):
+        length = {'backlog': 0, 'daily': 0, 'manual': 0, 'failed': 0}
+        for cur_item in self.queue:
+            if isinstance(cur_item, DailySearchQueueItem):
+                length['daily'] += 1
+            elif isinstance(cur_item, BacklogQueueItem):
+                length['backlog'] += 1
+            elif isinstance(cur_item, ManualSearchQueueItem):
+                length['manual'] += 1
+            elif isinstance(cur_item, FailedQueueItem):
+                length['failed'] += 1
+        return length
+
+
+    def add_item(self, item):
+        if isinstance(item, DailySearchQueueItem):
+            # daily searches
+            generic_queue.GenericQueue.add_item(self, item)
+        elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
+            # backlog searches
+            generic_queue.GenericQueue.add_item(self, item)
+        elif isinstance(item, (ManualSearchQueueItem, FailedQueueItem)) and not self.is_ep_in_queue(item.segment):
+            # manual and failed searches
+            generic_queue.GenericQueue.add_item(self, item)
+        else:
+            logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
+
+class DailySearchQueueItem(generic_queue.QueueItem):
+    def __init__(self):
+        self.success = None
+        generic_queue.QueueItem.__init__(self, 'Daily Search', DAILY_SEARCH)
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+
+        try:
+            logger.log("Beginning daily search for new episodes")
+            foundResults = search.searchForNeededEpisodes()
+
+            if not foundResults:
+                logger.log(u"No needed episodes found")
+            else:
+                for result in foundResults:
+                    # snatch the best result found for each episode
+                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
+                    self.success = search.snatchEpisode(result)
+
+                    # give the CPU a break
+                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+
+            generic_queue.QueueItem.finish(self)
+        except Exception:
+            logger.log(traceback.format_exc(), logger.DEBUG)
+
+        if self.success is None:
+            self.success = False
+
+        self.finish()
+
+
+class ManualSearchQueueItem(generic_queue.QueueItem):
+    def __init__(self, show, segment, downCurQuality=False):
+        generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
+        self.priority = generic_queue.QueuePriorities.HIGH
+        self.name = 'MANUAL-' + str(show.indexerid)
+        self.success = None
+        self.show = show
+        self.segment = segment
+        self.started = None
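+        # when True, wantEpisode() may accept a result at the same quality as the current download instead of requiring an upgrade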
+        self.downCurQuality = downCurQuality
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+
+        try:
+            logger.log("Beginning manual search for: [" + self.segment.prettyName() + "]")
+            self.started = True
+            
+            searchResult = search.searchProviders(self.show, [self.segment], True, self.downCurQuality)
+
+            if searchResult:
+                # just use the first result for now
+                logger.log(u"Downloading " + searchResult[0].name + " from " + searchResult[0].provider.name)
+                self.success = search.snatchEpisode(searchResult[0])
+
+                # give the CPU a break
+                time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+
+            else:
+                ui.notifications.message('No downloads were found',
+                                         "Couldn't find a download for <i>%s</i>" % self.segment.prettyName())
+
+                logger.log(u"Unable to find a download for: [" + self.segment.prettyName() + "]")
+
+        except Exception:
+            logger.log(traceback.format_exc(), logger.DEBUG)
+
+        # keep a list of the last MANUAL_SEARCH_HISTORY_SIZE executed searches
+        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
+
+        if self.success is None:
+            self.success = False
+
+        self.finish()
+
+
+class BacklogQueueItem(generic_queue.QueueItem):
+    def __init__(self, show, segment):
+        generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
+        self.priority = generic_queue.QueuePriorities.LOW
+        self.name = 'BACKLOG-' + str(show.indexerid)
+        self.success = None
+        self.show = show
+        self.segment = segment
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+
+        try:
+            logger.log("Beginning backlog search for: [" + self.show.name + "]")
+            searchResult = search.searchProviders(self.show, self.segment, False)
+
+            if searchResult:
+                for result in searchResult:
+                    # snatch the best result found for each episode
+                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
+                    search.snatchEpisode(result)
+
+                    # give the CPU a break
+                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+            else:
+                logger.log(u"No needed episodes found during backlog search for: [" + self.show.name + "]")
+        except Exception:
+            logger.log(traceback.format_exc(), logger.DEBUG)
+
+        self.finish()
+
+
+class FailedQueueItem(generic_queue.QueueItem):
+    def __init__(self, show, segment, downCurQuality=False):
+        generic_queue.QueueItem.__init__(self, 'Retry', FAILED_SEARCH)
+        self.priority = generic_queue.QueuePriorities.HIGH
+        self.name = 'RETRY-' + str(show.indexerid)
+        self.show = show
+        self.segment = segment
+        self.success = None
+        self.started = None
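+        # when True, the retry may snatch a result at the same quality as the release that failed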
+        self.downCurQuality = downCurQuality
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+        self.started = True
+        
+        try:
+            for epObj in self.segment:
+
+                logger.log(u"Marking episode as bad: [" + epObj.prettyName() + "]")
+
+                failed_history.markFailed(epObj)
+
+                (release, provider) = failed_history.findRelease(epObj)
+                if release:
+                    failed_history.logFailed(release)
+                    history.logFailed(epObj, release, provider)
+
+                failed_history.revertEpisode(epObj)
+                logger.log("Beginning failed download search for: [" + epObj.prettyName() + "]")
+
+            searchResult = search.searchProviders(self.show, self.segment, True, self.downCurQuality)
+
+            if searchResult:
+                for result in searchResult:
+                    # snatch the best result found for each episode
+                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
+                    search.snatchEpisode(result)
+
+                    # give the CPU a break
+                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+            else:
+                logger.log(u"No valid results found to retry for: [" + self.show.name + "]")
+        except Exception:
+            logger.log(traceback.format_exc(), logger.DEBUG)
+
+        # keep a list of the last MANUAL_SEARCH_HISTORY_SIZE executed searches
+        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
+
+        if self.success is None:
+            self.success = False
+
+        self.finish()
+        
+def fifo(myList, item, maxSize=100):
+    if len(myList) >= maxSize:
+        myList.pop(0)
+    myList.append(item)
diff --git a/sickbeard/tv.py b/sickbeard/tv.py
index 854c4248e4fcbacf7ab63715055a2a12ce4b8447..4b138b8648a30ad7efd58cd0add9c583557a9359 100644
--- a/sickbeard/tv.py
+++ b/sickbeard/tv.py
@@ -1198,7 +1198,7 @@ class TVShow(object):
         return toReturn
 
 
-    def wantEpisode(self, season, episode, quality, manualSearch=False):
+    def wantEpisode(self, season, episode, quality, manualSearch=False, downCurQuality=False):
 
         logger.log(u"Checking if found episode " + str(season) + "x" + str(episode) + " is wanted at quality " +
                    Quality.qualityStrings[quality], logger.DEBUG)
@@ -1230,22 +1230,23 @@ class TVShow(object):
             logger.log(u"Existing episode status is skipped/ignored/archived, ignoring found episode", logger.DEBUG)
             return False
 
+        curStatus, curQuality = Quality.splitCompositeStatus(epStatus)
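+        # (moved above the quality checks so the manual-search branch below can compare against curQuality)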
+
         # if it's one of these then we want it as long as it's in our allowed initial qualities
         if quality in anyQualities + bestQualities:
             if epStatus in (WANTED, UNAIRED, SKIPPED):
                 logger.log(u"Existing episode status is wanted/unaired/skipped, getting found episode", logger.DEBUG)
                 return True
-            #elif manualSearch:
-            #    logger.log(
-            #        u"Usually ignoring found episode, but forced search allows the quality, getting found episode",
-            #        logger.DEBUG)
-            #    return True
+            elif manualSearch:
+                if (downCurQuality and quality >= curQuality) or (not downCurQuality and quality > curQuality):
+                    logger.log(
+                        u"Usually ignoring found episode, but forced search allows the quality, getting found episode",
+                        logger.DEBUG)
+                    return True
             else:
                 logger.log(u"Quality is on wanted list, need to check if it's better than existing quality",
                            logger.DEBUG)
 
-        curStatus, curQuality = Quality.splitCompositeStatus(epStatus)
-
         # if we are re-downloading then we only want it if it's in our bestQualities list and better than what we have
         if curStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST and quality in bestQualities and quality > curQuality:
             logger.log(u"Episode already exists but the found episode has better quality, getting found episode",
@@ -1289,9 +1290,8 @@ class TVShow(object):
             if epStatus == DOWNLOADED and curQuality == Quality.UNKNOWN:
                 return Overview.QUAL
             elif epStatus in (SNATCHED, SNATCHED_PROPER, SNATCHED_BEST):
-                if curQuality < maxBestQuality:
-                    return Overview.QUAL
                 return Overview.SNATCHED
+            # if they don't want re-downloads then we call it good if they have anything
             elif maxBestQuality == None:
                 return Overview.GOOD
             # if the want only first match and already have one call it good
@@ -1770,7 +1770,10 @@ class TVEpisode(object):
 
                 # if we somehow are still UNKNOWN then just use the shows defined default status or SKIPPED
                 elif self.status == UNKNOWN:
-                    self.status = self.show.default_ep_status
+                    if self.season > 0:  # if it's not a special
+                        self.status = self.show.default_ep_status
+                    else:
+                        self.status = SKIPPED
 
                 else:
                     logger.log(
diff --git a/sickbeard/tvcache.py b/sickbeard/tvcache.py
index 595da3f9de97adb4b47f96197cad5b8ff228ae1c..4d642472ac861aea7fe68983df916331b43dc3b3 100644
--- a/sickbeard/tvcache.py
+++ b/sickbeard/tvcache.py
@@ -134,8 +134,7 @@ class TVCache():
         except AuthException, e:
             logger.log(u"Authentication error: " + ex(e), logger.ERROR)
         except Exception, e:
-            logger.log(u"Error while searching " + self.provider.name + ", skipping: " + ex(e), logger.ERROR)
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(u"Error while searching " + self.provider.name + ", skipping: " + repr(e), logger.DEBUG)
 
     def getRSSFeed(self, url, post_data=None, items=[]):
         handlers = []
@@ -298,8 +297,8 @@ class TVCache():
                 [name, season, episodeText, parse_result.show.indexerid, url, curTimestamp, quality, release_group, version]]
 
 
-    def searchCache(self, episode, manualSearch=False):
-        neededEps = self.findNeededEpisodes(episode, manualSearch)
+    def searchCache(self, episode, manualSearch=False, downCurQuality=False):
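+        # downCurQuality is threaded through findNeededEpisodes() to wantEpisode()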
+        neededEps = self.findNeededEpisodes(episode, manualSearch, downCurQuality)
         return neededEps[episode] if len(neededEps) > 0 else []
 
     def listPropers(self, date=None, delimiter="."):
@@ -312,7 +311,7 @@ class TVCache():
         return filter(lambda x: x['indexerid'] != 0, myDB.select(sql))
 
 
-    def findNeededEpisodes(self, episode, manualSearch=False):
+    def findNeededEpisodes(self, episode, manualSearch=False, downCurQuality=False):
         neededEps = {}
         cl = []
 
@@ -357,7 +356,7 @@ class TVCache():
             curVersion = curResult["version"]
 
             # if the show says we want that episode then add it to the list
-            if not showObj.wantEpisode(curSeason, curEp, curQuality, manualSearch):
+            if not showObj.wantEpisode(curSeason, curEp, curQuality, manualSearch, downCurQuality):
                 logger.log(u"Skipping " + curResult["name"] + " because we don't want an episode that's " +
                            Quality.qualityStrings[curQuality], logger.DEBUG)
                 continue
diff --git a/sickbeard/versionChecker.py b/sickbeard/versionChecker.py
index 619a212381fe56ec00a93696c0b546433756fc84..d32c778203b454415841e6b7ccfd62c8e7f41960 100644
--- a/sickbeard/versionChecker.py
+++ b/sickbeard/versionChecker.py
@@ -73,6 +73,9 @@ class CheckVersion():
                             logger.log(u"Update was successful!")
                             ui.notifications.message('Update was successful')
                             sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
+                        else:
+                            logger.log(u"Update failed!")
+                            ui.notifications.message('Update failed!')
 
     def _runbackup(self):
         # Do a system backup before update
@@ -372,7 +375,7 @@ class GitUpdateManager(UpdateManager):
 
             if output:
                 output = output.strip()
-            logger.log(u"git output: " + str(output), logger.DEBUG)
+
 
         except OSError:
             logger.log(u"Command " + cmd + " didn't work")
@@ -383,7 +386,10 @@ class GitUpdateManager(UpdateManager):
             exit_status = 0
 
         elif exit_status == 1:
-            logger.log(cmd + u" returned : " + str(output), logger.ERROR)
+            if 'stash' in output:
+                logger.log(u"Please enable 'git reset' in settings or stash your changes in local files",logger.WARNING)
+            else:
+                logger.log(cmd + u" returned : " + str(output), logger.ERROR)
             exit_status = 1
 
         elif exit_status == 128 or 'fatal:' in output or err:
@@ -555,6 +561,8 @@ class GitUpdateManager(UpdateManager):
                 notifiers.notify_git_update(sickbeard.CUR_COMMIT_HASH if sickbeard.CUR_COMMIT_HASH else "")
 
             return True
+        else:
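+            # return an explicit False so callers can take the "Update failed!" branch added above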
+            return False
 
     def clean(self):
         """
diff --git a/sickbeard/webapi.py b/sickbeard/webapi.py
index fa4b614b59d9935c822806e0935ddb30c46e5e8c..ec25c41f93a9562dd7224a6c7004c57cf50bfe7a 100644
--- a/sickbeard/webapi.py
+++ b/sickbeard/webapi.py
@@ -749,7 +749,11 @@ class CMD_ComingEpisodes(ApiCall):
         for curType in self.type:
             finalEpResults[curType] = []
 
-        for ep in sql_results:
+        # Safety measure: convert the rows in sql_results to plain dicts.
+        # This shouldn't be required, as the DB connection should already return dict rows rather than sqlite3.Row objects
+        dict_results = [dict(row) for row in sql_results]
+
+        for ep in dict_results:
             """
                 Missed:   yesterday... (less than 1week)
                 Today:    today
diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py
index 64749f52d17081aef23a89de7d3a9a4d270cbec2..f5a5f594687f84130ba68819191fec31cb8460f5 100644
--- a/sickbeard/webserve.py
+++ b/sickbeard/webserve.py
@@ -1087,7 +1087,7 @@ class Home(WebRoot):
                 # do a hard restart
                 sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
             
-                t = PageTemplate(rh=self, file="restart_bare.tmpl")
+                t = PageTemplate(rh=self, file="restart.tmpl")
                 return t.respond()
             else:
                 return self._genericMessage("Update Failed",
@@ -1871,7 +1871,7 @@ class Home(WebRoot):
 
         return self.redirect("/home/displayShow?show=" + show)
 
-    def searchEpisode(self, show=None, season=None, episode=None):
+    def searchEpisode(self, show=None, season=None, episode=None, downCurQuality=0):
 
         # retrieve the episode object and fail if we can't get one
         ep_obj = self._getEpisode(show, season, episode)
@@ -1879,7 +1879,7 @@ class Home(WebRoot):
             return json.dumps({'result': 'failure'})
 
         # make a queue item for it and put it on the queue
-        ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj.show, ep_obj)
+        ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj.show, ep_obj, bool(int(downCurQuality)))
 
         sickbeard.searchQueueScheduler.action.add_item(ep_queue_item)
 
@@ -1894,13 +1894,14 @@ class Home(WebRoot):
     ### Returns the current ep_queue_item status for the current viewed show.
     # Possible status: Downloaded, Snatched, etc...
     # Returns {'show': 279530, 'episodes' : ['episode' : 6, 'season' : 1, 'searchstatus' : 'queued', 'status' : 'running', 'quality': '4013']
-    def getManualSearchStatus(self, show=None, season=None):
+    def getManualSearchStatus(self, show=None):
         def getEpisodes(searchThread, searchstatus):
             results = []
             showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(searchThread.show.indexerid))
 
             if isinstance(searchThread, sickbeard.search_queue.ManualSearchQueueItem):
-                results.append({'episode': searchThread.segment.episode,
+                results.append({'show': searchThread.show.indexerid,
+                                'episode': searchThread.segment.episode,
                                 'episodeindexid': searchThread.segment.indexerid,
                                 'season': searchThread.segment.season,
                                 'searchstatus': searchstatus,
@@ -1909,7 +1910,8 @@ class Home(WebRoot):
                                 'overview': Overview.overviewStrings[showObj.getOverview(int(searchThread.segment.status or -1))]})
             else:
                 for epObj in searchThread.segment:
-                    results.append({'episode': epObj.episode,
+                    results.append({'show': epObj.show.indexerid,
+                                    'episode': epObj.episode,
                                     'episodeindexid': epObj.indexerid,
                                     'season': epObj.season,
                                     'searchstatus': searchstatus,
@@ -1921,9 +1923,6 @@ class Home(WebRoot):
 
         episodes = []
 
-        if not show and not season:
-            return json.dumps({'show': show, 'episodes': episodes})
-
         # Queued Searches
         searchstatus = 'queued'
         for searchThread in sickbeard.searchQueueScheduler.action.get_all_ep_from_queue(show):
@@ -1942,8 +1941,9 @@ class Home(WebRoot):
         # Finished Searches
         searchstatus = 'finished'
         for searchThread in sickbeard.search_queue.MANUAL_SEARCH_HISTORY:
-            if not str(searchThread.show.indexerid) == show:
-                continue
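+            # the show filter is now optional; with no show given, finished searches for every show are returned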
+            if show is not None:
+                if not str(searchThread.show.indexerid) == show:
+                    continue
 
             if isinstance(searchThread, sickbeard.search_queue.ManualSearchQueueItem):
                 if not [x for x in episodes if x['episodeindexid'] == searchThread.segment.indexerid]:
@@ -1953,7 +1953,7 @@ class Home(WebRoot):
                 if not [i for i, j in zip(searchThread.segment, episodes) if i.indexerid == j['episodeindexid']]:
                     episodes += getEpisodes(searchThread, searchstatus)
 
-        return json.dumps({'show': show, 'episodes': episodes})
+        return json.dumps({'episodes': episodes})
 
     def getQualityClass(self, ep_obj):
         # return the correct json value
@@ -2068,7 +2068,7 @@ class Home(WebRoot):
         return json.dumps(result)
 
 
-    def retryEpisode(self, show, season, episode):
+    def retryEpisode(self, show, season, episode, downCurQuality):
 
         # retrieve the episode object and fail if we can't get one
         ep_obj = self._getEpisode(show, season, episode)
@@ -2076,7 +2076,7 @@ class Home(WebRoot):
             return json.dumps({'result': 'failure'})
 
         # make a queue item for it and put it on the queue
-        ep_queue_item = search_queue.FailedQueueItem(ep_obj.show, [ep_obj])
+        ep_queue_item = search_queue.FailedQueueItem(ep_obj.show, [ep_obj], bool(int(downCurQuality)))
         sickbeard.searchQueueScheduler.action.add_item(ep_queue_item)
 
         if not ep_queue_item.started and ep_queue_item.success is None:
@@ -2099,7 +2099,7 @@ class HomePostProcess(Home):
         return t.respond()
 
     def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None, process_method=None, force=None,
-                       is_priority=None, failed="0", type="auto", *args, **kwargs):
+                       is_priority=None, delete_on="0", failed="0", type="auto", *args, **kwargs):
 
         if failed == "0":
             failed = False
@@ -2115,12 +2115,17 @@ class HomePostProcess(Home):
             is_priority = True
         else:
             is_priority = False
-
+
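+        # delete_on follows the same checkbox convention as the other flags: "on" or "1" means checked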
+        if delete_on in ["on", "1"]:
+            delete_on = True
+        else:
+            delete_on = False
+
         if not dir:
             return self.redirect("/home/postprocess/")
         else:
             result = processTV.processDir(dir, nzbName, process_method=process_method, force=force,
-                                          is_priority=is_priority, failed=failed, type=type)
+                                          is_priority=is_priority, delete_on=delete_on, failed=failed, type=type)
             if quiet is not None and int(quiet) == 1:
                 return result
 
@@ -2341,7 +2346,7 @@ class HomeAddShows(Home):
                     [int(show['ids'][indexers[sickbeard.TRAKT_DEFAULT_INDEXER - 1]]),
                      'http://www.trakt.tv/shows/%s' % show['ids']['slug'], show['title'],
                      show['overview'],
-                     None if show['first_aired'] is None else dateutil_parser.parse(show['first_aired']).strftime('%Y%m%d')]
+                     None if show['first_aired'] is None else dateutil_parser.parse(show['first_aired']).strftime(sickbeard.DATE_PRESET)]
                     for show in recommendedlist if not helpers.findCertainShow(sickbeard.showList, [
                     int(show['ids'][indexers[sickbeard.TRAKT_DEFAULT_INDEXER - 1]])])))
         except (traktException, traktAuthException, traktServerBusy) as e:
@@ -2389,21 +2394,56 @@ class HomeAddShows(Home):
         trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
 
         try:
-            shows = trakt_api.traktRequest("shows/trending?limit=50&extended=full,images") or []
+            not_liked_show = []
+            if sickbeard.TRAKT_BLACKLIST_NAME is not None:
+                not_liked_show = trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items") or []
+
+            limit_show = 50 + len(not_liked_show)
+
+            shows = trakt_api.traktRequest("shows/trending?limit=" + str(limit_show) + "&extended=full,images") or []
+
+            library_shows = trakt_api.traktRequest("sync/collection/shows?extended=full") or []
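+            # keep only shows that aren't in SickRage already, aren't in the Trakt collection, and aren't blacklisted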
             for show in shows:
                 try:
                     tvdb_id = int(show['show']['ids']['tvdb'])
                     tvrage_id = int(show['show']['ids']['tvrage'] or 0)
                     if not helpers.findCertainShow(sickbeard.showList,
                                                    [tvdb_id, tvrage_id]):
-                        t.trending_shows += [show]
+                        if show['show']['ids']['tvdb'] not in (lshow['show']['ids']['tvdb'] for lshow in library_shows):
+                            if show['show']['ids']['tvdb'] not in (nls['show']['ids']['tvdb'] for nls in not_liked_show if nls['type'] == 'show'):
+                                t.trending_shows += [show]
                 except exceptions.MultipleShowObjectsException:
                     continue
+
+            t.blacklist = bool(sickbeard.TRAKT_BLACKLIST_NAME)
+
         except (traktException, traktAuthException, traktServerBusy) as e:
             logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
 
         return t.respond()
 
+    def addShowToBlacklist(self, indexer_id):
+
+        # request body for the Trakt list-items endpoint
+        data = {
+            'shows': [
+                {
+                    'ids': {
+                        'tvdb': indexer_id
+                    }
+                }
+            ]
+        }
+
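+        # POST the show to the user's configured blacklist on Trakt, then bounce back to the trending page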
+        trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
+
+        result = trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items", data, method='POST')
+
+        return self.redirect('/home/addShows/trendingShows/')
+
     def existingShows(self):
         """
         Prints out the page to add existing shows from a root dir
@@ -3579,7 +3619,7 @@ class ConfigGeneral(Config):
                     web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None,
                     handle_reverse_proxy=None, sort_article=None, auto_update=None, notify_on_update=None,
                     proxy_setting=None, proxy_indexers=None, anon_redirect=None, git_path=None, git_remote=None,
-                    calendar_unprotected=None,
+                    calendar_unprotected=None, no_restart=None,
                     display_filesize=None, fuzzy_dating=None, trim_zero=None, date_preset=None, date_preset_na=None, time_preset=None,
                     indexer_timeout=None, play_videos=None, download_url=None, rootDir=None, theme_name=None,
                     git_reset=None, git_username=None, git_password=None, git_autoissues=None):
@@ -3615,6 +3655,7 @@ class ConfigGeneral(Config):
         sickbeard.GIT_PATH = git_path
         sickbeard.GIT_REMOTE = git_remote
         sickbeard.CALENDAR_UNPROTECTED = config.checkbox_to_value(calendar_unprotected)
+        sickbeard.NO_RESTART = config.checkbox_to_value(no_restart)
         # sickbeard.LOG_DIR is set in config.change_LOG_DIR()
 
         sickbeard.WEB_PORT = config.to_int(web_port)
@@ -3752,11 +3793,11 @@ class ConfigSearch(Config):
                    nzbget_password=None, nzbget_category=None, nzbget_category_anime=None, nzbget_priority=None,
                    nzbget_host=None, nzbget_use_https=None, backlog_days=None, backlog_frequency=None,
                    dailysearch_frequency=None, nzb_method=None, torrent_method=None, usenet_retention=None,
-                   download_propers=None, check_propers_interval=None, allow_high_priority=None,
+                   download_propers=None, check_propers_interval=None, allow_high_priority=None, sab_forced=None,
                    randomize_providers=None, backlog_startup=None, use_failed_downloads=None, delete_failed=None,
                    dailysearch_startup=None, torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None,
                    torrent_label=None, torrent_label_anime=None, torrent_path=None, torrent_verify_cert=None,
-                   torrent_seed_time=None, torrent_paused=None, torrent_high_bandwidth=None,
+                   torrent_seed_time=None, torrent_paused=None, torrent_high_bandwidth=None, coming_eps_missed_range=None,
                    torrent_rpcurl=None, torrent_auth_type = None, ignore_words=None, require_words=None):
 
         results = []
@@ -3768,9 +3809,11 @@ class ConfigSearch(Config):
             results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
 
         config.change_DAILYSEARCH_FREQUENCY(dailysearch_frequency)
 
         config.change_BACKLOG_FREQUENCY(backlog_frequency)
         sickbeard.BACKLOG_DAYS = config.to_int(backlog_days, default=7)
+        sickbeard.COMING_EPS_MISSED_RANGE = config.to_int(coming_eps_missed_range, default=7)
 
         sickbeard.USE_NZBS = config.checkbox_to_value(use_nzbs)
         sickbeard.USE_TORRENTS = config.checkbox_to_value(use_torrents)
@@ -3785,6 +3828,22 @@ class ConfigSearch(Config):
         sickbeard.RANDOMIZE_PROVIDERS = config.checkbox_to_value(randomize_providers)
 
         sickbeard.DOWNLOAD_PROPERS = config.checkbox_to_value(download_propers)
+        config.change_DOWNLOAD_PROPERS(sickbeard.DOWNLOAD_PROPERS)
+
+        if sickbeard.DOWNLOAD_PROPERS and not sickbeard.properFinderScheduler.isAlive():
+            sickbeard.properFinderScheduler.silent = False
+            try:
+                sickbeard.properFinderScheduler.start()
+            except Exception:
+                pass  # the scheduler thread was already started
+        elif not sickbeard.DOWNLOAD_PROPERS:
+            sickbeard.properFinderScheduler.stop.set()
+            sickbeard.properFinderScheduler.silent = True
+            try:
+                sickbeard.properFinderScheduler.join(5)
+            except Exception:
+                pass  # the thread was never started or didn't stop within the timeout
+
         sickbeard.CHECK_PROPERS_INTERVAL = check_propers_interval
 
         sickbeard.ALLOW_HIGH_PRIORITY = config.checkbox_to_value(allow_high_priority)
@@ -3800,6 +3859,7 @@ class ConfigSearch(Config):
         sickbeard.SAB_CATEGORY = sab_category
         sickbeard.SAB_CATEGORY_ANIME = sab_category_anime
         sickbeard.SAB_HOST = config.clean_url(sab_host)
+        sickbeard.SAB_FORCED = config.checkbox_to_value(sab_forced)
 
         sickbeard.NZBGET_USERNAME = nzbget_username
         sickbeard.NZBGET_PASSWORD = nzbget_password
@@ -3850,7 +3910,7 @@ class ConfigPostProcessing(Config):
                            kodi_data=None, kodi_12plus_data=None, mediabrowser_data=None, sony_ps3_data=None,
                            wdtv_data=None, tivo_data=None, mede8er_data=None,
                            keep_processed_dir=None, process_method=None, del_rar_contents=None, process_automatically=None,
-                           rename_episodes=None, airdate_episodes=None, unpack=None,
+                           no_delete=None, rename_episodes=None, airdate_episodes=None, unpack=None,
                            move_associated_files=None, sync_files=None, postpone_if_sync_files=None, nfo_rename=None,
                            tv_download_dir=None, naming_custom_abd=None,
                            naming_anime=None,
@@ -3890,7 +3950,7 @@ class ConfigPostProcessing(Config):
                 results.append("Unpacking Not Supported, disabling unpack setting")
         else:
             sickbeard.UNPACK = config.checkbox_to_value(unpack)
-
+        sickbeard.NO_DELETE = config.checkbox_to_value(no_delete)
         sickbeard.KEEP_PROCESSED_DIR = config.checkbox_to_value(keep_processed_dir)
         sickbeard.PROCESS_METHOD = process_method
         sickbeard.DELRARCONTENTS = config.checkbox_to_value(del_rar_contents)
@@ -4536,7 +4596,7 @@ class ConfigNotifications(Config):
                           use_trakt=None, trakt_username=None, trakt_password=None,
                           trakt_remove_watchlist=None, trakt_sync_watchlist=None, trakt_method_add=None,
                           trakt_start_paused=None, trakt_use_recommended=None, trakt_sync=None,
-                          trakt_default_indexer=None, trakt_remove_serieslist=None, trakt_disable_ssl_verify=None, trakt_timeout=None,
+                          trakt_default_indexer=None, trakt_remove_serieslist=None, trakt_disable_ssl_verify=None, trakt_timeout=None, trakt_blacklist_name=None,
                           use_synologynotifier=None, synologynotifier_notify_onsnatch=None,
                           synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
                           use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None,
@@ -4660,6 +4720,7 @@ class ConfigNotifications(Config):
         sickbeard.TRAKT_DEFAULT_INDEXER = int(trakt_default_indexer)
         sickbeard.TRAKT_DISABLE_SSL_VERIFY = config.checkbox_to_value(trakt_disable_ssl_verify)
         sickbeard.TRAKT_TIMEOUT = int(trakt_timeout)
+        sickbeard.TRAKT_BLACKLIST_NAME = trakt_blacklist_name
 
         if sickbeard.USE_TRAKT:
             sickbeard.traktCheckerScheduler.silent = False
diff --git a/tests/config_tests.py b/tests/config_tests.py
index 8632f40b035f799262fea998efe6f5faab7ceb66..4b1ffc82516272651d8efb2b69a6b571aabf2b3c 100644
--- a/tests/config_tests.py
+++ b/tests/config_tests.py
@@ -14,6 +14,7 @@ class QualityTests(unittest.TestCase):
         self.assertEqual(config.clean_url("google.com/xml.rpc"), "http://google.com/xml.rpc")
         self.assertEqual(config.clean_url("google.com"), "http://google.com/")
         self.assertEqual(config.clean_url("http://www.example.com/folder/"), "http://www.example.com/folder/")
+        self.assertEqual(config.clean_url("scgi:///home/user/.config/path/socket"), "scgi:///home/user/.config/path/socket")
 
 if __name__ == '__main__':
     suite = unittest.TestLoader().loadTestsFromTestCase(QualityTests)