diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..69844f8edcd6dec8f0d55112811b4d6929baade3
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,79 @@
+# Set the default behavior, in case people don't have core.autocrlf set.
+
+# Handle line endings automatically for files detected as text
+# and leave all files detected as binary untouched.
+* text=auto
+
+#
+# The above will handle all files NOT found below
+#
+
+#
+## These files are text and should be normalized (Convert crlf => lf)
+#
+
+# git config
+.gitattributes	text
+.gitignore		text
+
+# Documentation
+*.md			text
+CHANGES			text
+
+# Startup script
+init.*			text
+
+# Various
+*.ini			text
+*.txt			text
+*.less			text
+*.h				text
+*.in			text
+
+# Python Source files
+*.pxd			text
+*.py 			text
+*.py3 			text
+*.pyw 			text
+*.pyx  			text
+
+# Cheetah template
+*.tmpl			text
+
+# Web file
+*.htm text
+*.html text
+*.css text
+*.js text
+*.xml text
+
+#
+## These files are binary and should be left untouched
+#
+
+# Python Binary files
+*.db			binary
+*.p 			binary
+*.pkl 			binary
+*.pyc 			binary
+*.pyd			binary
+*.pyo 			binary
+
+# These files are binary and should be left untouched
+# (binary is a macro for -text -diff)
+*.png			binary
+*.jpg			binary
+*.jpeg			binary
+*.gif			binary
+*.ico			binary
+*.swf			binary
+*.gz			binary
+*.zip			binary
+*.7z			binary
+*.ttf			binary
+*.svg			binary
+*.woff			binary
+*.eot			binary
+*.rar			binary
+*.dll			binary
+*.lib			binary
diff --git a/.gitignore b/.gitignore
index f465a798f8c5123fa51dec28a139b4bda0fafc4a..14f2ce438504f43fe23b3672e605a81a577899c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,59 +1,59 @@
-#  SR User Related   #
-######################
-cache/
-Logs/
-restore/
-backup/
-cache.db*
-config.ini*
-sickbeard.db*
-failed.db*
-autoProcessTV/autoProcessTV.cfg
-server.crt
-server.key
-
-#  SR Test Related   #
-######################
-tests/Logs/*
-tests/cache/*
-tests/sickbeard.db*
-tests/cache.db*
-tests/failed.db
-
-#  Compiled source   #
-######################
-*.py[co]
-
-#  IDE specific      #
-######################
-*.bak
-*.tmp
-*.wpr
-*.project
-*.pydevproject
-*.cproject
-*.tmproj
-*.tmproject
-*.sw?
-Session.vim
-.ropeproject/*
-*.iml
-.idea
-*.ipr
-.settings/*
-
-# OS generated files #
-######################
-.Spotlight-V100
-.Trashes
-.DS_Store
-desktop.ini
-ehthumbs.db
-Thumbs.db
-.directory
-*~
-*.torrent
-
-# Unrar Executable   #
-######################
-lib/unrar2/UnRAR.exe
+#  SR User Related   #
+######################
+/cache/
+/Logs/
+/restore/
+/backup/
+cache.db*
+config.ini*
+sickbeard.db*
+failed.db*
+/autoProcessTV/autoProcessTV.cfg
+server.crt
+server.key
+
+#  SR Test Related   #
+######################
+/tests/Logs/*
+/tests/cache/*
+/tests/sickbeard.db*
+/tests/cache.db*
+/tests/failed.db
+
+#  Compiled source   #
+######################
+*.py[co]
+
+#  IDE specific      #
+######################
+*.bak
+*.tmp
+*.wpr
+*.project
+*.pydevproject
+*.cproject
+*.tmproj
+*.tmproject
+*.sw?
+Session.vim
+.ropeproject/*
+*.iml
+.idea
+*.ipr
+.settings/*
+
+# OS generated files #
+######################
+.Spotlight-V100
+.Trashes
+.DS_Store
+desktop.ini
+ehthumbs.db
+Thumbs.db
+.directory
+*~
+*.torrent
+
+# Unrar Executable   #
+######################
+lib/unrar2/UnRAR.exe
diff --git a/CHANGES.md b/CHANGES.md
index ae53e9f0548897f22524918f031d5f8afcbc0d73..e4758094de0f4750783dccfcbbd26284e86a17e9 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,247 @@
+### 4.0.14 (2014-03-29)
+
+[full changelog](https://github.com/SiCKRAGETV/SickRage/compare/v4.0.13...v4.0.14)
+
+* Feature: Added Free Space check in PP before deleting files
+* Added DEBUG setting to UI
+* Added better note to Extra Scripts setting in PP
+* Added more network logs
+* Added anime regex for 003-004. Show Name - Ep Name.ext
+* Added Quality check even if SR itself downloaded the episode
+* Added Default Info Language in general settings 
+* Fixed password encryption when restoring backup to another computer
+* Fixed T411 torrent name with special chars
+* Fixed PP when higher quality have lower size making SR re-download multiple times
+* Fixed Trakt exceptions
+* Fixed debug and db events in log when debug log is disabled
+* Fixed showupdater when user changed updater hour and thread didn't restart
+* Fixed inc_top.tmpl loading unnecessary stuff when not logged 
+* Fixed gitignore issue with relative folder
+* Fixed Add show with improperly decoded utf-8 characters
+* Fixed PP episodes with Airdate. Check without season 0 first
+* Fixed don't display prev and next airs if date is invalid
+* Fixed verify_freespace to check show location and check old file exists
+* Fixed tntvillage provider (cosmetic)
+* Fixed active status on home accordingly to manage rolling download enable/disable
+* Fixed trending blacklist management
+* Fixed Hounddawgs (various fixes)
+* Fixed Torrent file content is empty on magnet torrent
+* Fixed search show name in Home
+* Hide Proxy indexers settings when proxy host is empty
+* Change removed episodes status from IGNORED to ARCHIVED 
+
+### 4.0.13 (2014-03-22)
+
+[full changelog](https://github.com/SiCKRAGETV/SickRage/compare/v4.0.12...v4.0.13)
+
+* Fix HDTorrents proper search
+* Fix restart page
+* Fix comingEpisodes JS when manual search
+* Fix properfinder thread not starting or stopping when status changed.
+* Fix webapi errors about sqlite3
+* Fix network sorting with small poster, banner in home
+* Fix network sorting with comingEpisodes
+* Fix default for new special status (default is SKIPPED)
+* Fix subliminal not working properly on Windows.
+* Fix proper find not working for private trackers
+* Fix error in decryption of git passwords (Important: if you encrypted your password before this update then you need to re-enter the git password for it to work again)
+* Added possibility to blacklist show in trending
+* Added scraper for MoreThan.TV.
+* Added 'tp' (Beyond TV) to mediaExtensions
+* Added AnimeNZB provider
+* Added message about TVRAGE don't support banner/posters
+* Added Log directory to config page (useful when use don't know the location of log files)
+* Added shutdown option instead of restart after update (Added Checkbox in Config - General - Advanced "Restart Only") - useful for NAS
+* Added setting "Coming Episodes Missed Range" to UI (default is 7) If you have a missed episode older than 7 days, it won't show in coming episodes
+* Added subtitle status to mass edit page (show if subtitle is enable/disable for that show)
+* Added Percent to Footer and Link to Snatched
+* Added T411 Provider subcategory 639 (tv-show not tv-series)
+* Added a check for the ssl warning for providers proxies
+* Hide bad shows (title is null) from Trakt Recommendations
+* Hide subtitle setting if subtitle feature not enabled
+* Hide webroot in /config if is not enabled
+* Hide "Find Propers Search" if it's disabled
+* Use SickRage date preset config to show trakt first aired in recommendations
+* Updated mass edit text "Subtitle" to "Search Subtitle" - the action is force search subtitles
+* Update Wombles for tv-x264 and tv-dvd
+* Minor adjustments in editshow page
+* Re-arrange items so proper settings be in sequence in search settings
+* Removed hardlink and symlink from actions if Win32
+* Removed Fanzub (shutdown)
+* PP: Add option to delete files/folders when using manual Post Processing
+    * Adds a checkbox to delete files/folders just like auto Processing does
+    * Defaults to off
+* PP Changed failure handling:
+    * Will now continue if an error is encountered with a single file
+    * Will skip only the current folder when syncfiles are encountered
+    * Will show a summary at the end of Post Processing listing all failed items (files and folders) with the reason for failing.
+* PP: Option to not delete empty folders during Post Processing
+    * Adds option to not delete empty folders to Post Processing Config.
+    * Only valid when Auto Post Processing, manual overrides the setting.
+* New Feature: DisplayShow: Manual Search ask 2 questions (please give us feedback!):
+    * If failed enable, ask to mark release failed Yes/No.
+    * If to download: include current quality in new search or search only for higher quality
+* New Feature: Added option to use forced priority in SAB (starts download even then SAB is paused)
+* New Feature: Added the ability to choose displaying columns in home page
+
+
+### 4.0.12 (2014-03-15)
+
+[full changelog](https://github.com/SiCKRAGETV/SickRage/compare/v4.0.11...v4.0.12)
+
+* Auto or manual update will be aborted: remote DB has new DB version or Post-Processor or ShowUpdater are running
+* RSS feeds now can use global proxy (if enabled)
+* Show invalid date in UI when indexer has invalid data
+* Don't add episode to backlog paused shows when setting status to Wanted
+* Post-Processor: ignores hidden folders inside folders
+* Post-Processor: ignore folders that are not empty
+* Show message instead of error when trying to update a show that is already being updated
+* Added Kaerizaki-Fansub regex
+* Fixed log rotation in windows ("cannot access the file because it is being used by another process")
+* New TorrentDay URL
+* Added WebSafe filter to log viewer.
+* Show the name of Syncfiles in log when Postpone PP halts because of them
+* Better unrar message error
+* Fix submit issue not reading all log files
+* Disable daemon mode in MAC/OSX
+* Fix ASCII decode errors when downloading subtitles
+* Change tvrage error to warning
+* IPT: forcing search in eponly mode (sponly not available)
+* TorrentDay: Improved logging
+* Improved Deluged
+* PP: fix already processed episodes when downloading the same release.
+* Fix various providers issue in webui
+* Subliminal: reverted commit for path in unicode
+* Fix mede8er xml declarations
+* Show "No update need" notification even if auto_update is True
+* Added WebSafe filter to log viewer.
+* Added limit title length when Submitting log
+* Send LOCALE when submitting issue
+
+### 4.0.11 (2014-03-08)
+[full changelog](https://github.com/SiCKRAGETV/SickRage/compare/v4.0.10...v4.0.11)
+
+* Use Scene Exceptions in Post Processing
+* Fix some PP issues related to message "Problem(s) during Processing"
+* Fix github issue when didn't return list of branches
+* Manage backlog now only show WANTED. Don't show snatched anymore
+* Fix error while showing invalid dates from indexer
+* Fix unable to add torrent rss when nzb search is disable
+* Fix metadata errors when info is missing from indexer
+* Fix git password not encrypted
+* Fix for Pushbullet update notification
+* Fix to delete ALL associated files while replacing old episodes (PP)
+* Faster PP copy method
+* Added field for custom title tag in torrent rss provider
+* New TRAKT features and fixes
+* Viewlog page can read the logs from all the logfiles (you can search in all your log files using UI)
+* (If you missed this feature: you can change the number of logs in settings and size per file)
+* WARNING: Windows users: please set number of logs to 0 (zero) to avoid errors. Known issue.
+
+### 4.0.10 (2014-03-03)
+[full changelog](https://github.com/SiCKRAGETV/SickRage/compare/v4.0.9...v4.0.10)
+
+* Add "Use failed downloads" to search settings
+* Add a missing urllib3.disable_warnings()
+* Add a warning when gh object is set to None
+* Add normalized locale code.
+* Add option to delete RAR contents when Process_method != Move
+* Add Provider AR
+* Add RARBG provider
+* Add SD search to RARBG provider
+* Added a specific regex for horriblesubs
+* Added apikey to censoredformatter log
+* Added auto backup when updating
+* Added Date field in email body
+* Added failed.db and cache.db to backup
+* Added missing network logos
+* Added several network logos
+* Added sponly to season pack search
+* Added support for Plex Media Center updates with Auth Token
+* Added sync file extensions to Post Processing Config page
+* AniDB: Catch exception when generating white and blacklist
+* AniDB: Fix generating exception on timeout in auth
+* AniDB: Fix generating exception on timeout in PP
+* AniDB: Show error in ui when unable to retrieve Fansub Groups
+* BTN: Fix searching in season mode for episode
+* Change the language dropdown in editShow.tmpl
+* Check actual branch before switch
+* Check for UnicodeDecodeError in Cheetah's Filter
+* Check if we are on windows before query Windows version.
+* Disable urllib3 InsecureRequestWarning
+* Don't remove logs folder when git reset is enabled in UI
+* Don't use system.encoding for Cheetah's Filter
+* Fix .gitignore
+* Fix add newznab provider
+* Fix backup issue with invalid restore folder
+* Fix changing episode scene number not updating the show's episode cache.
+* Fix color in displayShow when manually searching
+* Fix compiling error
+* Fix downconverting path from unicode to str
+* Fix list_associated_files and DeleteRAR contents
+* Fix low quality snatch not showing in backlog overview
+* Fix missing en_US alias in certain system.
+* Fix msg created without MIMEext
+* Fix pyUnrar2 on bad archive
+* Fix rarbg provider searchstring encoding
+* Fix restart timeout 
+* Fix set date/time to local tz when local is chosen
+* Fix Show Lookups on Indexer
+* Fix time display for fuzzy dates with trim zero and 24 hour time style
+* Fix typo in emailnotify.py
+* Fix viewlog.tmpl
+* Fixes shows with double quotes
+* Handles multi-page results and improved login for Freshon
+* HBO and Starz logos where a little stretched. Replaced them.
+* Hide submit errors button if no git user/pass and auto submit
+* Improved logging to detect CloudFlare blocking
+* Improved rTorrent support
+* IPT: force eponly search since as it isn't supported by the provider.
+* Kodi fix
+* Limit number of pages for RSS search
+* New Feature - Log search and log filter by SR thread
+* OldPirateBay Replaced url tv/latest
+* Opensubtitle - show only error not traceback
+* Prettier file sizes in DisplayShow
+* Provider SCC: Catch exception on getURL
+* Redone restart.js
+* Remove blue color from progress bar
+* Remove part of the condition to enable utf8 on Windows
+* Remove traceback from generic.py
+* Remove trademark from filename
+* Removed old TMF network logo. (channel stopped 2011)
+* Removed 'page' parameter
+* Removed some comment
+* Renamed network logo of se“ries+ to series+
+* Replace os.rmdir to shutil.rmtree
+* Replace the language selection in add show
+* Replaced adult swim network logo with colored version.
+* Replaced white network logos with colored versions.
+* Restored back previous Comedy Central logos.
+* Reworked the backup/restore to properly handle the cache directory
+* SCC: Fix season search only in sponly
+* Set condition for tntvillage parameters
+* Skip anidb query if previously failed
+* Subliminal: Fix ogg subtitles track with und language
+* Subtitles: Path is always unicode
+* Suppressed torrent list not found error msg
+* Suppressing subliminal logs on windows
+* T411: Change addresse from t411.me to t411.io
+* Trakt: Catch error when trying to delete library.
+* Update config.js to compareDB on update
+* Update default trakt timeout
+* Update T411 to its new domain name
+* Update traktchecker - remove traceback
+* Update traktchecker - remove traceback
+* Update webserve.py to add delete_failed
+* Update webserve.py to compareDB on checkout
+* Updated OldPirateBay file list parsing
+* Updated Requests to 2.5.1
+* Use hostname rather than IP
+* Use sbdatetime instead of self
+* UTF-8 encode url that is used in urllib.quote_plus(url) 
+* Windows UTF-8 console via cp65001
+
 ### 0.x.x (2014-11-11 xx:xx:xx UTC)
 
 * Add Bootstrap for UI features
diff --git a/gui/slick/images/network/4sd.png b/gui/slick/images/network/4sd.png
new file mode 100644
index 0000000000000000000000000000000000000000..bd920c58d5c2e2e66639ab9556f687daff2205a6
Binary files /dev/null and b/gui/slick/images/network/4sd.png differ
diff --git a/gui/slick/images/network/america one television network.png b/gui/slick/images/network/america one television network.png
new file mode 100644
index 0000000000000000000000000000000000000000..c317e2cf96cab0f5e80cac7769b9d96a6d40afd2
Binary files /dev/null and b/gui/slick/images/network/america one television network.png differ
diff --git a/gui/slick/images/network/america one.png b/gui/slick/images/network/america one.png
new file mode 100644
index 0000000000000000000000000000000000000000..c317e2cf96cab0f5e80cac7769b9d96a6d40afd2
Binary files /dev/null and b/gui/slick/images/network/america one.png differ
diff --git a/gui/slick/images/network/byu television.png b/gui/slick/images/network/byu television.png
new file mode 100644
index 0000000000000000000000000000000000000000..8838ea815f5ad35e82d2a41726d0ee48952eec64
Binary files /dev/null and b/gui/slick/images/network/byu television.png differ
diff --git a/gui/slick/images/network/carlton television.png b/gui/slick/images/network/carlton television.png
new file mode 100644
index 0000000000000000000000000000000000000000..c387ff09bf3f57497a4d6a1eebb8281b5795d0b3
Binary files /dev/null and b/gui/slick/images/network/carlton television.png differ
diff --git a/gui/slick/images/network/cnbc.png b/gui/slick/images/network/cnbc.png
index bd6585c756aacd4c1d13759cba9fee3686fc8b22..4f812b15ce6f4e6fbacc2aa7fd96cf8b008bf71a 100644
Binary files a/gui/slick/images/network/cnbc.png and b/gui/slick/images/network/cnbc.png differ
diff --git a/gui/slick/images/network/comedy entral old.png b/gui/slick/images/network/comedy entral old.png
deleted file mode 100644
index 31bf0876351bb67f043f6f93234da03eb605023c..0000000000000000000000000000000000000000
Binary files a/gui/slick/images/network/comedy entral old.png and /dev/null differ
diff --git a/gui/slick/images/network/dumont television network.png b/gui/slick/images/network/dumont television network.png
new file mode 100644
index 0000000000000000000000000000000000000000..244c7c9dba2a1ac76a1c46bb3165269a9a2e7aca
Binary files /dev/null and b/gui/slick/images/network/dumont television network.png differ
diff --git a/gui/slick/images/network/eredivisie live 1.png b/gui/slick/images/network/eredivisie live 1.png
deleted file mode 100644
index 55f8b4aafd34b1b74b104070618ab2f383eae237..0000000000000000000000000000000000000000
Binary files a/gui/slick/images/network/eredivisie live 1.png and /dev/null differ
diff --git a/gui/slick/images/network/eredivisie live 2.png b/gui/slick/images/network/eredivisie live 2.png
deleted file mode 100644
index 55f8b4aafd34b1b74b104070618ab2f383eae237..0000000000000000000000000000000000000000
Binary files a/gui/slick/images/network/eredivisie live 2.png and /dev/null differ
diff --git a/gui/slick/images/network/eredivisie live.png b/gui/slick/images/network/eredivisie live.png
deleted file mode 100644
index 55f8b4aafd34b1b74b104070618ab2f383eae237..0000000000000000000000000000000000000000
Binary files a/gui/slick/images/network/eredivisie live.png and /dev/null differ
diff --git a/gui/slick/images/network/esquire network.png b/gui/slick/images/network/esquire network.png
index 43d4adf0c208f70d83407cf70a9f371fede534a6..e7afa1e53a358c4352f79746bbdab9fbedb3b9ad 100644
Binary files a/gui/slick/images/network/esquire network.png and b/gui/slick/images/network/esquire network.png differ
diff --git a/gui/slick/images/network/hallmark channel.png b/gui/slick/images/network/hallmark channel.png
new file mode 100644
index 0000000000000000000000000000000000000000..259e013765ac205b9f198a933af7de120ea07725
Binary files /dev/null and b/gui/slick/images/network/hallmark channel.png differ
diff --git a/gui/slick/images/network/ion television.png b/gui/slick/images/network/ion television.png
new file mode 100644
index 0000000000000000000000000000000000000000..668bd413c394304ff7bb65c60941335f60da9a53
Binary files /dev/null and b/gui/slick/images/network/ion television.png differ
diff --git a/gui/slick/images/network/london weekend television (lwt).png b/gui/slick/images/network/london weekend television (lwt).png
new file mode 100644
index 0000000000000000000000000000000000000000..72d73f152af799c1b867f489fda07552cfc24193
Binary files /dev/null and b/gui/slick/images/network/london weekend television (lwt).png differ
diff --git a/gui/slick/images/network/london weekend television.png b/gui/slick/images/network/london weekend television.png
new file mode 100644
index 0000000000000000000000000000000000000000..72d73f152af799c1b867f489fda07552cfc24193
Binary files /dev/null and b/gui/slick/images/network/london weekend television.png differ
diff --git a/gui/slick/images/network/nat geo.png b/gui/slick/images/network/nat geo.png
index e687e024deb7721f330d1ea035a3df193b2a5384..b4a5f277b5081829253bf9b6564293f0ebfcc820 100644
Binary files a/gui/slick/images/network/nat geo.png and b/gui/slick/images/network/nat geo.png differ
diff --git a/gui/slick/images/network/national geographic.png b/gui/slick/images/network/national geographic.png
index 99088c7d93817d52077ccb99cd4c4a5a6cfd94fa..b4a5f277b5081829253bf9b6564293f0ebfcc820 100644
Binary files a/gui/slick/images/network/national geographic.png and b/gui/slick/images/network/national geographic.png differ
diff --git a/gui/slick/images/network/ngc.png b/gui/slick/images/network/ngc.png
index 99088c7d93817d52077ccb99cd4c4a5a6cfd94fa..b4a5f277b5081829253bf9b6564293f0ebfcc820 100644
Binary files a/gui/slick/images/network/ngc.png and b/gui/slick/images/network/ngc.png differ
diff --git a/gui/slick/images/network/niconico.png b/gui/slick/images/network/niconico.png
index 545588b04b48e111094d43bdd818691e3f26600e..99855e4fecb505ff3d852034846368675626e1bd 100644
Binary files a/gui/slick/images/network/niconico.png and b/gui/slick/images/network/niconico.png differ
diff --git a/gui/slick/images/network/nonetwork.png b/gui/slick/images/network/nonetwork.png
index 246590181c6a60f769ef4273454ea747d67bcf52..4398a439eec3f9a7b5a19f147e80a3d832e506d1 100644
Binary files a/gui/slick/images/network/nonetwork.png and b/gui/slick/images/network/nonetwork.png differ
diff --git a/gui/slick/images/network/sky sport news.png b/gui/slick/images/network/sky sport news.png
index bd0408d0ac7d7598b9f4cb96bba5dcf7b43c0b4b..2a3e6caaa31afdbdf5adb39d6e13b9661a4a888d 100644
Binary files a/gui/slick/images/network/sky sport news.png and b/gui/slick/images/network/sky sport news.png differ
diff --git a/gui/slick/images/network/sony entertainment television.png b/gui/slick/images/network/sony entertainment television.png
new file mode 100644
index 0000000000000000000000000000000000000000..b7d168268c4f4dcebd83333dcf160cfa8a0d7eec
Binary files /dev/null and b/gui/slick/images/network/sony entertainment television.png differ
diff --git a/gui/slick/images/network/sveriges television.png b/gui/slick/images/network/sveriges television.png
new file mode 100644
index 0000000000000000000000000000000000000000..c12c2fc7569597eb78892bce907da87a457fb0ba
Binary files /dev/null and b/gui/slick/images/network/sveriges television.png differ
diff --git a/gui/slick/images/network/thames television.png b/gui/slick/images/network/thames television.png
new file mode 100644
index 0000000000000000000000000000000000000000..2271b691f05c26194f37cb9c343dd620354cf540
Binary files /dev/null and b/gui/slick/images/network/thames television.png differ
diff --git a/gui/slick/interfaces/default/config_general.tmpl b/gui/slick/interfaces/default/config_general.tmpl
index 1f27d446a9e5cdde53351c89e901d8f70ece05e6..8ef7f7f10b26e2d42fb971f5f8673b3ad346f149 100644
--- a/gui/slick/interfaces/default/config_general.tmpl
+++ b/gui/slick/interfaces/default/config_general.tmpl
@@ -30,6 +30,27 @@
 
 <script type="text/javascript" src="$sbRoot/js/config.js?$sbPID"></script>
 <script type="text/javascript" src="$sbRoot/js/rootDirs.js?$sbPID"></script>
+<script type="text/javascript" src="$sbRoot/js/lib/bootstrap-formhelpers.min-2.3.0.js?$sbPID"></script>
+
+<script type="text/javascript" charset="utf-8">
+	<!--
+	\$(document).ready(function(){
+		if (\$("input[name='proxy_setting']").val().length == 0) {
+			\$("input[id='proxy_indexers']").prop('checked', false);
+			\$("label[for='proxy_indexers']").hide();
+		}
+		
+		\$("input[name='proxy_setting']").on('input', function() {
+			if( \$(this).val().length === 0 ) {
+				\$("input[id='proxy_indexers']").prop('checked', false);
+				\$("label[for='proxy_indexers']").hide();
+			} else {
+				\$("label[for='proxy_indexers']").show();
+			}
+		});
+	});
+	//-->
+</script>
 
 <div id="config">
 	<div id="config-content">
@@ -53,6 +74,17 @@
 					</div>
 
 					<fieldset class="component-group-list">
+
+						<div class="field-pair">
+							<label for="indexerDefaultLang">
+								<span class="component-title">Default Indexer Language</span>
+								<span class="component-desc">
+									<select name="indexerDefaultLang" id="indexerDefaultLang" class="form-control form-control-inline input-sm bfh-languages" data-language=$sickbeard.INDEXER_DEFAULT_LANGUAGE data-available="#echo ','.join($sickbeard.indexerApi().config['valid_languages'])#"></select>
+									<span>for adding shows and metadata providers</span>
+								</span>
+							</label>
+						</div>
+
 						<div class="field-pair">
 							<label for="launch_browser">
 								<span class="component-title">Launch browser</span>
@@ -339,16 +371,6 @@
 							</span>
 						</div>
 
-						<div class="field-pair">
-							<label for="play_videos">
-								<span class="component-title">Browser video player</span>
-								<span class="component-desc">
-									<input type="checkbox" name="play_videos" id="play_videos" #if $sickbeard.PLAY_VIDEOS then 'checked="checked"' else ''#/>
-									<p>play video files from display show page</p>
-								</span>
-							</label>
-						</div>
-
 						<div class="field-pair">
 							<label for="download_url">
 						    		<span class="component-title">Download url</span>
@@ -519,6 +541,16 @@
 							</label>
 						</div>
 						
+						<div class="field-pair">
+							<label for="debug">
+								<span class="component-title">Enable debug</span>
+								<span class="component-desc">
+									<input type="checkbox" name="debug" id="debug" #if $sickbeard.DEBUG then 'checked="checked"' else ''#/>
+									<p>Enable debug logs</p>
+								</span>
+							</label>
+						</div>
+						
 						<div class="field-pair">
 							<label for="no_restart">
 								<span class="component-title">No Restart</span>
diff --git a/gui/slick/interfaces/default/config_notifications.tmpl b/gui/slick/interfaces/default/config_notifications.tmpl
index c0743d160b04e7d7ca3ebd21c3f7007a14cd4726..f0babbb26ff37775534e945f863c0ca43d0a3501 100644
--- a/gui/slick/interfaces/default/config_notifications.tmpl
+++ b/gui/slick/interfaces/default/config_notifications.tmpl
@@ -1,5 +1,6 @@
 #import sickbeard
 #from sickbeard.helpers import anon_url
+#from sickbeard.common import *
 
 #set global $title="Config - Notifications"
 #set global $header="Notifications"
@@ -1498,14 +1499,70 @@
                             </div>
                             <div class="field-pair">
                                 <label for="trakt_blacklist_name">
-                                    <span class="component-title">Traktv BlackList name:</span>
+                                    <span class="component-title">Trakt blackList name:</span>
                                     <input type="text" name="trakt_blacklist_name" id="trakt_blacklist_name" value="$sickbeard.TRAKT_BLACKLIST_NAME" class="form-control input-sm input150" />
                                 </label>
                                 <label>
                                     <span class="component-title">&nbsp;</span>
-                                    <span class="component-desc">Name(slug) of List on Traktv for blacklisting show on tranding page</span>
+                                    <span class="component-desc">Name(slug) of List on Trakt for blacklisting shows on trending page</span>
                                 </label>
                             </div>
+                            <div class="field-pair">
+                                <label for="trakt_use_rolling_download">
+                                    <span class="component-title">Use rolling download:</span>
+                                    <span class="component-desc">
+                                        <input type="checkbox" class="enabler" name="trakt_use_rolling_download" id="trakt_use_rolling_download" #if $sickbeard.TRAKT_USE_ROLLING_DOWNLOAD then "checked=\"checked\"" else ""# />
+                                        <p>Collect defined number of episodes after last watched one</p>
+                                    </span>
+                                </label>
+                            </div>
+                            <div id="content_trakt_use_rolling_download">
+              	                <div class="field-pair">
+                                    <label for="trakt_rolling_num_ep">
+                                        <span class="component-title">Number of Episode:</span>
+                                        <span class="component-desc">
+                                	    <input type="number" name="trakt_rolling_num_ep" id="trakt_rolling_num_ep" value="$sickbeard.TRAKT_ROLLING_NUM_EP" class="form-control input-sm input75"/>
+                                    </label>
+                                    <label>
+                                        <span class="component-title">&nbsp;</span>
+                                        <span class="component-desc">number of episodes to download from the last watched episode</span>
+                                    </label>    
+                                </div>
+                                <div class="field-pair">
+                                    <label for="trakt_rolling_frequency">
+                                        <span class="component-title">Rolling frequency check:</span>
+                                        <input type="text" name="trakt_rolling_frequency" id="trakt_rolling_frequency" value="$sickbeard.TRAKT_ROLLING_FREQUENCY" class="form-control input-sm input250" />
+                                    </label>
+                                    <p>
+                                        <span class="component-desc">Minutes between check.</span>
+                                    </p>
+                                </div>
+              	                <div class="field-pair">
+                                    <label for="trakt_rolling_add_paused">
+                                        <span class="component-title">Should new shows be added paused?:</span>
+                                        <span class="component-desc">
+                                	    <input type="checkbox" name="trakt_rolling_add_paused" id="trakt_rolling_add_paused" #if $sickbeard.TRAKT_ROLLING_ADD_PAUSED then "checked=\"checked\"" else ""# />
+                                    </label>
+                                    <label>
+                                        <span class="component-title">&nbsp;</span>
+                                        <span class="component-desc">This feature will try to snatch <i>number of episodes</i> if the show is active. Would you like to add new shows in paused mode (this overrides the previous choice)?</span>
+                                    </label>    
+                                </div>
+                                <div class="field-pair">
+                                    <label for="trakt_rolling_default_watched_status">
+                                        <span class="component-title">Default watched status:</span>
+                                            <select name="trakt_rolling_default_watched_status" id="trakt_rolling_default_watched_status" class="form-control form-control-inline input-sm">
+                                            #for $defStatus in [$ARCHIVED, $IGNORED]:
+                                            <option value="$defStatus" #if $defStatus == $sickbeard.TRAKT_ROLLING_DEFAULT_WATCHED_STATUS then 'selected="selected"' else ''#>$statusStrings[$defStatus]</option>
+                                            #end for
+                                            </select>
+                                    </label>
+                                    <label>
+                                        <span class="component-title">&nbsp;</span>
+                                        <span class="component-desc">Define the status to be set for watched episode. This will be set only on show add.</span>
+                                    </label>
+                                </div>
+                            </div>
                             <div class="testNotification" id="testTrakt-result">Click below to test.</div>
                             <input type="button" class="btn" value="Test Trakt" id="testTrakt" />
                             <input type="submit" class="btn config_submitter" value="Save Changes" />
diff --git a/gui/slick/interfaces/default/config_postProcessing.tmpl b/gui/slick/interfaces/default/config_postProcessing.tmpl
index 3b953750de413b3eddbfa20878235662937449bc..dbb1a4da648643bf20f45a8bbc740159ae9d5f8b 100644
--- a/gui/slick/interfaces/default/config_postProcessing.tmpl
+++ b/gui/slick/interfaces/default/config_postProcessing.tmpl
@@ -118,12 +118,23 @@
 						    </label>
 						    <label class="nocheck">
 						        <span class="component-title">&nbsp;</span>
-						        <span class="component-desc">Additional scripts separated by <b>|</b>.</span>
+						        <span class="component-desc"><b>NOTE:</b></span>
 						    </label>
 						    <label class="nocheck">
 						        <span class="component-title">&nbsp;</span>
-						        <span class="component-desc"><b>NOTE:</b> Scripts are called after SickRage's own post-processing.</span>
-						    </label>						    
+								<span class="component-desc">
+									<ul>
+										<li>See <a href="https://github.com/SiCKRAGETV/SickRage/wiki/Post%20Processing"><font color='red'><b>Wiki</b></font></a> for a script arguments description.</li>
+										<li>Additional scripts separated by <b>|</b>.</li>
+										<li>Scripts are called after SickRage's own post-processing.</li>
+										<li>For any scripted languages, include the interpreter executable before the script. See the following example:</li>
+										<ul>
+											<li>For Windows: <pre>C:\Python27\pythonw.exe C:\Script\test.py</pre></li>
+											<li>For Linux: <pre>python /Script/test.py</pre></li>
+										</ul>
+									</ul>
+								</span>
+						    </label>
 						</div>
 
                         <div class="field-pair">
diff --git a/gui/slick/interfaces/default/config_subtitles.tmpl b/gui/slick/interfaces/default/config_subtitles.tmpl
index 90d18bc3834235ebbb84c73daaf4eafea656b813..dd91d46968e62e5560bb5d97b31a97c7317038e7 100644
--- a/gui/slick/interfaces/default/config_subtitles.tmpl
+++ b/gui/slick/interfaces/default/config_subtitles.tmpl
@@ -63,56 +63,69 @@
                     
 					<fieldset class="component-group-list">
 			            <div class="field-pair">
-                            <input type="checkbox" class="enabler" #if $sickbeard.USE_SUBTITLES then " checked=\"checked\"" else ""# id="use_subtitles" name="use_subtitles">
                             <label for="use_subtitles" class="clearfix">
                                 <span class="component-title">Search Subtitles</span>
+								<span class="component-desc">
+									<input type="checkbox" class="enabler" #if $sickbeard.USE_SUBTITLES then " checked=\"checked\"" else ""# id="use_subtitles" name="use_subtitles">
+								</span>
                             </label>
                         </div>
                         <div id="content_use_subtitles">
 	                        	<div class="field-pair">
-		                            <label class="nocheck">
+		                            <label>
 		                                <span class="component-title">Subtitle Languages</span>
 		                                <span class="component-desc"><input type="text" id="subtitles_languages" name="subtitles_languages" /></span>
 		                            </label>
 		                        </div>
                         		<div class="field-pair">
-	                        		<label class="nocheck">
+	                        		<label>
 		                                <span class="component-title">Subtitle Directory</span>
 		                                <input type="text" value="$sickbeard.SUBTITLES_DIR" id="subtitles_dir" name="subtitles_dir" class="form-control input-sm input350">
 		                            </label>
-	                            	<label class="nocheck">
+	                            	<label>
 	    	                            	<span class="component-title">&nbsp;</span>
 			                                <span class="component-desc">The directory where SickRage should store your <i>Subtitles</i> files.</span>
 	          	                	</label>
-	                            	<label class="nocheck">
+	                            	<label>
 	    	                            	<span class="component-title">&nbsp;</span>
 			                                <span class="component-desc"><b>NOTE:</b> Leave empty if you want store subtitle in episode path.</span>
 	          	                	</label>
                         		</div>
 	                            <div class="field-pair">
-	                            	<label class="nocheck">
+	                            	<label>
 	                                	<span class="component-title">Subtitle Find Frequency</span>
 	                                    <input type="number" name="subtitles_finder_frequency" value="$sickbeard.SUBTITLES_FINDER_FREQUENCY" hours="1" class="form-control input-sm input75" />
-	                                </label>
-	                                <label class="nocheck">
-	                                	<span class="component-title">&nbsp;</span>
-	                                    <span class="component-desc">Time in hours between scans (hours. 1)</span>
+										<span class="component-desc">time in hours between scans (default: 1)</span>
 	                                </label>
 	                            </div>
 		                        <div class="field-pair">
-		                            <input type="checkbox" name="subtitles_history" id="subtitles_history" #if $sickbeard.SUBTITLES_HISTORY then " checked=\"checked\"" else ""#/>
 		                            <label class="clearfix" for="subtitles_history">
 		                                <span class="component-title">Subtitles History</span>
-		                                <span class="component-desc">Log downloaded Subtitle on History page?</span>
+		                                <span class="component-desc">
+											<input type="checkbox" name="subtitles_history" id="subtitles_history" #if $sickbeard.SUBTITLES_HISTORY then " checked=\"checked\"" else ""#/>
+											<p>Log downloaded subtitles on the History page?</p>
+										</span>
 		                            </label>
 		                        </div>                        		
 		                        <div class="field-pair">
-		                            <input type="checkbox" name="subtitles_multi" id="subtitles_multi" #if $sickbeard.SUBTITLES_MULTI then " checked=\"checked\"" else ""#/>
 		                            <label class="clearfix" for="subtitles_multi">
 		                                <span class="component-title">Subtitles Multi-Language</span>
-		                                <span class="component-desc">Append language codes to subtitle filenames?</span>
+		                                <span class="component-desc">
+											<input type="checkbox" name="subtitles_multi" id="subtitles_multi" #if $sickbeard.SUBTITLES_MULTI then " checked=\"checked\"" else ""#/>
+											<p>Append language codes to subtitle filenames?</p>
+										</span>
 		                            </label>
 		                        </div>
+								<div class="field-pair">
+									<label class="clearfix" for="embedded_subtitles_all">
+										<span class="component-title">Embedded Subtitles</span>
+										<span class="component-desc">
+											<input type="checkbox" name="embedded_subtitles_all" id="embedded_subtitles_all" #if $sickbeard.EMBEDDED_SUBTITLES_ALL then " checked=\"checked\"" else ""#/>
+											<p>Ignore subtitles embedded inside video file?</p>
+											<p><b>Warning: </b>this will ignore <u>all</u> embedded subtitles for every video file!</p>
+										</span>
+									</label>
+								</div>   
 	                    <br/><input type="submit" class="btn config_submitter" value="Save Changes" /><br/>
                         </div>
                     </fieldset>
diff --git a/gui/slick/interfaces/default/displayShow.tmpl b/gui/slick/interfaces/default/displayShow.tmpl
index c442761841ec5cc6be3761b0bf1db7250ebba883..2e86b1655a639f4192f6ac12d427128c00a33845 100644
--- a/gui/slick/interfaces/default/displayShow.tmpl
+++ b/gui/slick/interfaces/default/displayShow.tmpl
@@ -15,8 +15,6 @@
 #include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
 
 <script type="text/javascript" src="$sbRoot/js/lib/jquery.bookmarkscroll.js?$sbPID"></script>
-<script type="text/javascript" src="$sbRoot/js/jwplayer/jwplayer.js"></script>
-<script type="text/javascript">jwplayer.key="Zq3m618ITHrFxeKGi3Gf33ovC+XtdGQz19MMug==";</script>
 
 <input type="hidden" id="sbRoot" value="$sbRoot" />
 
@@ -360,9 +358,6 @@
 			
 			<tr id="season-$epResult["season"]-cols" class="seasoncols">
 				<th class="col-checkbox"><input type="checkbox" class="seasonCheck" id="$epResult["season"]" /></th>
-			#if $sickbeard.PLAY_VIDEOS:
-				<th>Video</th>
-			#end if
 				<th class="col-metadata">NFO</th>
 				<th class="col-metadata">TBN</th>
 				<th class="col-ep">Episode</th>
@@ -406,18 +401,6 @@
 			<input type="checkbox" class="epCheck" id="<%=str(epResult["season"])+'x'+str(epResult["episode"])%>" name="<%=str(epResult["season"]) +"x"+str(epResult["episode"]) %>" />
 		#end if
 		</td>
-    
-		#if $sickbeard.PLAY_VIDEOS:
-			<td align="center">
-			#if $epResult["location"]:
-				#set $video_root = $os.path.dirname($show._location)
-				#set $video_source = $sbRoot + $epResult["location"].replace($video_root, '/videos')
-				<div id="$video_source" class="jwvideo">Loading the player...</div>
-			#else:
-				No Video
-			#end if
-			</td>
-		#end if
 		
 		<td align="center"><img src="$sbRoot/images/#if $epResult["hasnfo"] == 1 then "nfo.gif\" alt=\"Y" else "nfo-no.gif\" alt=\"N"#" width="23" height="11" /></td>
 		
@@ -529,9 +512,9 @@
 		<td class="col-search">
 			#if int($epResult["season"]) != 0:
 			#if ( int($epResult["status"]) in $Quality.SNATCHED or int($epResult["status"]) in $Quality.DOWNLOADED ) and $sickbeard.USE_FAILED_DOWNLOADS:
-				<a class="epRetry" id="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" name="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" href="retryEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" height="16" alt="retry" title="Retry Download" /></a>
+				<a class="epRetry" id="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" name="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" href="retryEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" height="16" alt="retry" title="Retry Download" /></a>
 			#else:
-				<a class="epSearch" id="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" name="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" href="searchEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" width="16" height="16" alt="search" title="Manual Search" /></a>
+				<a class="epSearch" id="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" name="#echo $str($show.indexerid)+'x'+$str(epResult["season"])+'x'+$str(epResult["episode"])#" href="searchEpisode?show=$show.indexerid&amp;season=$epResult["season"]&amp;episode=$epResult["episode"]"><img src="$sbRoot/images/search16.png" width="16" height="16" alt="search" title="Manual Search" /></a>
 			#end if
 			#end if
 			
diff --git a/gui/slick/interfaces/default/editShow.tmpl b/gui/slick/interfaces/default/editShow.tmpl
index 2f24180004b5663e5d24ca95da6edbca6a708c13..aafd072732e54d27d7e9909c90c3b68af3dd2403 100644
--- a/gui/slick/interfaces/default/editShow.tmpl
+++ b/gui/slick/interfaces/default/editShow.tmpl
@@ -100,12 +100,12 @@
 
 <b>Air by date: </b>
 <input type="checkbox" name="air_by_date" #if $show.air_by_date == 1 then "checked=\"checked\"" else ""# /><br />
-(check this if the show is released as Show.03.02.2010 rather than Show.S02E03)<br />
+(check this if the show is released as Show.03.02.2010 rather than Show.S02E03. <span style="color:red">In case of an air date conflict between regular and special episodes, the latter will be ignored.</span>)<br />
 <br />
 
 <b>Sports: </b>
 <input type="checkbox" name="sports" #if $show.sports == 1 then "checked=\"checked\"" else ""# /><br />
-(check this if the show is a sporting or MMA event and released as Show.03.02.2010 rather than Show.S02E03)<br />
+(check this if the show is a sporting or MMA event and released as Show.03.02.2010 rather than Show.S02E03. <span style="color:red">In case of an air date conflict between regular and special episodes, the latter will be ignored.</span>)<br />
 <br />
 
 <b>Anime: </b>
@@ -115,7 +115,7 @@
 
 <b>DVD Order: </b>
 <input type="checkbox" name="dvdorder" #if $show.dvdorder == 1 then "checked=\"checked\"" else ""# /><br/>
-(check this if you wish to use the DVD order instead of the Airing order)
+(check this if you wish to use the DVD order instead of the Airing order. A "Force Full Update" is necessary, and if you have existing episodes you need to move them)
 <br/><br/>
 
 #if $anyQualities + $bestQualities
diff --git a/gui/slick/interfaces/default/home.tmpl b/gui/slick/interfaces/default/home.tmpl
index 9d3cf6f9e4a8ccffcc5d947b8aea42be8699fff9..621b60be57b0ea56a29af66cfa03d4e71e56d4df 100644
--- a/gui/slick/interfaces/default/home.tmpl
+++ b/gui/slick/interfaces/default/home.tmpl
@@ -248,7 +248,7 @@
 		</select>
 		#if $layout != 'poster':
         Search:
-			<input class="search form-control form-control-inline input-sm input200" type="search" data-column="1" placeholder="Search Show Name">
+			<input class="search form-control form-control-inline input-sm input200" type="search" data-column="2" placeholder="Search Show Name">
 			<button type="button" class="resetshows resetanime btn btn-inline">Reset Search</button>
 		#end if
 	</span>
@@ -585,26 +585,32 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
 	
 	#if $cur_airs_next
     #set $ldatetime = $sbdatetime.sbdatetime.convert_to_setting($network_timezones.parse_date_time($cur_airs_next,$curShow.airs,$curShow.network))
-		<td align="center" class="nowrap"><div class="${fuzzydate}">
-		#try
-		  $sbdatetime.sbdatetime.sbfdate($ldatetime)
-		#except ValueError
-		  Invalid date
-		#end try
-		</div><span class="sort_data">$calendar.timegm($ldatetime.timetuple())</span></td>
+    	#try
+    		#set $temp_sbfdate_next = $sbdatetime.sbdatetime.sbfdate($ldatetime)
+    		#set $temp_timegm_next = $calendar.timegm($ldatetime.timetuple())
+    		<td align="center" class="nowrap">
+    			<div class="${fuzzydate}">$temp_sbfdate_next</div>
+    			<span class="sort_data">$temp_timegm_next</span>
+    		</td>
+    	#except ValueError
+    		<td align="center" class="nowrap"></td>
+    	#end try
     #else:
     	<td align="center" class="nowrap"></td>
     #end if
 
     #if $cur_airs_prev
     #set $pdatetime = $sbdatetime.sbdatetime.convert_to_setting($network_timezones.parse_date_time($cur_airs_prev,$curShow.airs,$curShow.network))
-        <td align="center" class="nowrap"><div class="${fuzzydate}">
-        #try
-          $sbdatetime.sbdatetime.sbfdate($pdatetime)
-        #except ValueError
-          Invalid date
-        #end try
-        </div><span class="sort_data">$calendar.timegm($pdatetime.timetuple())</span></td>
+    	#try
+    		#set $temp_sbfdate_prev = $sbdatetime.sbdatetime.sbfdate($pdatetime)
+    		#set $temp_timegm_prev = $calendar.timegm($pdatetime.timetuple())
+    		<td align="center" class="nowrap">
+    			<div class="${fuzzydate}">$temp_sbfdate_prev</div>
+    			<span class="sort_data">$temp_timegm_prev</span>
+    		</td>
+    	#except ValueError
+    		<td align="center" class="nowrap"></td>
+    	#end try
     #else:
         <td align="center" class="nowrap"></td>
     #end if
@@ -680,7 +686,11 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
 		</td>
 	
         <td align="center">
+#if sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT
+			<img src="$sbRoot/images/#if int($curShow.paused) == 0 then "yes16.png\" alt=\"Y\"" else "no16.png\" alt=\"N\""# width="16" height="16" />
+#else
 			<img src="$sbRoot/images/#if int($curShow.paused) == 0 and $curShow.status == "Continuing" then "yes16.png\" alt=\"Y\"" else "no16.png\" alt=\"N\""# width="16" height="16" />
+#end if
 		</td>
 		
         <td align="center">
@@ -706,4 +716,4 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
 #end if
 #end for
 
-#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
\ No newline at end of file
+#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
diff --git a/gui/slick/interfaces/default/home_newShow.tmpl b/gui/slick/interfaces/default/home_newShow.tmpl
index 4c1b435832197f38d29088ce3d6d931a60c212bb..cd9f843070521e73667deef2f7290559cdfcb87f 100644
--- a/gui/slick/interfaces/default/home_newShow.tmpl
+++ b/gui/slick/interfaces/default/home_newShow.tmpl
@@ -54,7 +54,7 @@
 				
 				<input type="text" id="nameToSearch" value="$default_show_name" class="form-control form-control-inline input-sm input350" />
 				&nbsp;&nbsp;
-				<select name="indexerLang" id="indexerLangSelect" class="form-control form-control-inline input-sm bfh-languages" data-language="en" data-available="#echo ','.join($sickbeard.indexerApi().config['valid_languages'])#">
+				<select name="indexerLang" id="indexerLangSelect" class="form-control form-control-inline input-sm bfh-languages" data-language="#echo $sickbeard.INDEXER_DEFAULT_LANGUAGE#" data-available="#echo ','.join($sickbeard.indexerApi().config['valid_languages'])#">
 				</select><b>*</b>
 				&nbsp;
 				<select name="providedIndexer" id="providedIndexer" class="form-control form-control-inline input-sm">
diff --git a/gui/slick/interfaces/default/inc_top.tmpl b/gui/slick/interfaces/default/inc_top.tmpl
index f4370bdc3ab64283abbe0e37ccd0046ac3fec0a9..2ea64165b7881de449d7eba3ddb92546a4a1a3aa 100644
--- a/gui/slick/interfaces/default/inc_top.tmpl
+++ b/gui/slick/interfaces/default/inc_top.tmpl
@@ -38,16 +38,19 @@
 		<link rel="stylesheet" type="text/css" href="$sbRoot/css/browser.css?$sbPID" />
 		<link rel="stylesheet" type="text/css" href="$sbRoot/css/lib/jquery-ui-1.10.4.custom.css?$sbPID" />
 		<link rel="stylesheet" type="text/css" href="$sbRoot/css/lib/jquery.qtip-2.2.1.min.css?$sbPID"/>
-		<link rel="stylesheet" type="text/css" href="$sbRoot/css/lib/pnotify.custom.min.css?$sbPID" />
 		<link rel="stylesheet" type="text/css" href="$sbRoot/css/style.css?$sbPID"/>
 		<link rel="stylesheet" type="text/css" href="$sbRoot/css/${sickbeard.THEME_NAME}.css?$sbPID" />
+		#if $sbLogin:
+		<link rel="stylesheet" type="text/css" href="$sbRoot/css/lib/pnotify.custom.min.css?$sbPID" />
 		<link rel="stylesheet" type="text/css" href="$sbRoot/css/country-flags.css?$sbPID"/>
+		#end if
 
 
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery-1.11.2.min.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/bootstrap.min.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/bootstrap-hover-dropdown.min.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery-ui-1.10.4.custom.min.js?$sbPID"></script>
+		#if $sbLogin:
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.cookie.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.cookiejar.js?$sbPID"></script>
 		<script type="text/javascript" src="$sbRoot/js/lib/jquery.json-2.2.min.js?$sbPID"></script>
@@ -129,7 +132,8 @@
 			});
 		//-->
 		</script>
-        <script type="text/javascript" src="$sbRoot/js/confirmations.js?$sbPID"></script>
+	<script type="text/javascript" src="$sbRoot/js/confirmations.js?$sbPID"></script>
+	#end if
 	</head>
 
 	<body>
@@ -255,13 +259,13 @@
 		</div>
 		#end if
 	  	
-	  	#if $sickbeard.BRANCH and $sickbeard.BRANCH != 'master' and not $sickbeard.DEVELOPER
+	  	#if $sickbeard.BRANCH and $sickbeard.BRANCH != 'master' and not $sickbeard.DEVELOPER and $sbLogin
 		<div class="alert alert-danger upgrade-notification" role="alert">
 			<span>You're using the $sickbeard.BRANCH branch. Please use 'master' unless specifically asked</span>
 		</div>
 		#end if
 		
-		#if $sickbeard.NEWEST_VERSION_STRING:	
+		#if $sickbeard.NEWEST_VERSION_STRING and $sbLogin	
 		<div class="alert alert-success upgrade-notification" role="alert">
 			<span>$sickbeard.NEWEST_VERSION_STRING</span>
 		</div>
diff --git a/gui/slick/interfaces/default/viewlogs.tmpl b/gui/slick/interfaces/default/viewlogs.tmpl
index 885f0c6cd9b6cfc6be23afbf16638284b759781e..f3935c613c036c026144339dcc5254c79597e4b8 100644
--- a/gui/slick/interfaces/default/viewlogs.tmpl
+++ b/gui/slick/interfaces/default/viewlogs.tmpl
@@ -1,102 +1,105 @@
-#import sickbeard
-#from sickbeard import classes
-#from sickbeard.common import *
-#from sickbeard.logger import reverseNames
-#set global $header="Log File"
-#set global $title="Logs"
-
-#set global $sbPath = ".."
-
-#set global $topmenu="errorlogs"#
-#import os.path
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
-
-<script type="text/javascript" charset="utf-8">
-<!--
-\$(document).ready(
-
-function(){
-    \$('#minLevel,#logFilter,#logSearch').change(function(){
-        if ( \$('#logSearch').val().length > 0 ) {
-            \$('#logSearch').prop('disabled', true);
-            \$('#logFilter option[value=\\<NONE\\>]').prop('selected', true); 
-            \$('#minLevel option[value=5]').prop('selected', true); 
-        }
-        \$('#minLevel').prop('disabled', true);
-        \$('#logFilter').prop('disabled', true);
-        \$('#logSearch').prop('disabled', true);
-        document.body.style.cursor='wait'
-        url = '$sbRoot/errorlogs/viewlog/?minLevel='+\$('select[name=minLevel]').val()+'&logFilter='+\$('select[name=logFilter]').val()+'&logSearch='+\$('#logSearch').val()
-        window.location.href = url
-
-    });
-
-    \$(window).load(function(){
-
-        if ( \$('#logSearch').val().length == 0 ) {
-            \$('#minLevel').prop('disabled', false);
-            \$('#logFilter').prop('disabled', false);
-            \$('#logSearch').prop('disabled', false);
-        } else { 
-            \$('#minLevel').prop('disabled', true);
-            \$('#logFilter').prop('disabled', true);
-            \$('#logSearch').prop('disabled', false);
-	    } 
-
-         document.body.style.cursor='default';
-    });
-
-    \$('#logSearch').keyup(function() {
-        if ( \$('#logSearch').val().length == 0 ) {
-            \$('#logFilter option[value=\\<NONE\\>]').prop('selected', true); 
-            \$('#minLevel option[value=20]').prop('selected', true); 
-            \$('#minLevel').prop('disabled', false);
-            \$('#logFilter').prop('disabled', false);
-            url = '$sbRoot/errorlogs/viewlog/?minLevel='+\$('select[name=minLevel]').val()+'&logFilter='+\$('select[name=logFilter]').val()+'&logSearch='+\$('#logSearch').val()
-            window.location.href = url
-        } else {
-            \$('#minLevel').prop('disabled', true);
-            \$('#logFilter').prop('disabled', true);
-        }
-    });
-});
-//-->
-</script>
-
-#if $varExists('header') 
-	<h1 class="header">$header</h1>
-#else 
-	<h1 class="title">$title</h1>
-#end if
-
-<div class="h2footer pull-right">Minimum logging level to display: <select name="minLevel" id="minLevel" class="form-control form-control-inline input-sm">
-#set $levels = $reverseNames.keys()
-$levels.sort(lambda x,y: cmp($reverseNames[$x], $reverseNames[$y]))
-#for $level in $levels:
-<option value="$reverseNames[$level]" #if $minLevel == $reverseNames[$level] then "selected=\"selected\"" else ""#>$level.title()</option>
-#end for
-</select>
-
-Filter log by: <select name="logFilter" id="logFilter" class="form-control form-control-inline input-sm">
-#for $logNameFilter in sorted($logNameFilters)
-<option value="$logNameFilter" #if $logFilter == $logNameFilter then "selected=\"selected\"" else ""#>$logNameFilters[$logNameFilter]</option>
-#end for
-</select>
-Search log by:
-<input type="text" name="logSearch" placeholder="clear to reset" id="logSearch" value="#if $logSearch then $logSearch else ""#" class="form-control form-control-inline input-sm" />
-</div>
-<br />
-<div class="align-left"><pre>
-#filter WebSafe
-$logLines
-#end filter
-</pre>
-</div>
-<br />
-<script type="text/javascript" charset="utf-8">
-<!--
-window.setInterval( "location.reload(true)", 600000); // Refresh every 10 minutes
-//-->
-</script>
-
-#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
+#import sickbeard
+#from sickbeard import classes
+#from sickbeard.common import *
+#from sickbeard.logger import reverseNames
+#set global $header="Log File"
+#set global $title="Logs"
+
+#set global $sbPath = ".."
+
+#set global $topmenu="errorlogs"#
+#import os.path
+#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
+
+<script type="text/javascript" charset="utf-8">
+<!--
+\$(document).ready(
+
+function(){
+    \$('#minLevel,#logFilter,#logSearch').change(function(){
+        if ( \$('#logSearch').val().length > 0 ) {
+            \$('#logSearch').prop('disabled', true);
+            \$('#logFilter option[value=\\<NONE\\>]').prop('selected', true); 
+            \$('#minLevel option[value=5]').prop('selected', true); 
+        }
+        \$('#minLevel').prop('disabled', true);
+        \$('#logFilter').prop('disabled', true);
+        \$('#logSearch').prop('disabled', true);
+        document.body.style.cursor='wait'
+        url = '$sbRoot/errorlogs/viewlog/?minLevel='+\$('select[name=minLevel]').val()+'&logFilter='+\$('select[name=logFilter]').val()+'&logSearch='+\$('#logSearch').val()
+        window.location.href = url
+
+    });
+
+    \$(window).load(function(){
+
+        if ( \$('#logSearch').val().length == 0 ) {
+            \$('#minLevel').prop('disabled', false);
+            \$('#logFilter').prop('disabled', false);
+            \$('#logSearch').prop('disabled', false);
+        } else { 
+            \$('#minLevel').prop('disabled', true);
+            \$('#logFilter').prop('disabled', true);
+            \$('#logSearch').prop('disabled', false);
+	    } 
+
+         document.body.style.cursor='default';
+    });
+
+    \$('#logSearch').keyup(function() {
+        if ( \$('#logSearch').val().length == 0 ) {
+            \$('#logFilter option[value=\\<NONE\\>]').prop('selected', true); 
+            \$('#minLevel option[value=20]').prop('selected', true); 
+            \$('#minLevel').prop('disabled', false);
+            \$('#logFilter').prop('disabled', false);
+            url = '$sbRoot/errorlogs/viewlog/?minLevel='+\$('select[name=minLevel]').val()+'&logFilter='+\$('select[name=logFilter]').val()+'&logSearch='+\$('#logSearch').val()
+            window.location.href = url
+        } else {
+            \$('#minLevel').prop('disabled', true);
+            \$('#logFilter').prop('disabled', true);
+        }
+    });
+});
+//-->
+</script>
+
+#if $varExists('header') 
+	<h1 class="header">$header</h1>
+#else 
+	<h1 class="title">$title</h1>
+#end if
+
+<div class="h2footer pull-right">Minimum logging level to display: <select name="minLevel" id="minLevel" class="form-control form-control-inline input-sm">
+#set $levels = $reverseNames.keys()
+$levels.sort(lambda x,y: cmp($reverseNames[$x], $reverseNames[$y]))
+#for $level in $levels:
+	#if not $sickbeard.DEBUG and ($level == 'DEBUG' or $level == 'DB')
+		#continue
+	#end if
+<option value="$reverseNames[$level]" #if $minLevel == $reverseNames[$level] then "selected=\"selected\"" else ""#>$level.title()</option>
+#end for
+</select>
+
+Filter log by: <select name="logFilter" id="logFilter" class="form-control form-control-inline input-sm">
+#for $logNameFilter in sorted($logNameFilters)
+<option value="$logNameFilter" #if $logFilter == $logNameFilter then "selected=\"selected\"" else ""#>$logNameFilters[$logNameFilter]</option>
+#end for
+</select>
+Search log by:
+<input type="text" name="logSearch" placeholder="clear to reset" id="logSearch" value="#if $logSearch then $logSearch else ""#" class="form-control form-control-inline input-sm" />
+</div>
+<br />
+<div class="align-left"><pre>
+#filter WebSafe
+$logLines
+#end filter
+</pre>
+</div>
+<br />
+<script type="text/javascript" charset="utf-8">
+<!--
+window.setInterval( "location.reload(true)", 600000); // Refresh every 10 minutes
+//-->
+</script>
+
+#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
diff --git a/gui/slick/js/configNotifications.js b/gui/slick/js/configNotifications.js
index b35c840c7490182c9465324b5685fbabc45abc3b..2d88287e9bd77a970b7a28648c6e81ef214ac399 100644
--- a/gui/slick/js/configNotifications.js
+++ b/gui/slick/js/configNotifications.js
@@ -299,6 +299,7 @@ $(document).ready(function(){
     $('#testTrakt').click(function () {
         var trakt_username = $.trim($('#trakt_username').val());
         var trakt_password = $.trim($('#trakt_password').val());
+        var trakt_trending_blacklist = $.trim($('#trakt_blacklist_name').val());
         var trakt_disable_ssl_verify = $('#trakt_disable_ssl_verify').is(':checked');
         if (!trakt_username || !trakt_password) {
             $('#testTrakt-result').html('Please fill out the necessary fields above.');
@@ -314,10 +315,17 @@ $(document).ready(function(){
 			}
             return;
         }
+
+        if (/\s/g.test(trakt_trending_blacklist)) {
+            $('#testTrakt-result').html('Check the blacklist name; the value needs to be a Trakt slug');
+	    $('#trakt_blacklist_name').addClass('warning');
+            return;
+        }
 		$('#trakt_username,#trakt_password').removeClass('warning');
+	        $('#trakt_blacklist_name').removeClass('warning');
         $(this).prop('disabled', true);
         $('#testTrakt-result').html(loading);
-        $.get(sbRoot + '/home/testTrakt', {'username': trakt_username, 'password': trakt_password, 'disable_ssl': trakt_disable_ssl_verify})
+        $.get(sbRoot + '/home/testTrakt', {'username': trakt_username, 'password': trakt_password, 'disable_ssl': trakt_disable_ssl_verify, 'blacklist_name': trakt_trending_blacklist})
             .done(function (data) {
                 $('#testTrakt-result').html(data);
                 $('#testTrakt').prop('disabled', false);
diff --git a/gui/slick/js/displayShow.js b/gui/slick/js/displayShow.js
index 756fcca1b063ee4817c16179208afc933168dbb5..4825c2cbcc45420f9fd67a2aa8199a504fd0c04f 100644
--- a/gui/slick/js/displayShow.js
+++ b/gui/slick/js/displayShow.js
@@ -260,12 +260,4 @@ $(document).ready(function () {
         }
         setAbsoluteSceneNumbering(forAbsolute, sceneAbsolute);
     });
-
-    $('.jwvideo').each(function () {
-        jwplayer(this.id).setup({
-            file: $(this).attr("id"),
-            width:120,
-            height:120
-        });
-    });
 });
diff --git a/gui/slick/js/jwplayer/jwplayer.html5.js b/gui/slick/js/jwplayer/jwplayer.html5.js
deleted file mode 100644
index 471175f599a6d7c4e8262c938192df3bb197c662..0000000000000000000000000000000000000000
--- a/gui/slick/js/jwplayer/jwplayer.html5.js
+++ /dev/null
@@ -1,243 +0,0 @@
-(function(g){g.html5={};g.html5.version="6.9.4867";g=g.utils.css;g(".jwplayer ".slice(0,-1)+" div span a img ul li video".split(" ").join(", .jwplayer ")+", .jwclick",{margin:0,padding:0,border:0,color:"#000000","font-size":"100%",font:"inherit","vertical-align":"baseline","background-color":"transparent","text-align":"left",direction:"ltr","-webkit-tap-highlight-color":"rgba(255, 255, 255, 0)"});g(".jwplayer ul",{"list-style":"none"})})(jwplayer);
-(function(g){var l=document;g.parseDimension=function(c){return"string"==typeof c?""===c?0:-1<c.lastIndexOf("%")?c:parseInt(c.replace("px",""),10):c};g.timeFormat=function(c){if(0<c){var a=Math.floor(c/3600),d=Math.floor((c-3600*a)/60);c=Math.floor(c%60);return(a?a+":":"")+(10>d?"0":"")+d+":"+(10>c?"0":"")+c}return"00:00"};g.bounds=function(c){var a={left:0,right:0,width:0,height:0,top:0,bottom:0};if(!c||!l.body.contains(c))return a;if(c.getBoundingClientRect){c=c.getBoundingClientRect(c);var d=window.pageYOffset,
-k=window.pageXOffset;if(!c.width&&!c.height&&!c.left&&!c.top)return a;a.left=c.left+k;a.right=c.right+k;a.top=c.top+d;a.bottom=c.bottom+d;a.width=c.right-c.left;a.height=c.bottom-c.top}else{a.width=c.offsetWidth|0;a.height=c.offsetHeight|0;do a.left+=c.offsetLeft|0,a.top+=c.offsetTop|0;while(c=c.offsetParent);a.right=a.left+a.width;a.bottom=a.top+a.height}return a};g.empty=function(c){if(c)for(;0<c.childElementCount;)c.removeChild(c.children[0])}})(jwplayer.utils);
-(function(g){var l=g.stretching={NONE:"none",FILL:"fill",UNIFORM:"uniform",EXACTFIT:"exactfit"};g.scale=function(c,a,d,k,b){var e="";a=a||1;d=d||1;k|=0;b|=0;if(1!==a||1!==d)e="scale("+a+", "+d+")";if(k||b)e="translate("+k+"px, "+b+"px)";g.transform(c,e)};g.stretch=function(c,a,d,k,b,e){if(!a||!d||!k||!b||!e)return!1;c=c||l.UNIFORM;var f=2*Math.ceil(d/2)/b,C=2*Math.ceil(k/2)/e,p="video"===a.tagName.toLowerCase(),q=!1,v="jw"+c.toLowerCase();switch(c.toLowerCase()){case l.FILL:f>C?C=f:f=C;q=!0;break;
-case l.NONE:f=C=1;case l.EXACTFIT:q=!0;break;default:f>C?0.95<b*C/d?(q=!0,v="jwexactfit"):(b*=C,e*=C):0.95<e*f/k?(q=!0,v="jwexactfit"):(b*=f,e*=f),q&&(f=2*Math.ceil(d/2)/b,C=2*Math.ceil(k/2)/e)}p?(c={left:"",right:"",width:"",height:""},q?(d<b&&(c.left=c.right=Math.ceil((d-b)/2)),k<e&&(c.top=c.bottom=Math.ceil((k-e)/2)),c.width=b,c.height=e,g.scale(a,f,C,0,0)):(q=!1,g.transform(a)),g.css.style(a,c)):a.className=a.className.replace(/\s*jw(none|exactfit|uniform|fill)/g,"")+" "+v;return q}})(jwplayer.utils);
-(function(g){g.dfxp=function(){var l=jwplayer.utils.seconds;this.parse=function(c){var a=[{begin:0,text:""}];c=c.replace(/^\s+/,"").replace(/\s+$/,"");var d=c.split("\x3c/p\x3e"),k=c.split("\x3c/tt:p\x3e"),b=[];for(c=0;c<d.length;c++)0<=d[c].indexOf("\x3cp")&&(d[c]=d[c].substr(d[c].indexOf("\x3cp")+2).replace(/^\s+/,"").replace(/\s+$/,""),b.push(d[c]));for(c=0;c<k.length;c++)0<=k[c].indexOf("\x3ctt:p")&&(k[c]=k[c].substr(k[c].indexOf("\x3ctt:p")+5).replace(/^\s+/,"").replace(/\s+$/,""),b.push(k[c]));
-d=b;for(c=0;c<d.length;c++){k=d[c];b={};try{var e=k.indexOf('begin\x3d"'),k=k.substr(e+7),e=k.indexOf('" end\x3d"');b.begin=l(k.substr(0,e));k=k.substr(e+7);e=k.indexOf('"');b.end=l(k.substr(0,e));e=k.indexOf('"\x3e');k=k.substr(e+2);b.text=k}catch(f){}k=b;k.text&&(a.push(k),k.end&&(a.push({begin:k.end,text:""}),delete k.end))}if(1<a.length)return a;throw{message:"Invalid DFXP file:"};}}})(jwplayer.parsers);
-(function(g){g.srt=function(){var l=jwplayer.utils,c=l.seconds;this.parse=function(a,d){var k=d?[]:[{begin:0,text:""}];a=l.trim(a);var b=a.split("\r\n\r\n");1==b.length&&(b=a.split("\n\n"));for(var e=0;e<b.length;e++)if("WEBVTT"!=b[e]){var f,g=b[e];f={};var p=g.split("\r\n");1==p.length&&(p=g.split("\n"));try{g=1;0<p[0].indexOf(" --\x3e ")&&(g=0);var q=p[g].indexOf(" --\x3e ");0<q&&(f.begin=c(p[g].substr(0,q)),f.end=c(p[g].substr(q+5)));if(p[g+1]){f.text=p[g+1];for(g+=2;g<p.length;g++)f.text+="\x3cbr/\x3e"+
-p[g]}}catch(v){}f.text&&(k.push(f),f.end&&!d&&(k.push({begin:f.end,text:""}),delete f.end))}if(1<k.length)return k;throw{message:"Invalid SRT file"};}}})(jwplayer.parsers);
-(function(g){var l=g.utils,c=g.events,a=c.state,d=!0,k=!1;g.html5.video=function(b,e){function f(b,a){O&&R.sendEvent(b,a)}function g(){}function p(e){t(e);O&&(W==a.PLAYING&&!J)&&(A=(10*b.currentTime|0)/10,x=d,f(c.JWPLAYER_MEDIA_TIME,{position:A,duration:u}))}function q(){f(c.JWPLAYER_MEDIA_META,{duration:b.duration,height:b.videoHeight,width:b.videoWidth})}function v(a){O&&(x||(x=d,h()),"loadedmetadata"==a.type&&(b.muted&&(b.muted=k,b.muted=d),q()))}function t(){x&&(0<H&&!$)&&(y?setTimeout(function(){0<
-H&&wa(H)},200):wa(H))}function h(){z||(z=d,f(c.JWPLAYER_MEDIA_BUFFER_FULL))}function m(c){O&&!J&&(b.paused?b.currentTime==b.duration&&3<b.duration||Ba():(!l.isFF()||!("play"==c.type&&W==a.BUFFERING))&&F(a.PLAYING))}function D(){O&&(J||F(a.BUFFERING))}function w(b){var a;if("array"==l.typeOf(b)&&0<b.length){a=[];for(var c=0;c<b.length;c++){var d=b[c],f={};f.label=d.label&&d.label?d.label?d.label:0:c;a[c]=f}}return a}function n(c,d){K=T[X];F(a.BUFFERING);S=setInterval(r,100);H=0;b.src!==K.file||s||
-I?(z=x=k,u=d?d:-1,b.src=K.file,b.load()):(0===c&&(H=-1,wa(c)),q(),b.play());A=b.currentTime;s&&h();l.isIOS()&&R.getFullScreen()&&(b.controls=!0);0<c&&wa(c)}function F(b){if(!(b==a.PAUSED&&W==a.IDLE)&&!J&&W!=b){var d=W;W=b;f(c.JWPLAYER_PLAYER_STATE,{oldstate:d,newstate:b})}}function r(){if(O){var a;a=!b.duration||0===b.buffered.length?0:b.buffered.end(b.buffered.length-1)/b.duration;a!=Y&&(Y=a,f(c.JWPLAYER_MEDIA_BUFFER,{bufferPercent:Math.round(100*Y)}));1<=a&&clearInterval(S)}}function j(b){f("fullscreenchange",
-{target:b.target,jwstate:ma})}e=e||"";var y=l.isMSIE(),s=l.isMobile(),I=l.isSafari(),L={abort:g,canplay:v,canplaythrough:g,durationchange:function(){if(O){var a=(10*b.duration|0)/10;u!=a&&(u=a);$&&(0<H&&a>H)&&wa(H);p()}},emptied:g,ended:function(){O&&W!=a.IDLE&&(X=-1,Ca=d,f(c.JWPLAYER_MEDIA_BEFORECOMPLETE),O&&(F(a.IDLE),Ca=k,f(c.JWPLAYER_MEDIA_COMPLETE)))},error:function(){O&&(l.log("Error playing media: %o",b.error),f(c.JWPLAYER_MEDIA_ERROR,{message:"Error loading media: File could not be played"}),
-F(a.IDLE))},loadeddata:g,loadedmetadata:v,loadstart:g,pause:m,play:m,playing:m,progress:t,ratechange:g,readystatechange:g,seeked:function(){!J&&W!=a.PAUSED&&F(a.PLAYING)},seeking:y?D:g,stalled:g,suspend:g,timeupdate:p,volumechange:function(){f(c.JWPLAYER_MEDIA_VOLUME,{volume:Math.round(100*b.volume)});f(c.JWPLAYER_MEDIA_MUTE,{mute:b.muted})},waiting:D,webkitbeginfullscreen:function(a){ma=!0;j(a);l.isIOS()&&(b.controls=k)},webkitendfullscreen:function(a){ma=!1;j(a);l.isIOS()&&(b.controls=k)}},B,K,
-u,A,x=k,z,H=0,J=k,W=a.IDLE,E,S=-1,Y=-1,O=k,T,X=-1,$=l.isAndroidNative(),ha=l.isIOS(7),ka=[],Ca=k,ma=null,R=l.extend(this,new c.eventdispatcher(e));R.load=function(b){if(O){T=b.sources;0>X&&(X=0);if(T)for(var a=l.getCookies().qualityLabel,d=0;d<T.length;d++)if(T[d]["default"]&&(X=d),a&&T[d].label==a){X=d;break}(a=w(T))&&R.sendEvent(c.JWPLAYER_MEDIA_LEVELS,{levels:a,currentQuality:X});n(b.starttime||0,b.duration)}};R.stop=function(){O&&(b.removeAttribute("src"),y||b.load(),X=-1,clearInterval(S),F(a.IDLE))};
-R.destroy=function(){clearInterval(S)};R.play=function(){O&&!J&&b.play()};var Ba=R.pause=function(){O&&(b.pause(),F(a.PAUSED))};R.seekDrag=function(a){O&&((J=a)?b.pause():b.play())};var wa=R.seek=function(a){if(O)if(!J&&0===H&&f(c.JWPLAYER_MEDIA_SEEK,{position:A,offset:a}),x){H=0;try{b.currentTime=a}catch(d){H=a}}else H=a},Ra=R.volume=function(a){l.exists(a)&&(b.volume=Math.min(Math.max(0,a/100),1),E=100*b.volume)};R.mute=function(a){l.exists(a)||(a=!b.muted);a?(E=100*b.volume,b.muted=d):(Ra(E),b.muted=
-k)};R.addCaptions=function(a){if(l.isIOS()&&b.addTextTrack){var c=window.TextTrackCue;l.foreach(a,function(a,d){if(d.data){var f=b.addTextTrack(d.kind,d.label);l.foreach(d.data,function(b,a){1==b%2&&f.addCue(new c(a.begin,d.data[parseInt(b)+1].begin,a.text))});ka.push(f);f.mode="hidden"}})}};R.resetCaptions=function(){};R.fsCaptions=function(a){if(l.isIOS()&&b.addTextTrack){var c=null;l.foreach(ka,function(b,d){!a&&"showing"==d.mode&&(c=parseInt(b));a||(d.mode="hidden")});if(!a)return c}};this.checkComplete=
-function(){return Ca};R.detachMedia=function(){O=k;return b};R.attachMedia=function(b){O=d;b||(x=k);Ca&&(F(a.IDLE),f(c.JWPLAYER_MEDIA_COMPLETE),Ca=k)};R.setContainer=function(a){B=a;a.appendChild(b)};R.getContainer=function(){return B};R.remove=function(){b&&(b.removeAttribute("src"),y||b.load());clearInterval(S);X=-1;B===b.parentNode&&B.removeChild(b)};R.setVisibility=function(b){b||$?l.css.style(B,{visibility:"visible",opacity:1}):l.css.style(B,{visibility:"",opacity:0})};R.resize=function(a,c,
-d){return l.stretch(d,b,a,c,b.videoWidth,b.videoHeight)};R.setControls=function(a){b.controls=!!a};R.supportsFullscreen=function(){return!0};R.setFullScreen=function(a){if(a=!!a){try{var c=b.webkitEnterFullscreen||b.webkitEnterFullScreen;c&&c.apply(b)}catch(d){return!1}return R.getFullScreen()}(c=b.webkitExitFullscreen||b.webkitExitFullScreen)&&c.apply(b);return a};R.getFullScreen=function(){return ma||b.webkitDisplayingFullscreen};R.audioMode=function(){if(!T)return k;var b=T[0].type;return"oga"==
-b||"aac"==b||"mp3"==b||"vorbis"==b};R.setCurrentQuality=function(a){if(X!=a&&(a=parseInt(a,10),0<=a&&T&&T.length>a)){X=a;l.saveCookie("qualityLabel",T[a].label);f(c.JWPLAYER_MEDIA_LEVEL_CHANGED,{currentQuality:a,levels:w(T)});a=(10*b.currentTime|0)/10;var d=(10*b.duration|0)/10;0>=d&&(d=u);n(a,d)}};R.getCurrentQuality=function(){return X};R.getQualityLevels=function(){return w(T)};b||(b=document.createElement("video"));l.foreach(L,function(a,c){b.addEventListener(a,c,k)});ha||(b.controls=d,b.controls=
-k);b.setAttribute("x-webkit-airplay","allow");O=d}})(jwplayer);
-(function(g,l){function c(){return!1}function a(){}var d=g.jwplayer,k=d.utils,b=d.events,e=b.state,f=new k.scriptloader(g.location.protocol+"//www.youtube.com/iframe_api"),C=k.isMobile(),p=k.isSafari();g.onYouTubeIframeAPIReady=function(){f=null};d.html5.youtube=function(a){function c(a){g.YT&&g.YT.loaded?(A=g.YT,h(a)):setTimeout(c,100)}function t(){}function h(){var b;if(b=!!A)b=z.parentNode,b||(E||(d(a).onReady(h),E=!0),b=null),b=!!b;b&&S&&S.apply(u)}function m(c){var d={oldstate:J,newstate:c};
-J=c;clearInterval(O);c!==e.IDLE&&(O=setInterval(D,250),c===e.PLAYING?k.css("#"+a+" .jwcontrols",{display:""}):c===e.BUFFERING&&w());u.sendEvent(b.JWPLAYER_PLAYER_STATE,d)}function D(){if(x&&x.getPlayerState){var a=x.getPlayerState();null!==a&&(void 0!==a&&a!==T)&&(T=a,j({data:a}));var c=A.PlayerState;a===c.PLAYING?(w(),a={position:(10*x.getCurrentTime()|0)/10,duration:x.getDuration()},u.sendEvent(b.JWPLAYER_MEDIA_TIME,a)):a===c.BUFFERING&&w()}}function w(){var a=0;x&&x.getVideoLoadedFraction&&(a=
-Math.round(100*x.getVideoLoadedFraction()));W!==a&&(W=a,u.sendEvent(b.JWPLAYER_MEDIA_BUFFER,{bufferPercent:a}))}function n(){var a={duration:x.getDuration(),width:z.clientWidth,height:z.clientHeight};u.sendEvent(b.JWPLAYER_MEDIA_META,a)}function F(a,b){if(!a)throw"invalid Youtube ID";if(!z.parentNode)throw"Youtube iFrame removed from DOM";var c={height:"100%",width:"100%",videoId:a,playerVars:k.extend({autoplay:0,controls:0,showinfo:0,rel:0,modestbranding:0,playsinline:1,origin:location.protocol+
-"//"+location.hostname},b),events:{onReady:r,onStateChange:j,onPlaybackQualityChange:y,onError:s}};u.setVisibility(!0);x=new A.Player(z,c);z=x.getIframe();S=null;I()}function r(){Y&&(Y.apply(u),Y=null)}function j(a){var c=A.PlayerState;switch(a.data){case c.UNSTARTED:m(e.BUFFERING);break;case c.ENDED:J!=e.IDLE&&(X=!0,u.sendEvent(b.JWPLAYER_MEDIA_BEFORECOMPLETE,void 0),m(e.IDLE),X=!1,u.sendEvent(b.JWPLAYER_MEDIA_COMPLETE,void 0));break;case c.PLAYING:$=!1;ha&&(ha=!1,n(),a={levels:u.getQualityLevels(),
-currentQuality:u.getCurrentQuality()},u.sendEvent(b.JWPLAYER_MEDIA_LEVELS,a));m(e.PLAYING);break;case c.PAUSED:m(e.PAUSED);break;case c.BUFFERING:m(e.BUFFERING);break;case c.CUED:m(e.IDLE)}}function y(){ha&&u.play()}function s(){u.sendEvent(b.JWPLAYER_MEDIA_ERROR,{message:"Error loading YouTube: Video could not be played"})}function I(){if(C||p)u.setVisibility(!0),k.css("#"+a+" .jwcontrols",{display:"none"})}function L(){clearInterval(O);if(x&&x.stopVideo)try{x.stopVideo(),x.clearVideo()}catch(a){}}
-function B(){L();z&&(H&&H===z.parentNode)&&H.removeChild(z);S=Y=x=null}function K(a){Y=null;var b=k.youTubeID(a.sources[0].file);a.image||(a.image="http://i.ytimg.com/vi/"+b+"/0.jpg");u.setVisibility(!0);if(A)if(x)if(x.getPlayerState)if(x.getVideoData().video_id!==b){$?(L(),x.cueVideoById(b)):x.loadVideoById(b);var c=x.getPlayerState(),d=A.PlayerState;(c===d.UNSTARTED||c===d.CUED)&&I()}else 0<x.getCurrentTime()&&x.seekTo(0),n();else Y=function(){u.load(a)};else F(b,{autoplay:$?0:1});else S=function(){F(b)},
-h()}var u=k.extend(this,new b.eventdispatcher("html5.youtube")),A=g.YT,x=null,z=l.createElement("div"),H,J=e.IDLE,W=-1,E=!1,S=null,Y=null,O=-1,T=-1,X=!1,$=C||p,ha=!0;!A&&f&&(f.addEventListener(b.COMPLETE,c),f.addEventListener(b.ERROR,t),f.load());z.id=a+"_youtube";u.init=function(a){K(a)};u.destroy=function(){B();H=z=A=u=null};u.getElement=function(){return z};u.load=function(a){m(e.BUFFERING);K(a);u.play()};u.stop=function(){L();m(e.IDLE)};u.play=function(){$||x.playVideo&&x.playVideo()};u.pause=
-function(){$||x.pauseVideo&&x.pauseVideo()};u.seek=function(a){$||x.seekTo&&x.seekTo(a)};u.volume=function(a){x&&x.setVolume(a)};u.mute=function(a){x&&a&&x.setVolume(0)};u.detachMedia=function(){return l.createElement("video")};u.attachMedia=function(){X&&(m(e.IDLE),u.sendEvent(b.JWPLAYER_MEDIA_COMPLETE,void 0),X=!1)};u.setContainer=function(a){H=a;a.appendChild(z);u.setVisibility(!0)};u.getContainer=function(){return H};u.supportsFullscreen=function(){return!(!H||!H.requestFullscreen&&!H.requestFullScreen&&
-!H.webkitRequestFullscreen&&!H.webkitRequestFullScreen&&!H.webkitEnterFullscreen&&!H.webkitEnterFullScreen&&!H.mozRequestFullScreen&&!H.msRequestFullscreen)};u.remove=function(){B()};u.setVisibility=function(a){a?(k.css.style(z,{display:"block"}),k.css.style(H,{visibility:"visible",opacity:1})):!C&&!p&&k.css.style(H,{opacity:0})};u.resize=function(a,b,c){return k.stretch(c,z,a,b,z.clientWidth,z.clientHeight)};u.checkComplete=function(){return X};u.getCurrentQuality=function(){if(x){if(x.getAvailableQualityLevels){var a=
-x.getPlaybackQuality();return x.getAvailableQualityLevels().indexOf(a)}return-1}};u.getQualityLevels=function(){if(x){var a=[];if(x.getAvailableQualityLevels)for(var b=x.getAvailableQualityLevels(),c=b.length;c--;)a.push({label:b[c]});return a}};u.setCurrentQuality=function(a){if(x&&x.getAvailableQualityLevels){var b=x.getAvailableQualityLevels();b.length&&x.setPlaybackQuality(b[b.length-a-1])}}};d.html5.youtube.prototype={seekDrag:a,setFullScreen:c,getFullScreen:c,setControls:a,audioMode:c}})(window,
-document);
-(function(g){var l=g.utils,c=l.css,a=g.events,d=80,k=30;g.html5.adskipbutton=function(b,e,f,g){function p(a){0>F||(a=f.replace(/xx/gi,Math.ceil(F-a)),t(a))}function q(a,b){if("number"==l.typeOf(y))F=y;else if("%"==y.slice(-1)){var c=parseFloat(y.slice(0,-1));b&&!isNaN(c)&&(F=b*c/100)}else"string"==l.typeOf(y)?F=l.seconds(y):isNaN(y)||(F=y)}function v(){r&&B.sendEvent(a.JWPLAYER_AD_SKIPPED)}function t(a){a=a||g;var b=n.getContext("2d");b.clearRect(0,0,d,k);m(b,0,0,d,k,5,!0,!1,!1);m(b,0,0,d,k,5,!1,
-!0,!1);b.fillStyle="#979797";b.globalAlpha=1;var c=n.height/2,f=n.width/2;b.textAlign="center";b.font="Bold 12px Sans-Serif";a===g&&(f-=s.width,b.drawImage(s,n.width-(n.width-b.measureText(g).width)/2-4,(k-s.height)/2));b.fillText(a,f,c+4)}function h(a){a=a||g;var b=n.getContext("2d");b.clearRect(0,0,d,k);m(b,0,0,d,k,5,!0,!1,!0);m(b,0,0,d,k,5,!1,!0,!0);b.fillStyle="#FFFFFF";b.globalAlpha=1;var c=n.height/2,f=n.width/2;b.textAlign="center";b.font="Bold 12px Sans-Serif";a===g&&(f-=s.width,b.drawImage(I,
-n.width-(n.width-b.measureText(g).width)/2-4,(k-s.height)/2));b.fillText(a,f,c+4)}function m(a,b,c,d,f,e,h,k,m){"undefined"==typeof k&&(k=!0);"undefined"===typeof e&&(e=5);a.beginPath();a.moveTo(b+e,c);a.lineTo(b+d-e,c);a.quadraticCurveTo(b+d,c,b+d,c+e);a.lineTo(b+d,c+f-e);a.quadraticCurveTo(b+d,c+f,b+d-e,c+f);a.lineTo(b+e,c+f);a.quadraticCurveTo(b,c+f,b,c+f-e);a.lineTo(b,c+e);a.quadraticCurveTo(b,c,b+e,c);a.closePath();k&&(a.strokeStyle="white",a.globalAlpha=m?1:0.25,a.stroke());h&&(a.fillStyle=
-"#000000",a.globalAlpha=0.5,a.fill())}function D(a,b){var c=document.createElement(a);b&&(c.className=b);return c}var w,n,F=-1,r=!1,j,y=0,s,I,L=!1,B=l.extend(this,new a.eventdispatcher);B.updateSkipTime=function(a,b){q(a,b);0<=F&&(c.style(w,{visibility:j?"visible":"hidden"}),0<F-a?(p(a),r&&(r=!1,w.style.cursor="default")):r||(r||(r=!0,w.style.cursor="pointer"),L?h():t()))};this.reset=function(a){r=!1;y=a;q(0,0);p(0)};B.show=function(){j=!0;0<F&&c.style(w,{visibility:"visible"})};B.hide=function(){j=
-!1;c.style(w,{visibility:"hidden"})};this.element=function(){return w};s=new Image;s.src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAICAYAAAArzdW1AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAA3NpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNS1jMDE0IDc5LjE1MTQ4MSwgMjAxMy8wMy8xMy0xMjowOToxNSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo0ODkzMWI3Ny04YjE5LTQzYzMtOGM2Ni0wYzdkODNmZTllNDYiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6RDI0OTcxRkE0OEM2MTFFM0I4MTREM0ZBQTFCNDE3NTgiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6RDI0OTcxRjk0OEM2MTFFM0I4MTREM0ZBQTFCNDE3NTgiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIChNYWNpbnRvc2gpIj4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6NDA5ZGQxNDktNzdkMi00M2E3LWJjYWYtOTRjZmM2MWNkZDI0IiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjQ4OTMxYjc3LThiMTktNDNjMy04YzY2LTBjN2Q4M2ZlOWU0NiIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PqAZXX0AAABYSURBVHjafI2BCcAwCAQ/kr3ScRwjW+g2SSezCi0kYHpwKLy8JCLDbWaGTM+MAFzuVNXhNiTQsh+PS9QhZ7o9JuFMeUVNwjsamDma4K+3oy1cqX/hxyPAAAQwNKV27g9PAAAAAElFTkSuQmCC";
-s.className="jwskipimage jwskipout";I=new Image;I.src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAICAYAAAArzdW1AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAA3NpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNS1jMDE0IDc5LjE1MTQ4MSwgMjAxMy8wMy8xMy0xMjowOToxNSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo0ODkzMWI3Ny04YjE5LTQzYzMtOGM2Ni0wYzdkODNmZTllNDYiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6RDI0OTcxRkU0OEM2MTFFM0I4MTREM0ZBQTFCNDE3NTgiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6RDI0OTcxRkQ0OEM2MTFFM0I4MTREM0ZBQTFCNDE3NTgiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIChNYWNpbnRvc2gpIj4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6NDA5ZGQxNDktNzdkMi00M2E3LWJjYWYtOTRjZmM2MWNkZDI0IiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjQ4OTMxYjc3LThiMTktNDNjMy04YzY2LTBjN2Q4M2ZlOWU0NiIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PvgIj/QAAABYSURBVHjadI6BCcAgDAS/0jmyih2tm2lHSRZJX6hQQ3w4FP49LKraSHV3ZLDzAuAi3cwaqUhSfvft+EweznHneUdTzPGRmp5hEJFhAo3LaCnjn7blzCvAAH9YOSCL5RZKAAAAAElFTkSuQmCC";
-I.className="jwskipimage jwskipover";w=D("div","jwskip");w.id=b+"_skipcontainer";n=D("canvas");w.appendChild(n);B.width=n.width=d;B.height=n.height=k;w.appendChild(I);w.appendChild(s);c.style(w,{visibility:"hidden",bottom:e});w.addEventListener("mouseover",function(){L=!0;r&&h()});w.addEventListener("mouseout",function(){L=!1;r&&t()});l.isMobile()?(new l.touch(w)).addEventListener(l.touchEvents.TAP,v):w.addEventListener("click",v)};c(".jwskip",{position:"absolute","float":"right",display:"inline-block",
-width:d,height:k,right:10});c(".jwskipimage",{position:"relative",display:"none"})})(window.jwplayer);
-(function(g){var l=g.html5,c=g.utils,a=g.events,d=a.state,k=g.parsers,b=c.css,e=c.isAndroid(4,!0),f="playing",C=document;l.captions=function(b,q){function l(a){c.log("CAPTIONS("+a+")")}function t(a){(J=a.fullscreen)?(h(),setTimeout(h,500)):n(!0)}function h(){var a=s.offsetHeight,b=s.offsetWidth;0!==a&&0!==b&&B.resize(b,Math.round(0.94*a))}function m(a,b){c.ajax(a,function(a){var c=a.responseXML?a.responseXML.firstChild:null;x++;if(c){"xml"==k.localName(c)&&(c=c.nextSibling);for(;c.nodeType==c.COMMENT_NODE;)c=
-c.nextSibling}c=c&&"tt"==k.localName(c)?new g.parsers.dfxp:new g.parsers.srt;try{var d=c.parse(a.responseText);u<A.length&&(A[b].data=d);n(!1)}catch(f){l(f.message+": "+A[b].file)}x==A.length&&(0<z&&(r(z),z=-1),w())},D,!0)}function D(a){x++;l(a);x==A.length&&(0<z&&(r(z),z=-1),w())}function w(){for(var b=[],c=0;c<A.length;c++)b.push(A[c]);W.sendEvent(a.JWPLAYER_CAPTIONS_LOADED,{captionData:b})}function n(a){A.length?K==f&&0<H?(B.show(),J?t({fullscreen:!0}):(F(),a&&setTimeout(F,500))):B.hide():B.hide()}
-function F(){B.resize()}function r(b){0<b?(u=b-1,H=b|0,u>=A.length||(A[u].data?B.populate(A[u].data):x==A.length?(l("file not loaded: "+A[u].file),0!==H&&j(a.JWPLAYER_CAPTIONS_CHANGED,A,0),H=0):z=b,n(!1))):(H=0,n(!1))}function j(a,b,c){W.sendEvent(a,{type:a,tracks:b,track:c})}function y(){for(var a=[{label:"Off"}],b=0;b<A.length;b++)a.push({label:A[b].label});return a}var s,I={back:!0,color:"#FFFFFF",fontSize:15,fontFamily:"Arial,sans-serif",fontOpacity:100,backgroundColor:"#000",backgroundOpacity:100,
-edgeStyle:null,windowColor:"#FFFFFF",windowOpacity:0},L={fontStyle:"normal",fontWeight:"normal",textDecoration:"none"},B,K,u,A=[],x=0,z=-1,H=0,J=!1,W=new a.eventdispatcher;c.extend(this,W);this.element=function(){return s};this.getCaptionsList=function(){return y()};this.getCurrentCaptions=function(){return H};this.setCurrentCaptions=function(b){0<=b&&(H!=b&&b<=A.length)&&(r(b),b=y(),c.saveCookie("captionLabel",b[H].label),j(a.JWPLAYER_CAPTIONS_CHANGED,b,H))};s=C.createElement("div");s.id=b.id+"_caption";
-s.className="jwcaptions";b.jwAddEventListener(a.JWPLAYER_PLAYER_STATE,function(a){switch(a.newstate){case d.IDLE:K="idle";n(!1);break;case d.PLAYING:K=f,n(!1)}});b.jwAddEventListener(a.JWPLAYER_PLAYLIST_ITEM,function(){u=0;A=[];B.update(0);x=0;for(var d=b.jwGetPlaylist()[b.jwGetPlaylistIndex()].tracks,f=[],h=0,k="",g=0,k="",h=0;h<d.length;h++)k=d[h].kind.toLowerCase(),("captions"==k||"subtitles"==k)&&f.push(d[h]);H=0;if(!e){for(h=0;h<f.length;h++)if(k=f[h].file)f[h].label||(f[h].label=h.toString()),
-A.push(f[h]),m(A[h].file,h);for(h=0;h<A.length;h++)if(A[h]["default"]){g=h+1;break}d=c.getCookies();if(k=d.captionLabel){d=y();for(h=0;h<d.length;h++)if(k==d[h].label){g=h;break}}0<g&&r(g);n(!1);j(a.JWPLAYER_CAPTIONS_LIST,y(),H)}});b.jwAddEventListener(a.JWPLAYER_MEDIA_ERROR,l);b.jwAddEventListener(a.JWPLAYER_ERROR,l);b.jwAddEventListener(a.JWPLAYER_READY,function(){c.foreach(I,function(a,b){q&&(void 0!==q[a]?b=q[a]:void 0!==q[a.toLowerCase()]&&(b=q[a.toLowerCase()]));L[a]=b});B=new g.html5.captions.renderer(L,
-s);n(!1)});b.jwAddEventListener(a.JWPLAYER_MEDIA_TIME,function(a){B.update(a.position)});b.jwAddEventListener(a.JWPLAYER_FULLSCREEN,t);b.jwAddEventListener(a.JWPLAYER_RESIZE,function(){n(!1)})};b(".jwcaptions",{position:"absolute",cursor:"pointer",width:"100%",height:"100%",overflow:"hidden"})})(jwplayer);
-(function(g){var l=g.utils,c=l.css.style;g.html5.captions.renderer=function(a,d){function k(a){a=a||"";m="hidden";c(p,{visibility:m});v.innerHTML=a;a.length&&(m="visible",setTimeout(b,16))}function b(){if("visible"===m){var b=p.clientWidth,d=Math.pow(b/400,0.6),f=a.fontSize*d;c(v,{maxWidth:b+"px",fontSize:Math.round(f)+"px",lineHeight:Math.round(1.4*f)+"px",padding:Math.round(1*d)+"px "+Math.round(8*d)+"px"});a.windowOpacity&&c(q,{padding:Math.round(5*d)+"px",borderRadius:Math.round(5*d)+"px"});c(p,
-{visibility:m})}}function e(){for(var a=-1,b=0;b<g.length;b++)if(g[b].begin<=h&&(b==g.length-1||g[b+1].begin>=h)){a=b;break}-1==a?k(""):a!=t&&(t=a,k(g[b].text))}function f(a,b,c){c=l.hexToRgba("#000000",c);"dropshadow"===a?b.textShadow="0 2px 1px "+c:"raised"===a?b.textShadow="0 0 5px "+c+", 0 1px 5px "+c+", 0 2px 5px "+c:"depressed"===a?b.textShadow="0 -2px 1px "+c:"uniform"===a&&(b.textShadow="-2px 0 1px "+c+",2px 0 1px "+c+",0 -2px 1px "+c+",0 2px 1px "+c+",-1px 1px 1px "+c+",1px 1px 1px "+c+",1px -1px 1px "+
-c+",1px 1px 1px "+c)}var g,p,q,v,t,h,m="visible",D=-1;this.hide=function(){clearInterval(D);c(p,{display:"none"})};this.populate=function(a){t=-1;g=a;e()};this.resize=function(){b()};this.show=function(){c(p,{display:"block"});b();clearInterval(D);D=setInterval(b,250)};this.update=function(a){h=a;g&&e()};var w=a.fontOpacity,n=a.windowOpacity,F=a.edgeStyle,r=a.backgroundColor,j={display:"inline-block"},y={color:l.hexToRgba(l.rgbHex(a.color),w),display:"inline-block",fontFamily:a.fontFamily,fontStyle:a.fontStyle,
-fontWeight:a.fontWeight,textAlign:"center",textDecoration:a.textDecoration,wordWrap:"break-word"};n&&(j.backgroundColor=l.hexToRgba(l.rgbHex(a.windowColor),n));f(F,y,w);a.back?y.backgroundColor=l.hexToRgba(l.rgbHex(r),a.backgroundOpacity):null===F&&f("uniform",y);p=document.createElement("div");q=document.createElement("div");v=document.createElement("span");c(p,{display:"block",height:"auto",position:"absolute",bottom:"20px",textAlign:"center",width:"100%"});c(q,j);c(v,y);q.appendChild(v);p.appendChild(q);
-d.appendChild(p)}})(jwplayer);
-(function(g){function l(a){return a?parseInt(a.width,10)+"px "+parseInt(a.height,10)+"px":"0 0"}var c=g.html5,a=g.utils,d=g.events,k=d.state,b=a.css,e=a.transitionStyle,f=a.isMobile(),C=a.isAndroid(4,!0),p="button",q="text",v="slider",t="none",h="100%",m=!1,D=!0,w=null,n="",F={display:t},r={display:"block"},j={display:n},y="array",s=m,I=window,L=document;c.controlbar=function(e,K){function u(a,b,c){return{name:a,type:b,className:c}}function A(c){b.block(aa);var d=c.duration==Number.POSITIVE_INFINITY,
-h=0===c.duration&&0!==c.position&&a.isSafari()&&!f;d||h?(U.setText(e.jwGetPlaylist()[e.jwGetPlaylistIndex()].title||"Live broadcast"),ka(!1)):(G.elapsed&&(d=a.timeFormat(c.position),G.elapsed.innerHTML=d),G.duration&&(d=a.timeFormat(c.duration),G.duration.innerHTML=d),0<c.duration?Fa(c.position/c.duration):Fa(0),qa=c.duration,ra=c.position,sa||U.setText())}function x(){var a=e.jwGetMute();Ga=e.jwGetVolume()/100;ma("mute",a||0===Ga);Sa(a?0:Ga)}function z(){b.style([G.hd,G.cc],F);b.style(G.cast,s?j:
-F);Za();ba()}function H(a){Ta=a.currentQuality|0;G.hd&&(G.hd.querySelector("button").className=2===ia.length&&0===Ta?"off":n);na&&0<=Ta&&na.setActive(a.currentQuality)}function J(a){ga&&(La=a.track|0,G.cc&&(G.cc.querySelector("button").className=2===ga.length&&0===La?"off":n),oa&&0<=La&&oa.setActive(a.track))}function W(a){if(G.cast){s=a.available;b.style(G.cast,a.available?j:F);var c=G.cast.className.replace(/\s*jwcancast/,"");a.available&&(c+=" jwcancast");G.cast.className=c}E(a)}function E(a){Ma=
-a;G.cast&&(G.cast.querySelector("button").className=a.active?n:"off");ba()}function S(){la=a.extend({},ta,Z.getComponentSettings("controlbar"),K);Ha=V("background").height;var c=ua?0:la.margin;b.style(P,{height:Ha,bottom:c,left:c,right:c,"max-width":ua?n:la.maxwidth});b(Y(".jwtext"),{font:la.fontsize+"px/"+V("background").height+"px "+la.font,color:la.fontcolor,"font-weight":la.fontweight});b(Y(".jwoverlay"),{bottom:Ha})}function Y(a){return"#"+aa+(a?" "+a:n)}function O(){return L.createElement("span")}
-function T(c,d,e,f,h){var k=O(),g=V(c);f=f?" left center":" center";var m=l(g);k.className="jw"+c;k.innerHTML="\x26nbsp;";if(g&&g.src)return e=e?{background:"url('"+g.src+"') repeat-x "+f,"background-size":m,height:h?g.height:n}:{background:"url('"+g.src+"') no-repeat"+f,"background-size":m,width:g.width,height:h?g.height:n},k.skin=g,b(Y((h?".jwvertical ":n)+".jw"+c),a.extend(e,d)),G[c]=k}function X(a,c,d,e){c&&c.src&&(b(a,{width:c.width,background:"url("+c.src+") no-repeat center","background-size":l(c)}),
-d.src&&!f&&b(a+":hover,"+a+".off:hover",{background:"url("+d.src+") no-repeat center","background-size":l(d)}),e&&e.src&&b(a+".off",{background:"url("+e.src+") no-repeat center","background-size":l(e)}))}function $(a){return function(b){yb[a]&&(yb[a](),f&&U.sendEvent(d.JWPLAYER_USER_ACTION));b.preventDefault&&b.preventDefault()}}function ha(b){a.foreach(kb,function(a,c){a!=b&&("cc"==a&&M(),"hd"==a&&Q(),c.hide())})}function ka(a){P&&G.alt&&(void 0===a&&(a=P.parentNode&&320<=P.parentNode.clientWidth),
-a?b.style(Ua,j):b.style(Ua,F))}function Ca(){!ua&&!sa&&(b.block(aa),fa.show(),Ia("volume",fa),ha("volume"))}function ma(b,c){a.exists(c)||(c=!lb[b]);G[b]&&(G[b].className="jw"+b+(c?" jwtoggle jwtoggling":" jwtoggling"),setTimeout(function(){G[b].className=G[b].className.replace(" jwtoggling",n)},100));lb[b]=c}function R(){ia&&2<ia.length&&(mb&&(clearTimeout(mb),mb=void 0),b.block(aa),na.show(),Ia("hd",na),ha("hd"))}function Ba(){ga&&2<ga.length&&(nb&&(clearTimeout(nb),nb=void 0),b.block(aa),oa.show(),
-Ia("cc",oa),ha("cc"))}function wa(a){0<=a&&a<ia.length&&(e.jwSetCurrentQuality(a),Q(),na.hide())}function Ra(a){0<=a&&a<ga.length&&(e.jwSetCurrentCaptions(a),M(),oa.hide())}function Qa(){2==ga.length&&Ra((La+1)%2)}function Aa(){2==ia.length&&wa((Ta+1)%2)}function Da(a){a.preventDefault();L.onselectstart=function(){return m}}function Ea(a){cb();xa=a;I.addEventListener("mouseup",Na,m)}function cb(){I.removeEventListener("mouseup",Na);xa=w}function vb(){G.timeRail.className="jwrail";e.jwGetState()!=
-k.IDLE&&(e.jwSeekDrag(D),Ea("time"),Va(),U.sendEvent(d.JWPLAYER_USER_ACTION))}function jb(b){if(xa){var c=G[xa].querySelector(".jwrail"),c=a.bounds(c),c=b.x/c.width;100<c&&(c=100);b.type==a.touchEvents.DRAG_END?(e.jwSeekDrag(m),G.timeRail.className="jwrail",cb(),db.time(c),Wa()):(Fa(c),500<ra-ob&&(ob=ra,db.time(c)));U.sendEvent(d.JWPLAYER_USER_ACTION)}}function Oa(b){var c=G.time.querySelector(".jwrail"),c=a.bounds(c);b=b.x/c.width;100<b&&(b=100);e.jwGetState()!=k.IDLE&&(db.time(b),U.sendEvent(d.JWPLAYER_USER_ACTION))}
-function wb(a){return function(b){b.button||(G[a+"Rail"].className="jwrail","time"===a?e.jwGetState()!=k.IDLE&&(e.jwSeekDrag(D),Ea(a)):Ea(a))}}function Na(b){if(xa&&!b.button){var c=G[xa].querySelector(".jwrail"),d=a.bounds(c),c=xa,d=G[c].vertical?(d.bottom-b.pageY)/d.height:(b.pageX-d.left)/d.width;"mouseup"==b.type?("time"==c&&e.jwSeekDrag(m),G[c+"Rail"].className="jwrail",cb(),db[c.replace("H",n)](d)):("time"==xa?Fa(d):Sa(d),500<ra-ob&&(ob=ra,db[xa.replace("H",n)](d)));return!1}}function Va(a){a&&
-N.apply(this,arguments);ja&&(qa&&!ua&&!f)&&(b.block(aa),ja.show(),Ia("time",ja))}function Wa(){I.removeEventListener("mousemove",Na);ja&&ja.hide()}function N(b){Ja=a.bounds(P);if((Xa=a.bounds(pb))&&0!==Xa.width)b=b.pageX?b.pageX-Xa.left:b.x,ja.positionX(Math.round(b)),xb(qa*b/Xa.width)}function ya(){a.foreach(eb,function(a,c){var d={};"%"===c.position.toString().slice(-1)?d.left=c.position:0<qa?(d.left=(100*c.position/qa).toFixed(2)+"%",d.display=null):(d.left=0,d.display="none");b.style(c.element,
-d)})}function pa(){nb=setTimeout(oa.hide,500)}function qb(){mb=setTimeout(na.hide,500)}function Pa(a,c,d,e){if(!f){var h=a.element();c.appendChild(h);c.addEventListener("mousemove",d,m);e?c.addEventListener("mouseout",e,m):c.addEventListener("mouseout",a.hide,m);b.style(h,{left:"50%"})}}function za(b,c,e,h){if(f){var k=b.element();c.appendChild(k);(new a.touch(c)).addEventListener(a.touchEvents.TAP,function(){var a=e;"cc"==h?(2==ga.length&&(a=Qa),fb?(M(),b.hide()):(fb=setTimeout(function(){b.hide();
-fb=void 0},4E3),a()),U.sendEvent(d.JWPLAYER_USER_ACTION)):"hd"==h&&(2==ia.length&&(a=Aa),gb?(Q(),b.hide()):(gb=setTimeout(function(){b.hide();gb=void 0},4E3),a()),U.sendEvent(d.JWPLAYER_USER_ACTION))})}}function $a(d){var e=O();e.className="jwgroup jw"+d;Ka[d]=e;if(ca[d]){var e=ca[d],k=Ka[d];if(e&&0<e.elements.length)for(var g=0;g<e.elements.length;g++){var j;a:{j=e.elements[g];var u=d;switch(j.type){case q:u=void 0;j=j.name;var u={},x=V(("alt"==j?"elapsed":j)+"Background");if(x.src){var D=O();D.id=
-aa+"_"+j;"elapsed"==j||"duration"==j?(D.className="jwtext jw"+j+" jwhidden",Ua.push(D)):D.className="jwtext jw"+j;u.background="url("+x.src+") repeat-x center";u["background-size"]=l(V("background"));b.style(D,u);D.innerHTML="alt"!=j?"00:00":n;u=G[j]=D}else u=null;j=u;break a;case p:if("blank"!=j.name){j=j.name;x=u;if(!V(j+"Button").src||f&&("mute"==j||0===j.indexOf("volume"))||C&&/hd|cc/.test(j))j=w;else{var u=O(),D=O(),s=void 0,s=rb,A=T(s.name);A||(A=O(),A.className="jwblankDivider");s.className&&
-(A.className+=" "+s.className);s=A;A=L.createElement("button");u.style+=" display:inline-block";u.className="jw"+j+" jwbuttoncontainer";"left"==x?(u.appendChild(D),u.appendChild(s)):(u.appendChild(s),u.appendChild(D));f?"hd"!=j&&"cc"!=j&&(new a.touch(A)).addEventListener(a.touchEvents.TAP,$(j)):A.addEventListener("click",$(j),m);A.innerHTML="\x26nbsp;";A.tabIndex=-1;D.appendChild(A);x=V(j+"Button");D=V(j+"ButtonOver");s=V(j+"ButtonOff");X(Y(".jw"+j+" button"),x,D,s);(x=Eb[j])&&X(Y(".jw"+j+".jwtoggle button"),
-V(x+"Button"),V(x+"ButtonOver"));j=G[j]=u}break a}break;case v:u=void 0;s=j.name;if(f&&0===s.indexOf("volume"))u=void 0;else{j=O();var D="volume"==s,y=s+("time"==s?"Slider":n)+"Cap",x=D?"Top":"Left",u=D?"Bottom":"Right",A=T(y+x,w,m,m,D),E=T(y+u,w,m,m,D),r;r=s;var z=D,K=x,B=u,S=O(),W=["Rail","Buffer","Progress"],H=void 0,I=void 0;S.className="jwrail";for(var J=0;J<W.length;J++){var I="time"==r?"Slider":n,ka=r+I+W[J],M=T(ka,w,!z,0===r.indexOf("volume"),z),N=T(ka+"Cap"+K,w,m,m,z),R=T(ka+"Cap"+B,w,m,
-m,z),P=V(ka+"Cap"+K),U=V(ka+"Cap"+B);if(M){var Q=O();Q.className="jwrailgroup "+W[J];N&&Q.appendChild(N);Q.appendChild(M);R&&(Q.appendChild(R),R.className+=" jwcap"+(z?"Bottom":"Right"));b(Y(".jwrailgroup."+W[J]),{"min-width":z?n:P.width+U.width});Q.capSize=z?P.height+U.height:P.width+U.width;b(Y("."+M.className),{left:z?n:P.width,right:z?n:U.width,top:z?P.height:n,bottom:z?U.height:n,height:z?"auto":n});2==J&&(H=Q);2==J&&!z?(M=O(),M.className="jwprogressOverflow",M.appendChild(Q),G[ka]=M,S.appendChild(M)):
-(G[ka]=Q,S.appendChild(Q))}}if(K=T(r+I+"Thumb",w,m,m,z))b(Y("."+K.className),{opacity:"time"==r?0:1,"margin-top":z?K.skin.height/-2:n}),K.className+=" jwthumb",(z&&H?H:S).appendChild(K);f?(z=new a.touch(S),z.addEventListener(a.touchEvents.DRAG_START,vb),z.addEventListener(a.touchEvents.DRAG,jb),z.addEventListener(a.touchEvents.DRAG_END,jb),z.addEventListener(a.touchEvents.TAP,Oa)):(H=r,"volume"==H&&!z&&(H+="H"),S.addEventListener("mousedown",wb(H),m));"time"==r&&!f&&(S.addEventListener("mousemove",
-Va,m),S.addEventListener("mouseout",Wa,m));r=G[r+"Rail"]=S;S=V(y+x);y=V(y+x);j.className="jwslider jw"+s;A&&j.appendChild(A);j.appendChild(r);E&&(D&&(E.className+=" jwcapBottom"),j.appendChild(E));b(Y(".jw"+s+" .jwrail"),{left:D?n:S.width,right:D?n:y.width,top:D?S.height:n,bottom:D?y.height:n,width:D?h:n,height:D?"auto":n});G[s]=j;j.vertical=D;"time"==s?(ja=new c.overlay(aa+"_timetooltip",Z),hb=new c.thumbs(aa+"_thumb"),ib=L.createElement("div"),ib.className="jwoverlaytext",sb=L.createElement("div"),
-u=hb.element(),sb.appendChild(u),sb.appendChild(ib),ja.setContents(sb),pb=r,xb(0),u=ja.element(),r.appendChild(u),G.timeSliderRail||b.style(G.time,F),G.timeSliderThumb&&b.style(G.timeSliderThumb,{"margin-left":V("timeSliderThumb").width/-2}),u=V("timeSliderCue"),x={"z-index":1},u&&u.src?(T("timeSliderCue"),x["margin-left"]=u.width/-2):x.display=t,b(Y(".jwtimeSliderCue"),x),va(0),Fa(0),Fa(0),va(0)):0===s.indexOf("volume")&&(s=j,A="volume"+(D?n:"H"),E=D?"vertical":"horizontal",b(Y(".jw"+A+".jw"+E),
-{width:V(A+"Rail",D).width+(D?0:V(A+"Cap"+x).width+V(A+"RailCap"+x).width+V(A+"RailCap"+u).width+V(A+"Cap"+u).width),height:D?V(A+"Cap"+x).height+V(A+"Rail").height+V(A+"RailCap"+x).height+V(A+"RailCap"+u).height+V(A+"Cap"+u).height:n}),s.className+=" jw"+E);u=j}j=u;break a}j=void 0}j&&("volume"==e.elements[g].name&&j.vertical?(fa=new c.overlay(aa+"_volumeOverlay",Z),fa.setContents(j)):k.appendChild(j))}}}function ba(){clearTimeout(zb);zb=setTimeout(U.redraw,0)}function Za(){!tb&&1<e.jwGetPlaylist().length&&
-(!L.querySelector("#"+e.id+" .jwplaylist")||e.jwGetFullscreen())?(b.style(G.next,j),b.style(G.prev,j)):(b.style(G.next,F),b.style(G.prev,F))}function Ia(b,c){Ja||(Ja=a.bounds(P));c.constrainX(Ja,!0)}function va(a){G.timeSliderBuffer&&(a=Math.min(Math.max(0,a),1),b.style(G.timeSliderBuffer,{width:(100*a).toFixed(1)+"%",opacity:0<a?1:0}))}function Ya(a,c){if(G[a]){var d=G[a].vertical,e=a+("time"===a?"Slider":n),f=100*Math.min(Math.max(0,c),1)+"%",h=G[e+"Progress"],e=G[e+"Thumb"],j;h&&(j={},d?(j.height=
-f,j.bottom=0):j.width=f,"volume"!==a&&(j.opacity=0<c||xa?1:0),b.style(h,j));e&&(j={},d?j.top=0:j.left=f,b.style(e,j))}}function Sa(a){Ya("volume",a);Ya("volumeH",a)}function Fa(a){Ya("time",a)}function V(a){var b="controlbar",c=a;0===a.indexOf("volume")&&(0===a.indexOf("volumeH")?c=a.replace("volumeH","volume"):b="tooltip");return(a=Z.getSkinElement(b,c))?a:{width:0,height:0,src:n,image:void 0,ready:m}}function M(){clearTimeout(fb);fb=void 0}function Q(){clearTimeout(gb);gb=void 0}function da(b){b=
-(new g.parsers.srt).parse(b.responseText,!0);if(a.typeOf(b)!==y)return ea("Invalid data");U.addCues(b)}function ea(b){a.log("Cues failed to load: "+b)}var Z,rb=u("divider","divider"),ta={margin:8,maxwidth:800,font:"Arial,sans-serif",fontsize:11,fontcolor:15658734,fontweight:"bold",layout:{left:{position:"left",elements:[u("play",p),u("prev",p),u("next",p),u("elapsed",q)]},center:{position:"center",elements:[u("time",v),u("alt",q)]},right:{position:"right",elements:[u("duration",q),u("hd",p),u("cc",
-p),u("mute",p),u("volume",v),u("volumeH",v),u("cast",p),u("fullscreen",p)]}}},la,ca,G,Ha,P,aa,qa,ra,ia,Ta,ga,La,Ga,Ma={},fa,Ja,pb,Xa,ja,sb,hb,ib,mb,gb,na,nb,fb,oa,zb,ab=-1,ua=m,sa=m,tb=m,ub=m,xa=w,ob=0,eb=[],bb,Eb={play:"pause",mute:"unmute",fullscreen:"normalscreen"},lb={play:m,mute:m,fullscreen:m},yb={play:function(){lb.play?e.jwPause():e.jwPlay()},mute:function(){var a=!lb.mute;e.jwSetMute(a);!a&&0===Ga&&e.jwSetVolume(20);x()},fullscreen:function(){e.jwSetFullscreen()},next:function(){e.jwPlaylistNext()},
-prev:function(){e.jwPlaylistPrev()},hd:Aa,cc:Qa,cast:function(){Ma.active?e.jwStopCasting():e.jwStartCasting()}},db={time:function(a){bb?(a=bb.position,a="%"===a.toString().slice(-1)?qa*parseFloat(a.slice(0,-1))/100:parseFloat(a)):a*=qa;e.jwSeek(a)},volume:function(a){Sa(a);0.1>a&&(a=0);0.9<a&&(a=1);e.jwSetVolume(100*a)}},kb={},Ua=[],U=a.extend(this,new d.eventdispatcher),xb,Ab,Fb=function(a){b.style(ja.element(),{width:a});Ia("time",ja)};xb=function(c){var d=hb.updateTimeline(c,Fb);if(bb){if((c=
-bb.text)&&c!==Ab)Ab=c,b.style(ja.element(),{width:32<c.length?160:n})}else c=a.timeFormat(c),d||b.style(ja.element(),{width:n});ib.innerHTML!==c&&(ib.innerHTML=c);Ia("time",ja)};U.setText=function(a){b.block(aa);var c=G.alt,d=G.time;G.timeSliderRail?b.style(d,a?F:r):b.style(d,F);c&&(b.style(c,a?r:F),c.innerHTML=a||n);ba()};var Ka={};U.redraw=function(c){b.block(aa);c&&U.visible&&U.show(D);S();var d=top!==window.self&&a.isMSIE();c=Ma.active;b.style(G.fullscreen,{display:ua||c||ub||d?t:n});b.style(G.volumeH,
-{display:ua||sa?"block":t});(d=la.maxwidth|0)&&P.parentNode&&a.isIE()&&(!ua&&P.parentNode.clientWidth>d+(la.margin|0)?b.style(P,{width:d}):b.style(P,{width:n}));fa&&b.style(fa.element(),{display:!ua&&!sa?"block":t});b.style(G.hd,{display:!ua&&!c&&!sa&&ia&&1<ia.length&&na?n:t});b.style(G.cc,{display:!ua&&!c&&!sa&&ga&&1<ga.length&&oa?n:t});ya();b.unblock(aa);U.visible&&(c=V("capLeft"),d=V("capRight"),c={left:Math.round(a.parseDimension(Ka.left.offsetWidth)+c.width),right:Math.round(a.parseDimension(Ka.right.offsetWidth)+
-d.width)},b.style(Ka.center,c))};U.audioMode=function(a){void 0!==a&&a!==ua&&(ua=!!a,ba());return ua};U.instreamMode=function(a){void 0!==a&&a!==sa&&(sa=!!a,b.style(G.cast,sa?F:j));return sa};U.adMode=function(a){if(void 0!==a&&a!==tb){tb=!!a;if(a){var c=Ua,d=c.indexOf(G.elapsed);-1<d&&c.splice(d,1);c=Ua;d=c.indexOf(G.duration);-1<d&&c.splice(d,1)}else c=Ua,d=G.elapsed,-1===c.indexOf(d)&&c.push(d),c=Ua,d=G.duration,-1===c.indexOf(d)&&c.push(d);b.style([G.cast,G.elapsed,G.duration],a?F:j);Za()}return tb};
-U.hideFullscreen=function(a){void 0!==a&&a!==ub&&(ub=!!a,ba());return ub};U.element=function(){return P};U.margin=function(){return parseInt(la.margin,10)};U.height=function(){return Ha};U.show=function(c){if(!U.visible||c)U.visible=!0,b.style(P,{display:"inline-block"}),Ja=a.bounds(P),ka(),b.block(aa),x(),ba(),clearTimeout(ab),ab=-1,ab=setTimeout(function(){b.style(P,{opacity:1})},0)};U.showTemp=function(){this.visible||(P.style.opacity=0,P.style.display="inline-block")};U.hideTemp=function(){this.visible||
-(P.style.display=t)};U.addCues=function(b){a.foreach(b,function(a,b){if(b.text){var c=b.begin,d=b.text;if(/^[\d\.]+%?$/.test(c.toString())){var e=T("timeSliderCue"),f=G.timeSliderRail,j={position:c,text:d,element:e};e&&f&&(f.appendChild(e),e.addEventListener("mouseover",function(){bb=j},!1),e.addEventListener("mouseout",function(){bb=w},!1),eb.push(j))}ya()}})};U.hide=function(){U.visible&&(U.visible=!1,b.style(P,{opacity:0}),clearTimeout(ab),ab=-1,ab=setTimeout(function(){b.style(P,{display:t})},
-250))};G={};aa=e.id+"_controlbar";qa=ra=0;P=O();P.id=aa;P.className="jwcontrolbar";Z=e.skin;ca=Z.getComponentLayout("controlbar");ca||(ca=ta.layout);a.clearCss(Y());b.block(aa+"build");S();var Bb=T("capLeft"),Cb=T("capRight"),Db=T("background",{position:"absolute",left:V("capLeft").width,right:V("capRight").width,"background-repeat":"repeat-x"},D);Db&&P.appendChild(Db);Bb&&P.appendChild(Bb);$a("left");$a("center");$a("right");P.appendChild(Ka.left);P.appendChild(Ka.center);P.appendChild(Ka.right);
-G.hd&&(na=new c.menu("hd",aa+"_hd",Z,wa),f?za(na,G.hd,R,"hd"):Pa(na,G.hd,R,qb),kb.hd=na);G.cc&&(oa=new c.menu("cc",aa+"_cc",Z,Ra),f?za(oa,G.cc,Ba,"cc"):Pa(oa,G.cc,Ba,pa),kb.cc=oa);G.mute&&(G.volume&&G.volume.vertical)&&(fa=new c.overlay(aa+"_volumeoverlay",Z),fa.setContents(G.volume),Pa(fa,G.mute,Ca),kb.volume=fa);b.style(Ka.right,{right:V("capRight").width});Cb&&P.appendChild(Cb);b.unblock(aa+"build");e.jwAddEventListener(d.JWPLAYER_MEDIA_TIME,A);e.jwAddEventListener(d.JWPLAYER_PLAYER_STATE,function(a){switch(a.newstate){case k.BUFFERING:case k.PLAYING:G.timeSliderThumb&&
-b.style(G.timeSliderThumb,{opacity:1});ma("play",D);break;case k.PAUSED:xa||ma("play",m);break;case k.IDLE:ma("play",m),G.timeSliderThumb&&b.style(G.timeSliderThumb,{opacity:0}),G.timeRail&&(G.timeRail.className="jwrail"),va(0),A({position:0,duration:0})}});e.jwAddEventListener(d.JWPLAYER_PLAYLIST_ITEM,function(b){if(!sa){b=e.jwGetPlaylist()[b.index].tracks;var c=m,d=G.timeSliderRail;a.foreach(eb,function(a,b){d.removeChild(b.element)});eb.length=0;if(a.typeOf(b)==y&&!f)for(var j=0;j<b.length;j++)if(!c&&
-(b[j].file&&b[j].kind&&"thumbnails"==b[j].kind.toLowerCase())&&(hb.load(b[j].file),c=D),b[j].file&&b[j].kind&&"chapters"==b[j].kind.toLowerCase()){var h=b[j].file;h?a.ajax(h,da,ea,D):eb.length=0}c||hb.load()}});e.jwAddEventListener(d.JWPLAYER_MEDIA_MUTE,x);e.jwAddEventListener(d.JWPLAYER_MEDIA_VOLUME,x);e.jwAddEventListener(d.JWPLAYER_MEDIA_BUFFER,function(a){va(a.bufferPercent/100)});e.jwAddEventListener(d.JWPLAYER_FULLSCREEN,function(a){ma("fullscreen",a.fullscreen);Za();U.visible&&U.show(D)});
-e.jwAddEventListener(d.JWPLAYER_PLAYLIST_LOADED,z);e.jwAddEventListener(d.JWPLAYER_MEDIA_LEVELS,function(a){ia=a.levels;if(!sa&&ia&&1<ia.length&&na){b.style(G.hd,j);na.clearOptions();for(var c=0;c<ia.length;c++)na.addOption(ia[c].label,c);H(a)}else b.style(G.hd,F);ba()});e.jwAddEventListener(d.JWPLAYER_MEDIA_LEVEL_CHANGED,H);e.jwAddEventListener(d.JWPLAYER_CAPTIONS_LIST,function(a){ga=a.tracks;if(!sa&&ga&&1<ga.length&&oa){b.style(G.cc,j);oa.clearOptions();for(var c=0;c<ga.length;c++)oa.addOption(ga[c].label,
-c);J(a)}else b.style(G.cc,F);ba()});e.jwAddEventListener(d.JWPLAYER_CAPTIONS_CHANGED,J);e.jwAddEventListener(d.JWPLAYER_RESIZE,function(){Ja=a.bounds(P);0<Ja.width&&U.show(D)});e.jwAddEventListener(d.JWPLAYER_CAST_AVAILABLE,W);e.jwAddEventListener(d.JWPLAYER_CAST_SESSION,E);f||(P.addEventListener("mouseover",function(){I.addEventListener("mousedown",Da,m)},!1),P.addEventListener("mouseout",function(){I.removeEventListener("mousedown",Da);L.onselectstart=null},!1));setTimeout(x,0);z();U.visible=!1;
-W({available:s})};b("span.jwcontrolbar",{position:"absolute",margin:"auto",opacity:0,display:t});b("span.jwcontrolbar span",{height:h});a.dragStyle("span.jwcontrolbar span",t);b("span.jwcontrolbar .jwgroup",{display:"inline"});b("span.jwcontrolbar span, span.jwcontrolbar .jwgroup button,span.jwcontrolbar .jwleft",{position:"relative","float":"left"});b("span.jwcontrolbar .jwright",{position:"relative","float":"right"});b("span.jwcontrolbar .jwcenter",{position:"absolute"});b("span.jwcontrolbar buttoncontainer,span.jwcontrolbar button",
-{display:"inline-block",height:h,border:t,cursor:"pointer"});b("span.jwcontrolbar .jwcapRight,span.jwcontrolbar .jwtimeSliderCapRight,span.jwcontrolbar .jwvolumeCapRight",{right:0,position:"absolute"});b("span.jwcontrolbar .jwcapBottom",{bottom:0,position:"absolute"});b("span.jwcontrolbar .jwtime",{position:"absolute",height:h,width:h,left:0});b("span.jwcontrolbar .jwthumb",{position:"absolute",height:h,cursor:"pointer"});b("span.jwcontrolbar .jwrail",{position:"absolute",cursor:"pointer"});b("span.jwcontrolbar .jwrailgroup",
-{position:"absolute",width:h});b("span.jwcontrolbar .jwrailgroup span",{position:"absolute"});b("span.jwcontrolbar .jwdivider+.jwdivider",{display:t});b("span.jwcontrolbar .jwtext",{padding:"0 5px","text-align":"center"});b("span.jwcontrolbar .jwcast",{display:t});b("span.jwcontrolbar .jwcast.jwcancast",{display:"block"});b("span.jwcontrolbar .jwalt",{display:t,overflow:"hidden"});b("span.jwcontrolbar .jwalt",{position:"absolute",left:0,right:0,"text-align":"left"},D);b("span.jwcontrolbar .jwoverlaytext",
-{padding:3,"text-align":"center"});b("span.jwcontrolbar .jwvertical *",{display:"block"});b("span.jwcontrolbar .jwvertical .jwvolumeProgress",{height:"auto"},D);b("span.jwcontrolbar .jwprogressOverflow",{position:"absolute",overflow:"hidden"});e("span.jwcontrolbar","opacity .25s, background .25s, visibility .25s");e("span.jwcontrolbar button","opacity .25s, background .25s, visibility .25s");e("span.jwcontrolbar .jwtoggling",t)})(window.jwplayer);
-(function(g){var l=g.utils,c=g.events,a=c.state,d=g.playlist,k=!0,b=!1;g.html5.controller=function(e,f){function C(){return e.getVideo()}function p(a){K.sendEvent(a.type,a)}function q(a){t(k);switch(l.typeOf(a)){case "string":var b=new d.loader;b.addEventListener(c.JWPLAYER_PLAYLIST_LOADED,function(a){q(a.playlist)});b.addEventListener(c.JWPLAYER_ERROR,function(a){q([]);a.message="Could not load playlist: "+a.message;p(a)});b.load(a);break;case "object":case "array":e.setPlaylist(new g.playlist(a));
-break;case "number":e.setItem(a)}}function v(d){l.exists(d)||(d=k);if(!d)return h();try{0<=j&&(q(j),j=-1);if(!y&&(y=k,K.sendEvent(c.JWPLAYER_MEDIA_BEFOREPLAY),y=b,L)){L=b;s=null;return}if(e.state==a.IDLE){if(0===e.playlist.length)return b;C().load(e.playlist[e.item])}else e.state==a.PAUSED&&C().play();return k}catch(f){K.sendEvent(c.JWPLAYER_ERROR,f),s=null}return b}function t(d){s=null;try{return e.state!=a.IDLE?C().stop():d||(I=k),y&&(L=k),k}catch(f){K.sendEvent(c.JWPLAYER_ERROR,f)}return b}function h(d){s=
-null;l.exists(d)||(d=k);if(!d)return v();try{switch(e.state){case a.PLAYING:case a.BUFFERING:C().pause();break;default:y&&(L=k)}return k}catch(f){K.sendEvent(c.JWPLAYER_ERROR,f)}return b}function m(a){l.css.block(e.id+"_next");q(a);v();l.css.unblock(e.id+"_next")}function D(){m(e.item+1)}function w(){e.state==a.IDLE&&(I?I=b:(s=w,e.repeat?D():e.item==e.playlist.length-1?(j=0,t(k),setTimeout(function(){K.sendEvent(c.JWPLAYER_PLAYLIST_COMPLETE)},0)):D()))}function n(a){return function(){r?F(a,arguments):
-B.push({method:a,arguments:arguments})}}function F(a,b){var c=[],d;for(d=0;d<b.length;d++)c.push(b[d]);a.apply(this,c)}var r=b,j=-1,y=b,s,I=b,L,B=[],K=l.extend(this,new c.eventdispatcher(e.id,e.config.debug));this.play=n(v);this.pause=n(h);this.seek=n(function(b){e.state!=a.PLAYING&&v(k);C().seek(b)});this.stop=function(){e.state==a.IDLE&&(I=k);n(t)()};this.load=n(q);this.next=n(D);this.prev=n(function(){m(e.item-1)});this.item=n(m);this.setVolume=n(e.setVolume);this.setMute=n(e.setMute);this.setFullscreen=
-n(function(a){f.fullscreen(a)});this.detachMedia=function(){try{return e.getVideo().detachMedia()}catch(a){return null}};this.attachMedia=function(a){try{e.getVideo().attachMedia(a),"function"==typeof s&&s()}catch(b){return null}};this.setCurrentQuality=n(function(a){C().setCurrentQuality(a)});this.getCurrentQuality=function(){return C()?C().getCurrentQuality():-1};this.getQualityLevels=function(){return C()?C().getQualityLevels():null};this.setCurrentCaptions=n(function(a){f.setCurrentCaptions(a)});
-this.getCurrentCaptions=function(){return f.getCurrentCaptions()};this.getCaptionsList=function(){return f.getCaptionsList()};this.checkBeforePlay=function(){return y};this.playerReady=function(a){if(!r){f.completeSetup();K.sendEvent(a.type,a);g.utils.exists(g.playerReady)&&g.playerReady(a);e.addGlobalListener(p);f.addGlobalListener(p);K.sendEvent(g.events.JWPLAYER_PLAYLIST_LOADED,{playlist:g(e.id).getPlaylist()});K.sendEvent(g.events.JWPLAYER_PLAYLIST_ITEM,{index:e.item});q();e.autostart&&!l.isMobile()&&
-v();for(r=k;0<B.length;)a=B.shift(),F(a.method,a.arguments)}};e.addEventListener(c.JWPLAYER_MEDIA_BUFFER_FULL,function(){C().play()});e.addEventListener(c.JWPLAYER_MEDIA_COMPLETE,function(){setTimeout(w,25)});e.addEventListener(c.JWPLAYER_MEDIA_ERROR,function(a){a=l.extend({},a);a.type=c.JWPLAYER_ERROR;K.sendEvent(a.type,a)})}})(jwplayer);(function(g){g.html5.defaultskin=function(){return g.utils.parseXML('\x3c?xml version\x3d"1.0" ?\x3e\x3cskin author\x3d"JW Player" name\x3d"Six" target\x3d"6.7" version\x3d"3.0"\x3e\x3ccomponents\x3e\x3ccomponent name\x3d"controlbar"\x3e\x3csettings\x3e\x3csetting name\x3d"margin" value\x3d"10"/\x3e\x3csetting name\x3d"maxwidth" value\x3d"800"/\x3e\x3csetting name\x3d"fontsize" value\x3d"11"/\x3e\x3csetting name\x3d"fontweight" value\x3d"normal"/\x3e\x3csetting name\x3d"fontcase" value\x3d"normal"/\x3e\x3csetting name\x3d"fontcolor" value\x3d"0xd2d2d2"/\x3e\x3c/settings\x3e\x3celements\x3e\x3celement name\x3d"background" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAeCAYAAADtlXTHAAAANklEQVR4AWMUFRW/x2RiYqLI9O3bNwam////MzAxAAGcAImBWf9RuRAxnFyEUQgDCLKATLCDAFb+JfgLDLOxAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"capLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAeCAYAAAARgF8NAAAAr0lEQVR4AWNhAAJRUXEFIFUOxNZAzMOABFiAkkpAeh0fH5+IgoKCKBsQoCgA4lJeXl5ReXl5qb9//zJ8+/aNAV2Btbi4uOifP39gYhgKeFiBAEjjUAAFlCn4/5+gCf9pbwVhNwxhKxAm/KdDZA16E778/v37DwsLKwsuBUdfvXopISUlLYpLQc+vX78snz17yigqKibAAgQoCuTlFe4+fPggCKio9OnTJzZAMW5kBQAEFD9DdqDrQQAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"capRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAeCAYAAAARgF8NAAAArklEQVR4Ad2TMQrCQBBF/y5rYykEa++QxibRK3gr0dt4BPUSLiTbKMYUSlgt3IFxyogJsRHFB6/7/A+7jIqiYYZnvLgV56IzcRyPUOMuOOcGVVWNAcxUmk4ZNZRS0Fojz/O9936lkmTCaICIgrV2Z9CCMaYHoK/RQWfAMHcEAP7QxPsNAP/BBDN/+7N+uoEoEIBba0NRHM8A1i8vSUJZni4hhAOAZdPxXsWNuBCzB0E+V9jBVxF8AAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"playButton" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAeCAQAAACcJxZuAAAAtElEQVR4AWOgLRgFnAyiDPwMzMRrkHuwuCSdQZ14Tbpv9v/cf2UN8ZoMHu5/uP/l/h9EazK4sx8Cn+7/RpQmg+v74RBo11eCmgwu7keFd/d/wavJ4PR+THhj/6f9N1ZODWTgxKLhyH7scMvK3iCsGvbtx4Tz1oZn4HTSjv2ocObakAy8nt60HwGnrA3KIBisa/dD4IS1/lDFBJLGiv0r9ves9YUpJpz4Ji72hiomNXnTH4wCAAxXpSnKMgKaAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"playButtonOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAeCAQAAACcJxZuAAAAtElEQVR4AWOgLRgFPAwyDCIMLMRr0Hhws6SLwYR4TTZv/v/8f+UZ8ZocHv5/+P/l/x9Ea3K48x8Cn/7/RpQmh+v/4RBo11eCmhwu/keFd/9/wavJ4fR/THjj/6f/Nx5OzWHgwaLhyH/scMuj3lysGvb9x4Tznod343TSjv+ocObzkG68nt70HwGnPA/qJhisa/9D4ITn/lDFBJLGiv8r/vc894UpJpz4Jt7yhiomNXnTH4wCAHC8wQF60KqlAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"pauseButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAeCAQAAACcJxZuAAAAYElEQVR4AWOgNRgFPAwqDAZAqAJkofPhgBFJg8r/2VDBVIY7GHwoYEG24RmchcnHpoHhDxDj4WNq+I0m+ZvqGn6hSf6iuoafaJI/SbaB7hroHw9f/sBZ6HzSkzdtwSgAADNtJoABsotOAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"pauseButtonOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAeCAQAAACcJxZuAAAAWklEQVR4AWOgNRgFAgwGDA5AaABkofOxAoP/UMBggMGHAxZkG57BWeh87BoY/gAxHj6mht9okr+pruEXmuQvqmv4iSb5k2Qb6K6B/vHw4Q+chc4nPXnTFowCADYgMi8+iyldAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"prevButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAAeCAQAAACLBYanAAAAmElEQVR4AWMYMDAKeBgkgBgGmBn4GUQZONEVqfzfz6ACV6Bekv5gMYMcuiKDR/sZDGAKrqz5sf/lfgZdDEW39jPYQxR82/94/y0gZDDAUHR+f3rpjZWf99/efx4CsSk6sj+pbMvKI/vhEJuiXWDrQjNmr921HwyxKVoPd3hAxsS16/evx+JwleUoQeCbMRkRBIQDk/5gFAAAvD5I9xunLg8AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"prevButtonOver" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAAeCAQAAACLBYanAAAAmUlEQVR4AWMYMDAKBBgUgBgGWBhEGGQYeNAVGfz/z2AAV2BS0vXgJoMGuiKHR/8ZHGAKrjz78f/lfwYbDEW3/jOEQBR8+//4/y0gZHDAUHT+f/qcGw8//7/9/zwEYlN05H/S3C2PjvyHQ2yKdoGtC+2e/XzXfzDEpmg93OEB3ROfr/+/HovDDZajBIFv9+RbDBpEByb9wSgAAHeuVc8xgA8jAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"nextButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABEAAAAeCAQAAABgMj2kAAAAlUlEQVR4AWOgAxgFnAyiDPwMzHA+D4MEEKMAuQeLS9IZ1OHKVP7vZ1BBVaL7cv+P/VfWwJUZPNrPYICqxODW/lv7H+//BlNmfwtTyfn9EHh7/+f9N1aml57HVHJkPwJuWZlUdgRTya79EDh7bWgGyKJdGEp01+9fv3/i2oAMmHPXYyiRm7zYNwPZ08vBniYcdDQHowAA/MZI93f1cSkAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"nextButtonOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABEAAAAeCAQAAABgMj2kAAAAlUlEQVR4AWOgAxgFPAwyDCIMLHC+AIMCEKMAjQc3S7oYTODKDP7/ZzBAVWLz8v+P/1eewZU5PPrP4ICqxOHW/1v/H///BlMWcgtTyfn/EHj7/+f/Nx6mzzmPqeTIfwTc8ihp7hFMJbv+Q+Ds56HdIIt2YSixWf9//f+JzwO6Yc5dj6FEY/It325kTy8He5pw0NEcjAIAWP9Vz4mR7dgAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"elapsedBackground" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAYAAAAeCAYAAAAPSW++AAAAD0lEQVQoU2NgGAWjYKQAAALuAAGL6/H9AAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"durationBackground" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAYAAAAeCAYAAAAPSW++AAAAD0lEQVQoU2NgGAWjYKQAAALuAAGL6/H9AAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"timeSliderCapLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAMAAAAeCAYAAADpYKT6AAAAFElEQVR42mP4//8/AwwzjHIGhgMAcFgNAkNCQTAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"timeSliderCapRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAMAAAAeCAYAAADpYKT6AAAAFElEQVR42mP4//8/AwwzjHIGhgMAcFgNAkNCQTAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"timeSliderRail" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAeCAYAAADtlXTHAAAALklEQVQI12NgIBmIior/ZxIVFWNgAgI4wcjAxMgI4zIyMkJYYMUM////5yXJCgBxnwX/1bpOMAAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"timeSliderRailCapLeft" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAeCAYAAADkftS9AAAAnUlEQVR42t3NSwrCMBSF4TsQBHHaaklJKRTalKZJ+lAXoTPBDTlyUYprKo6PN4F2D3rgm/yQG/rfRdHuwp5smsNdCImiKKFUAx/OaSpR1xpNYwKK4/2rLBXa1s1CnIxxsLZbhGhtD+eGBSWJePt7fX9YUFXVVylzdN2IYTgGBGCVZfmDQWuDcTyB/ACsOdz8Kf7jQ/P8C7ZhW/rlfQGDz0pa/ncctQAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"timeSliderRailCapRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAeCAYAAADkftS9AAAAn0lEQVR42t3MTwqCQBTH8bcIgmirJYoiCOowzh8ds0PULjpRqw5VdCZr/WueMJfwC5/NezOP1lcUHWbv5V0o1LYSVVUjTXP4xYM4KTWYEB2ybFlcSSmLoK4F4vj4JmN6BFpbHs5krUNgzMDDLw3DCQHfTZL0Q85NYH0/Is9LNI240Tie0XUaRVGyJ4AN+Rs//qKUuQPYEgdg7+2WF2voDzqVSl5A2koAAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"timeSliderBuffer" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAeCAYAAADtlXTHAAAAKElEQVQI12NgIA/IyMj9Z2JhYWFgAgIGJkZGRhDBwMDEwMAI5TKQDwCHIAF/C8ws/gAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"timeSliderBufferCapLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAeCAYAAADkftS9AAAAY0lEQVR42uXJyxGAIAxFUfrgI5CgzajdqlWxQffxaeiCzJyZ5MYMNtb6zTl/OhfuP2BZQ4h1mpLEmOWPCMd3pESSM2vE0YiKdBqJuDEXUT0yzydIp7GUZYMKAhr7Y4cLHjPGvMB5JcRMsOVwAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"timeSliderBufferCapRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAeCAYAAADkftS9AAAAYElEQVQoz+WLyxGAIAwF6YM/CdqMlCtdcRHvMSIw9sCb2ctuIsQaU8pUpfQppT6mdC6QtZ6McYUPUpMhIHkP9EYOuUmASAOOV5OIkQYAWLvc6Mf3HuNOncKkIW8mT7HOHpUUJcPzmTX0AAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"timeSliderProgress" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAeCAQAAABHnLxMAAAAH0lEQVQI12NgIAT+/2e6x8D0k4HpOxj9AJM/CWpjAACWQgi68LWdTgAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"timeSliderProgressCapLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAeCAQAAABOdxw2AAAARUlEQVQYV2NkgANG+jP/+zJkMtgCmf99vi38KPQTJPpq6xsvqIKznxh4ocwjCOaebQyeUOZmX4YFDEJQw9b4QQ2DAfoyAVkTEmC7RwxJAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"timeSliderProgressCapRight" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAeCAQAAABOdxw2AAAASklEQVQYV8XLIRKAMAxE0R4QbhrXoQqJxWJxCGZqaKs/m1yi+80TSUqzRmNjCd48jMoqXnhvEU+iTzyImrgT+UFG1exv1q2YY95+oTIxx/xENX8AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"timeSliderThumb" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAeCAQAAACP8FaaAAABMElEQVR4AeWSv0rzYBjFfy1NlU5RKC3dCjqZDwRXEapOuuik+BfbNLdUeg86pHSrm1Z3G3w7VAdbB+sNFFKIZ1FCjTjL95wQOOd3IC/vE/6vSZEmQ5Z5KUtGLhWjshYLbHCIKx2wLmcp/cJzOFTb/vtoGk7D8bDtc4GjNP2J/+ENzFv0FBnpORpHA4OnVBWwKFANTD96jKkfBYYqRVFyVC5bCr/pqsWmKDZHd8Okwv2IY1HyuL0wqRCE1EUp/lR4mFAT1XNym/iJ7pBTCpBnp5l4yGaLXVFsVqh1zCzuGGoiNuQoUcG7NjPYU1oSxVKrzDZuw+++BtPe5Oal4eOypdQWRVfNoswa+5xTl87YkysrjW3DpsQyDquSw5KcjXB83TlFeYoU9LbltO7ff5i/Mh+pOuncDFLYKwAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"timeSliderCue" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAeCAYAAAAl+Z4RAAAAcUlEQVQ4y2NgGAWjYBTgBaKi4llAfASKs0jWbGNj96S1tf03CIPYJBkCsrW6uu53bm7+fxAGsUFiJBmQlpbxOzMz5z8Ig9hAsaMkecHIyORJUlLq78TElN8gNlAsm9RwyAbZCsSHgDhzNFmNglGAHwAAo/gvURVBmFAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"hdButtonOff" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB0AAAAeCAYAAADQBxWhAAABf0lEQVR42u2VvUoDQRSFA0awMIVCsv+z/1oE8yOE9MYmtb2P4AspSOyECFZqtU9gbZvK6CNoNZ6zMMuSQpxdEAJbHC737pz59mbmblpSyn9XA22gDXRLod2uMYfWkKwh+uc60LVtO9J1RWXBn4N1oNL3QxkEEcwuzYybOWMh07QJ4xqK/ryuBQ3DWEZRoowdx3FfhAgkI3NVp7IsO5xMpnPDsFae59NHvzaURgWlWpblPEOSkbmqQzfQK2DT8fj0HB0rrz40jlOqgA4Go1m/f3LJWIYC8uQ4nkSX94vF3S5qX8qrDU2SlCqgOMMrAK4Zy1B27nlCIj4i34G+lbcC9ChXuSNeFEbmpZe5RZdv+BU4ZjM8V159aJoe5yp3JIS/eaZcv7dcPhzghc6Qr3DZlLc6FOelRoTn9OvI4DKxw2rQXs/84KzRyLPhTSSQGzIyV2OBdYzIYz4rgKxjn88/Q4fD0QUNNT6BBL5zH50Pfhvahzo1RH+7+WtroA10O6E/bVCWtAEB8p4AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"hdButton" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB0AAAAeCAQAAAB6Dt0qAAABPUlEQVR4Ae2SsUrDUBiF/0EFfYK8Rl4g5BUUHGILRWghUHAQHJzaUcjSgB1EtCApliDoUApSKggZRFSUQsVAawspElz1OunxhwtZcm0Ht9LzQfLByVluLs145lkkjXQyyPwTg3uNv0tFKzuR+MAkIlF2eJyKPhBjRBMZYyBIp1SMEV6nMgIZlIoZQkJuIw7RiMll36XN5e31k0AkramYdiGhQjPsohlSgT13GTy8WXurR0mrmt5BQla+ZJ/mS2SxF8+GT7joLRRvvmWrnAaQULbi1R4rHmXZi/VhAO9laev6R7bKaQcSsv3+Lfw+2ey548B/t/Yz3pVs1dMWJORW4xaqfEzsfEwrO2te5ytpFVPjHJJntPnZ5jc708M9muwS1c/Ra8LHNGrKK6FlnENRxyQOPjcc0v5z/Wc68/wCXWlzVKUYIC4AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"ccButtonOff" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB0AAAAeCAYAAADQBxWhAAABzUlEQVR42u1Uu0oDQRQVTCMopMjmtZvdJPswKCQbC6tYCEqMBDUGrf2NCDF+gmXEyiZWiTb+gMTGxtrGwmh8IOKjUoLjueNGfCBk10rYC4eZOey5Z+7M3O1zww033Og5BCGQA9oAcw6uz9kxbYfDIpMk2TGg58Z2TJmixFg0GueIRBQWDIZ5BX5/kIli5AcfCIS6PIH0nLdlGoupLB7XmCxHyegymTSXa7UdoVBYHBVFqQEDMjozzfRCvd7w5fNzKfD74ElHevumEHKEQiJD4nmYz4JvwWirWt30YiO36fTYNKotgj8Hv1GprPvAP1obtm+qqjqBhC/l8toAkh18uqs7rK8ZY/0Yj8AT90o80LG09k01TQe48Bnw4O6asqzw5DjGXVR2Qt9iPLb4Dh07NnGvqhq0jkwNQvehTCYSI0tIeIWqtq1jfAA/bhiJFcxvcPzVUmlVwPwJVZLWvqmuD3MgGYlbGHPN5qE3m52JYU0PifhTGEwRn8lMaFjvYVNdrXNT7BjGX1tGkvgL/dYyxMv0vTNTahH02ocY1cBEpTbgeL8z41eeNKSn6+jZNJUyiyT4y28Q+gvK07MpWsEDDAJDzsH1nj433HDjX8YbqHFYmhICTLsAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"ccButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB0AAAAeCAQAAAB6Dt0qAAABWElEQVR4AWMY5mAUsDJIMBgy2DE44IR2QHkJoDoMINHQ/eTbl//44JNvDd1AzRjA8N63p/+f4IVP/9/7BrQZA9g9/H+fIHz4H+hsDOBw6z8EnvqZsJ6vznDCkke3/h/9Hr2ap9Z08oqnMFkGByxaL/+HwMiVafNufFl+hWvmiR+BC/IX3/yy4Bz/nJN/wbLYtZ75D4In/3GV7n56/v+1/zd/H/rGkHPgJYh94/fp/2B57FqP/AfBg/84SlY/O/L/8P+JLze/Z8je8PrI/0P/Jrza+Rcsj13r3v8guO9/+LKEhZu+9lzmn7zrl++c9BWbv7WfE5iy/S9YHrvWbf8hcP+P0FVsVSo9y57s+L/vm/9ytiqtvhVANlgWq1a79f8hcDPQR9eBAbIHyN7y/yyQfQnEhkCskWM4/9uq/4TgfKxJQiK6e/a3pf/xwZlfo4AJkZLkP6zBKAAAGMt/2TouFxQAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"muteButton" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABoAAAAeCAQAAACY0sZTAAABZ0lEQVR4AWMYjGAUMDEwMzCSpoUxju+kDQMXAW1AaRYGdiCGsFjchd/OWmELFMGrhd1a4UUTAy+QzXLSdKMhA1+Z/tuF0qIMTLjdz9tp+27ly/0M4kBbWGdqv1/gJcMgdLz6YAA2u9gYhBgkGGR2pH3ZfWf/1f0Mshdsk8UZBDYlXMthEJhqfbuVgQ9Tk9D//SD4dv/F/eeBkEHuaNjjegYBT/k78xiEOcWuLWIQxtQkcWI/MmSQYhC/shioUPjUAhB5cgFWTQf3I0MGaQ6JwyBNIofBmsAkpvN27UeGDPI349dXMghEKu2byyAsKLZ/IYMQzoBoTNm4e8v+LcCA2GBoKsQgcDFjcRqDwBr7dU0MfLiDnCfaavHKdaAgZ2ZgXWd4cZ6eJIPQ5YYZXgzseCNXQ35GPSRyt+lVaTLwTTA9NJdTmIGJ2GTEzMCSKPZifoklpj14jTDj6jJj4CI5nYOzxkCCUQAAMVp+znQAUSsAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"muteButtonOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABoAAAAeCAQAAACY0sZTAAABfUlEQVR4AWMYjGAUsDJwMLCQpoXRTnZZIoM0AzMBZQzcDCIMXEAWC5Dk0tZ6fK0uFyiCBzAziCh5Xd7PoAJkc64I7QxhUPWLf/yQ3xjoTByAjUExrvzB+5f/GewYOBn4cgOf3ddxYNDftH1OCza7BBgMGBwYfCas/fjnzv+r/xn8NiXYGTJoTZ25ZymDTn7W8UMMapiaDP6Dwdv/F/+fB0KGgJXtF3YyaGp7XLrLYMhqce4hgyGmJocT/5EhgxuD7ZknDEYMJgcfMBgzGB8AkZiaDv5HhgzuLPa7nwBNN90N1gQmMZ236z8yZAjcN3H+JgZNM+8tQOdxWm17yGCAMyBSV6//s+X/lv8Mvv2BChoM2hsXd89n0GnKn7+PQRV3kCvYlsx6v+4/gy0DOwNvU8SJO1LWDAb791bUMgjji1xhMc/u3QzKoMid6hPtxaCakrbzDqsBAytxyYgZmFQ5bfXu3Q1Lx7QHrxHykgWRDFJAA0gCLAzsQC0DCUYBAC3AlmbNhvr6AAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"unmuteButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABoAAAAeCAQAAACY0sZTAAAAiklEQVR4AWMYWWAUMDKwMLADMUla2K0VnjUx8BKvhYmBt83m3cp3+xnEiFHOxiDEIMEgsz3l6+5H++/sB7KJAEL/94Pgu/1X918GQuI0SZzcjwSJ1XRgPxIk1nnb9iNBoCYSAqI6ZdXOtfvXAjWREuQ84VZzVi4DBjmJkassN7GegZe8ZDQSwSgAAJ/LQok1XVtuAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"unmuteButtonOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABoAAAAeCAQAAACY0sZTAAAAjUlEQVR4AWMYWWAUMDJwM4gwcJGihZlBRMnr0l4GZeK1sDEoxpQ+eP/uP4MVMcoFGAwYHBh8+ld/+vPo/53/QDYRwOA/GLz7f/X/ZSAkTpPDyf9IkFhNB/4jQWKdt+0/EgRqIiEgElct/7P2/1qgJlKCXMG6eNL7Zf8ZLEmLXGFhj5bdDMrkJaORCEYBAOZEUGMjl+JZAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"castButtonOff" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAQCAQAAAC8EZeQAAABOElEQVQoz2NgYGDgYJBgUMALJYBqgEDiP0EAVAoECv//vyIAgaZCFL74z2CBw1qLFyBZsELp//+f/meQY8AOFMCyYIX8J9ovnmIQwa3wIVghO4MogzzMX9gV3gMrFPl0++aWhUmc0gycDEzYFd4CKxT9/uLe/2f/H1zq9GPgZ2DGpvAaWCEfg1Zc9PptF//e+r40h0EAw1SgwksQE7/cOzFfz6Ep/9Tncz8mRDJwYyo8B7X61ZX/d16VRTVknP198JGKEtCtQgyyiHD8//80WCGvoO6M6Ud/H3vj7HZo5Yn/c9oZJJ9uRo3A42CFwq8Pergv6jv6f/l6d697vzddZlDcmHrr/xEUCIprsf//jx1j07z7aN9HLu2Xlw/+lpVl4GWQwkw9HAxiwFjhBQa7GDAERIAk1qAHAOge4gtynPL2AAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"castButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAQCAYAAAAWGF8bAAABy0lEQVQ4y2NggAAOIJYAYgUKsATUHDCQENnz/z+lGGooGCiABESXPaAIQ12KbOB9kKAFiV61AOmD6oUbKA129tJ7IEE5BtKAApJeuIH8ApNPtAvPOHsKyBYhy8Ald+EGsgOxKBDLo8cUSQYuug03UER406fbggtubuEtX5jEyM4pDRTjBGImUgwUXngLbqCo8LbvL4SX3v8vvPrFf6GlDy9xp3b6gYIBiJmJNnDBDbiBfECsxeGeEC3Qunmb8Lyrf4UX3/nOW7U0ByguQIRLIQbOv4bkwi1f7gEjZT6Lkr4Dd1JLvvDMC5+F51z+wZM9MRIoz02UgXOvoHj5FSgMgN5+xRleFsUd35ghPPfyb6EpJx4xS6sqQcNUCIhlsaVDsIFzLsEN5GXkFdTlK503XXjmud9CM869YTV0dhOYeGSl8OyL//kqFrUD1UgKrXy6GV+2E551AW6gsNDa1wfZTD3c+aqW9AnPOv9foGn9ejYTdy/hFY9/C3bvvgxUo8jXtDFVGJi9gJbixLC8LAayQWjGmWMMLGyawssePhKeeuIjIwe3tvDaV5eFZ5z+zSwmB/IqLxBLEVPagAgxaA7hhSZyMWjsi0DZRCd2ANcuONhZFnJlAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"fullscreenButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABYAAAAeCAQAAACC7ibdAAAA5ElEQVR4Ae3KsUrzYBhH8RPIFAJ5O3/ig5COgVyHW7N09x7aXSrESafuHeLi0A6iGEX+Y3edLMqnpe7egfbFMZCMXfo762GH9gIijIx8W0rcMQ9tU/3oL9KOGXdYLOuNfOS0CrGLyVr/fZ1zMht9a6VXqV6JjFa9efmiZ43PDoqnCqMh8BGS4IjpT8vTMYY7NiIaooHhsNnovqRPTA9HSOCjwT6ro+Jy8qV3PZT0aJUt9VavdadbnY9IaJUv9KiF5jqZYIQd87V80/rfAEdAq/RKvht9VEPrmmNS8m0ZRkTAzuz9AlNJVl+tEWchAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"fullscreenButtonOver" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABYAAAAeCAQAAACC7ibdAAAA5klEQVR4Ae3MIUzDUACE4b8VlU1FaQWEBPlQna+oxqHm0dTicShQcyWZwSBWEgohEIKcB8UKAZbhcZXHmsw1eZUz+357OdZow8HHkJItSwiwcodmUWuFpO852s2nzUJtZFh5mPNyrq+23nE4Lv4007templIsYon1ZtedXKzkz/XGDocXBw8QiICBqPq9JJ9ogODT4d/aIgw4+KhYkBAzBbe6qLD/NR7+UX5q089VsRYpVN9NHPd605nBSFWWaknlZroqMTg9Yyv1TZqto+JcLBKrtR2q+96aHCxCkjIlqUYfBzWZuMfAHJlDLF+xFEAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"normalscreenButton" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABYAAAAeCAQAAACC7ibdAAAA50lEQVR4Ae3KsU6DUBhA4QMNAtsNFcJLyKBx8mXYmNxkculDuJG4OOOmcbr/QNS1xKaJqxJjTJpUk84KuHW4d+nY76yHvV1zxlx8AiZYeJeHBKgmX14wte1qXZ1l98VG/8iyJMQo+ZJVvdGddPohx8co7eRThvWmQOFa5ncZWtSnRwQ4GEVvMvQh62oW2+YDItK+BIW3PTt4KJJxiPrVyJnF39Wv/EdkmQlOsqd6IUOkGLmou+JVv0ifdfabfKVbaXVTt0KCUfhczmWur4rj7LFCYTRhelte5yiC8xgPbHuIj4sztrdbfxJjV3K8mZ7yAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"normalscreenButtonOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABYAAAAeCAQAAACC7ibdAAAA7ElEQVR4Ae3Sr07DUBzF8e+daKaaiaYNAoH8uc43pK+AmsHimETxDAQBQZVkCQhAUFMBewkUCG4W/ib4haTykCYzmFszuc+xX3lYtw3HAEdEQsqQHvGekWKz6qFh3Jfbl9+Znta/WmrekBFU/GjRLvWuN11UJASVXh/yetVxjRH1xM/qNm+3D0lxBOVP6vaiTz8xBgSNyCkpKTBiHP84YoyiC8gZETSY2LfXCjlBjnRretk26kZJUISd1I+679YbJ7NqoTvd6Ly9FQVB2ay51pX262x65jGChoyPmoMKI901YujLMxKi1TnXa+MPEjlkhvYbWGMAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"volumeCapLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAMAAAAeCAYAAADpYKT6AAAAFElEQVR42mP4//8/AwwzjHIGhgMAcFgNAkNCQTAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"volumeCapRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAMAAAAeCAYAAADpYKT6AAAAFElEQVR42mP4//8/AwwzjHIGhgMAcFgNAkNCQTAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"volumeRail" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAAeCAYAAABaKIzgAAAASElEQVRYCe3BsQ3AMAwDQRIW4Cqlkf031AZKVkg6An8nAQCAH3zOPQpQe28lqJcS1FpLCcpWhJKsBGVbCaq7lcAzcwkAAHz0AE0SB2llBfTtAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"volumeRailCapLeft" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAAeCAYAAAALvL+DAAAAeElEQVR42tWKQQqDMBBFB3cFt9oQQ0wniW51b5f2ti30ZLX1AN+ZQA/hhwfz/zw6eZrmmoWn8NUyCh9jLJzzoLY1L2sd+v6GEBikmh7MCTHmYvyYI1LKBeo69/Y+SBkKtCz3SaztPxKAal0fs5ry2Emjo3ARajpNDtqHL/b2HUUVAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"volumeRailCapRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAAeCAYAAAALvL+DAAAAeUlEQVQYV9WKOw7CMBBEV3RItAmWYzlmbUMLfSjDbUHiZASFfpj1LTLSW+18RLarrjt+yZPUFoQQ4ZwHgw+5SEqKcTzB+4C+dy/JuUK1wAouVimlwlDNtvgxOMOIMWEYwrsFZtgu03S/Cp/Vmnl+3ADshOdA9s1sSn8goC/6ib5oHgAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"volumeProgress" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACoAAAAeCAQAAADwIURrAAAALElEQVRIx2NgGAWjYBSMRMD4/z/1DWW5TQOXsnwdMoZ+GyouHQWjYBSMTAAAnO8GxIQ7mhMAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"volumeProgressCapLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAAeCAQAAAChtXcIAAAANUlEQVQY02NkgAJGOjH+9zEkAxm/JrzJ/wYSufTxLx9Y6shHBghj10SGPKji9RMYkhjp6EIAcaIN1SJ2FnYAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"volumeProgressCapRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAAeCAQAAAChtXcIAAAANklEQVQYV2NgoCP4//F/H5hx5/+z/78mABnn/5//f+kzkHHkPxCCGLv+A+FEIGP9p/UgFXQFAHkZGwN2fDIsAAAAAElFTkSuQmCC"/\x3e\x3c/elements\x3e\x3c/component\x3e\x3ccomponent name\x3d"display"\x3e\x3csettings\x3e\x3csetting name\x3d"bufferrotation" value\x3d"90"/\x3e\x3csetting name\x3d"bufferinterval" value\x3d"125"/\x3e\x3csetting name\x3d"fontcase" value\x3d"normal"/\x3e\x3csetting name\x3d"fontcolor" value\x3d"0xffffff"/\x3e\x3csetting name\x3d"fontsize" value\x3d"11"/\x3e\x3csetting name\x3d"fontweight" value\x3d"normal"/\x3e\x3c/settings\x3e\x3celements\x3e\x3celement name\x3d"background" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAA0CAYAAACQGfi1AAAAYklEQVR4Ae2VwQ2AMAwD/cgKVRbJuAyH+mOBfMMQyBKCuwWsxoaLtfKQkaiqtAZ0t5yEzMSMOUCa15+IAGZqgO+AFTFTSmZFnyyZv+kfjEYH+ABlIhz7Cx4n4GROtPd5ycgNe0AqrojABCoAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"backgroundOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAA0CAYAAACQGfi1AAAAY0lEQVR4Ae2VsQ2AQAwDXWSFF91Pkf1rxkAZIm0YAllCcF7Aiu3/i7WOU0ZFZm6rQXfLaiCzYkbuC+b1EWHATM3iHbAiZkrJrIiSP/ObQjQ6gAcg8w/AsV/w2AEmE1HVVTLqBmJaKtrlUvCnAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"capLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAA0CAYAAACHO2h8AAAA4UlEQVR4Ae2XwUoDMRRFT17GTscIMoWOqwF1WUSFIv6Autf/X5TuxG6FBkOeHfAHpk+GLnI+4HBzLzyI44/l8uoBeAVugJqRuIMA4L1t24+u685DCGci4hhJBdwPkr7vL3POLsaIqnKM6G2xaJuUksPAILquqtlMFayiuYhzYDMJIygi+2qonloi0CkTldXK/NOXXVYrZRs6UgyUjsrxL6d28sP2b4n0xJ62z1nVHbCutolx/4MRH8LFt6o+Nc28tqTyq9Xd5273RUrpVsSL915gvNCt188MbLebR+Dl2K/oL+WmRveI4jXNAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"capLeftOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAA0CAYAAACHO2h8AAAA5ElEQVR4Ae2XMU7DQBBF346sIDAUDoqprNBCm4Im3IPcAE7EEbgId6BF6akQjheZGTYSF7DXQi7mSdM+zf4vjbSBP1arqy2wA26BUwZSJAHAY1VVT3VdX5RluZDEYBGwPUqaprlUVYkxYmaMEe2Wy+q873shgwK4KYrFiRnkis5EgkCeScjHRQNaw2xuG4HNYiNvzeufPmxvzcPOz8jIwDPy4++n9t8P22Qb2cye1qqahhAkt7W3GLvvKep/+Uyo/igYY0fW6+vXtv16/kgcDl2nagkYOmGzuePIfv9+DzyM/Yr+AujSfWZZzzLnAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"capRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAA0CAYAAACHO2h8AAAA20lEQVR4Ae2XQUrEQBBFX4e29QJDVgFv4Cb7wSt4Ps8wLtw5B3A97mfmAFlkkbaZMpAynkBiBRGpd4Ci6j/4UGGzqR9ZjgBn4AV4A4ht29YsZJomzTnXXdfd9X2/A55iKYWlhJmU0nXTNAl4mIedwnZ7/4wBkcvH8Xh6jaqYiDFdAbcRFAtVFQJwU7ESPuh7zPrX3wj0T2zk1lz/+mG7NQ/bnpFixDPy8veq/dViW20j/W+drTOAmK2JXEbgbDrt628bhqEA+x+dpjMiMuY8lFLed8DB+orugQPAJ8i7bEsKl1PuAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"capRightOver" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAA0CAYAAACHO2h8AAAA2UlEQVR4Ae3XwUkEMRTG8X8eIaLgwYXF0xRgKYsVWIIVrR1sI3uwANkSvMxhDhOzRoZ5pgOZSZiDvF8Bjy/vgwdx+/3jO8tdgQtwAs4A7nB4/mShuYgx5r7v4zAMR+DNp5RYyjknIYTbrutugNcy7ENYQVUpoZimSXa7h3vgxatSxfsQgCcPdZNEnAB3QiM26G/V9bdPBLp9ImvN6t9y2daaLbtiR0ol25Edfzu1mx62Zon0v91sVZ2Bq1Ap5+8f4FL1tLkYC+C06mla5CLGcUzp6wicm31FfwHzmG90m7lXIAAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"bufferIcon" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAABGElEQVR4Ae3Rr0pEQRSA8Zl1b1uDQTAt4j8QES1qURZvEf8lfYJVsfoAisYFq9mgyfUFVptgMtk3CAaD6DN8HoYbFhk9w9x0Yc6XDsv8LrNj0vgnTZo05LzzyR7m/wxafQC+sDHQENkv6DsG2uFV2i62nDc+2C82SybVwqAX+tIzxlOdzBUEPTnosTy0wgM9lryQpS7pVwutetAiN3RZU481mJYaf0PX9KR7rALNMCtNaVC3PLTALXesYpSGlatFVDFonnNOmfQeGKHFOqNhUIcr6cwLtdiVNkIgy6WDLrxQ7qBNrApJy0J1mCu2CY6k4qKMCbJFM/TPHvzeASfS8cBvtbhXazvosPzzN2lL4/GQXoISlKAqQz+eXnU2Tp6C2QAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"bufferIconOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAABGElEQVR4Ae3Rr0pEQRSA8Zl1b1uDQTAt4j8QES1qURZvEf8lfYJVsfoAisYFq9mgyfUFVptgMtk3CAaD6DN8HoYbFhk9w9x0Yc6XDsv8LrNj0vgnTZo05LzzyR7m/wxafQC+sDHQENkv6DsG2uFV2i62nDc+2C82SybVwqAX+tIzxlOdzBUEPTnosTy0wgM9lryQpS7pVwutetAiN3RZU481mJYaf0PX9KR7rALNMCtNaVC3PLTALXesYpSGlatFVDFonnNOmfQeGKHFOqNhUIcr6cwLtdiVNkIgy6WDLrxQ7qBNrApJy0J1mCu2CY6k4qKMCbJFM/TPHvzeASfS8cBvtbhXazvosPzzN2lL4/GQXoISlKAqQz+eXnU2Tp6C2QAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"errorIcon" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAAB3ElEQVR42u2Tv0sCYRzGv5WFJIVgkEVLSy1ObWGDUE0OgdRYtBZC/QENFv0DDTW0FEYJGkgEBUZCEFxYlJpnEMSpUxpBNAkiT++rlb+uvNOpuOcz3Pt+j3vgeN8PkRYtWv5Z2qmb0d58kXl7ZXuFzM3W6E3jybfUW+8E6ZupaaXB3ZNnPGPnlAbZruF02ebTuRRSSOds89TVaE0bWYJiEhIjiaBIFjZpKKaF1TSePknDuUamRmo6dKPRzCNKRDO6UepQW9NCAxseCXHGlHvKzZ8SNjw0wN6oSqfFIWXvwSE72YsrKWtxkEHdsQ/5hRjuCpCNbMVVDEdXNKzmGhhnlqT8DYrwoq+1lJ9ZIqNyu0aERAhXn/Cir3UIQoJGlJpndm2KuPyGF5V2IlxbyszTmybi7xcowYvK9/H3/sn65hXsEnBeBi8q3wuKzGN2PeQCKIcff+Xkoa55zK4zMYCTCubcs+7KSQBn3DzdL3Ytrt3iuIpXRvXsFs516vnFruuMH8oI/Whewa4gDmsY8435aqfBH81jdoWzXtTi8Dm8cvOwrHkFu/zwyJDBi+yc/aCMecyuUH4f6rjOTy9Xm9cXiRxgTyX7iESor7LIQENk5XdYFVb2lYG0aNHyF/MB+x5LQiE6gt8AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"errorIconOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAAB3ElEQVR42u2Tv0sCYRzGv5WFJIVgkEVLSy1ObWGDUE0OgdRYtBZC/QENFv0DDTW0FEYJGkgEBUZCEFxYlJpnEMSpUxpBNAkiT++rlb+uvNOpuOcz3Pt+j3vgeN8PkRYtWv5Z2qmb0d58kXl7ZXuFzM3W6E3jybfUW+8E6ZupaaXB3ZNnPGPnlAbZruF02ebTuRRSSOds89TVaE0bWYJiEhIjiaBIFjZpKKaF1TSePknDuUamRmo6dKPRzCNKRDO6UepQW9NCAxseCXHGlHvKzZ8SNjw0wN6oSqfFIWXvwSE72YsrKWtxkEHdsQ/5hRjuCpCNbMVVDEdXNKzmGhhnlqT8DYrwoq+1lJ9ZIqNyu0aERAhXn/Cir3UIQoJGlJpndm2KuPyGF5V2IlxbyszTmybi7xcowYvK9/H3/sn65hXsEnBeBi8q3wuKzGN2PeQCKIcff+Xkoa55zK4zMYCTCubcs+7KSQBn3DzdL3Ytrt3iuIpXRvXsFs516vnFruuMH8oI/Whewa4gDmsY8435aqfBH81jdoWzXtTi8Dm8cvOwrHkFu/zwyJDBi+yc/aCMecyuUH4f6rjOTy9Xm9cXiRxgTyX7iESor7LIQENk5XdYFVb2lYG0aNHyF/MB+x5LQiE6gt8AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"playIcon" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAABHUlEQVR4Ae2Vu0oDQRRAB2xSWVmmtQncLzFREUUsnW/wJ0SCWgQV8TUQBBEsjlgIFoJFCsFCCT5QgwZFtPGtncUWIcTZnd2pAnNOf2Bn5t5VgUCge8mpPtWrevxD+cbi1KTq948VXvjlbMM/Jk2aPPPjHZM7Ip88Y3JLy0e+M8fkmnYfMsbkkk7v+Uodkzr/2+AzVUxOsXvDh3NMToj3inenmByT7AVviTGp4WadV85XK0WVs4SOcHd3rVyyhg5xc91M6NhPOyDZFTOuEw97n3iXzZh2uv497C6YUe38ILFQMSM61Yjs0Om8Gdaph3abdmfNkM60RrZoWTaDOvNi2yRyxpQsETcKVapMm6JHJCI/tzTgEfH4QXYxgUDgD+1pwmmFlV3oAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"playIconOver" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAABHklEQVR4Ae2VvUpDQRBGt7BMaekD5AEsU0zvL6KI76CdL6FDUItgIYJNEERIoVgIFoKFhWChBBNRYwwZRBv/tfostgghuXf37lSBPac/cHd35ppIJDK45MyIGTZDRk2+UVteNaP6WOEVf7hu62PUQgsv+FXHqAnrszJGD+go+AmO0R26bQfGqI5en/CdOUZV9LeBr0wxukKy9/j0jtEl0r3Fh1eMLuC2hndnjM7hZxVvuHksLZpcQugM/h42i0uJoVP4uSMLnPppJ3C7LfPsPOxjpLslc+x1/UdIdlNm2ftBHqC/JZnhTCNSQa8bMs2Zh3Yf3a7JFAetkT10LMokBy+2XVhZJgIjlkIZZazIuCJiya/Xx9QR/Q8yEokMFv9/Ax7UXjl24wAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"replayIcon" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAADOElEQVR4Ae2VUWhbVRjH/0nqdk0m0eTGITVZNsmiZCLTlooNPoWlbk27lzmGSIeyh7YgFSYaGO2yDZk4GMi65kG9d6kkbfCuyf1bqZmmlsYxCK51KwxkrpM4qBRla18cIngvw0qgN7ea1/z+L4fDn4/vO+c730G9NGjQQIALj8CKumn+afjIQWyDHRbUxTO/8w/Ojux9Bc0Q6gn27B3eoRZM5Zm2l7EVm/5bMAsEiPAjiFiFun7hXa5MjJ7Y1gI3mjYaxA5vZzSdmJeWlfvqz/xHFd7jr5+fP+rYgU0wpQlibE8peV+9yyVWeJuLVapwleU4tsCEh9B8sn8lt8SbBprJvHUEXrOMmuCVj61o9h81fXEhEY/GHAf09QOVlaF3N4fgNDsjCzxnBn7jDU3T2TfexE64IeC5G9Q1lz/7/vY2iBs5aHtndCm/wAXmUtvb8ShsD/pogdf46bm2CJ7Qr16THY87t0Iwzsf77ch1/sBCdmcYjrVuaZ4813UAPjwMC3SXsztS+ujqWTxp1E9CV8ct9Sq/56EeOGGpemtb1t6a9bXdq7nbvKV2dRjlJKaOl1lm+gICsME47x1jsu5LHYeIdfEXpCu8wsE43KiFezCu+woS/FiX4KxSYon7YhBQC2FfTPfNKghiXUIldYYzdLfChlpYxRbd952KkEGgr9Uii3z6JbNAnhbd941hoOBF5RIv8WC3SWmbuzt130XD0vyfSFOc4gfvwIVauD48qvs+Njxs8URikpOckmtevw2Br2Tdd9Lw+oVIR15VeZl91Q1Z3UXOvp7LVJlXI4YNaYHvdHKCE7ye3fXvE6l2OHaFr43rntNJ+IxHrj0czeQVFjifCrbDCRuqi3IG2+dTBSrM5MNR2GuOkcMD48xymotZrcAAXBBghQ0C3Aj09Sxmp5nlOA8PwAOLyWDrPZbhGL/kMufkkff2xx5rferFQ/vPx+fkZW13jBn2D8KrOc1H7av9ci7NNIu8yVX+xT95T1sVqe/J+dffhldzYUPD/4U9Q8lR9TNWa5RDyeej8BhkY/Qd7Y72Jk5Jw4qkSuqwckrqTbTuhc/44zb/IEOagtpK/N8fdoMGDf4G6kd7103/csoAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"replayIconOver" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACQAAAA0CAQAAABI31KIAAADTElEQVR4Ae2VX2xTZRjGH1iBzDMrU6lxLdOFhLJ/CepwTWCJiUSTDTdilikxJmAo2GlJ9I7EsCgkw6jRG5ALtZNJy7QDiwxK0dZllSypssqatCHIMKdzM4uEnUUrtj2P57uAULNzOtltf8/Nl3OevHnf73u/70WJxVKiRAWqcD/KsGjsvyScb6EBZizFoth4nX9zJNn6KtZCwhLcNU9NcpJasPw3o80vogbl/y/YUkiwoRHNcMsUSvMGlX/6zz3SCiuWLzSIGXVbnN5gXJ7566b6K29J5ix///PwMWk9ylGUZVj93M5o6qZ6g9OUeY0TBZI5x9ggKlGEFbDvP6Jkp3lFR8PX93yEOpQXy6a2L6Bo9suaTv/2tv/ZPdLey7ylWKZnYEULLFhWbG+q3/f8waSmiPLKB3gSVkh4OkmhsdyHkZoO2Bay0eYtzulcggl+PVXTiYdggmBjgpf42XjzDqwRRy+OAo/eVwNJP5+675Pj/JkhZW0XVt7uFvvQePte1ONezSFclo4d0fjFH7FOr9Ol9l1X1Yv8idt6Ybmj6SRUofL2XSt76Zm57DVeVdt36eVkO3o2xhi9k9gAE/TzXn88LXxHz8KGeWkMyaMc5T4/rDDCus8vfCEZjZgXx0gmyijb3JBghNTmFr6RDByYl5ZofpjDfKANJhhR9mCr8P2QR4tOoG/zYYa57vligVa1Ct93uoEcJzLneZ4vvIEKGHFPx+vCd0K3tMZP5SCDfNeLKhjx8HvHhO8T3c22vRMc4hCDaTQZFGdC07m08O3XPX5p8+6AeooX2F3QkAUsgaW79wJPMaBu3g1Jr9XqD6ZO8iTHlYY7rkhBmJUNXZdmhedgCvX6w8C8yenLDTLE+JS9ExaY/lOUxd4ZnwpxkL7cJifMhs/Ids8Av2SEE4pWYBOqIKEMJlTAiqbu3gklov0d4HYPqo2H03LUugI+HucZznAs/fFXW92VbWu2bnvzsH8sPcMz2h8fXzuNWs1Z/KntOtKX9dLLMK9wjnlmOautwhTf+nIvf446zYUFPf5P7OxJ9atfsFD97Ek97kS1TjZ64+gxpyt4QD6U8age9VDmgOwKbnChXn9wFxuQDrRocmir1ai4y+lfokSJfwEhAcqxd5L4JgAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3c/elements\x3e\x3c/component\x3e\x3ccomponent name\x3d"dock"\x3e\x3csettings\x3e\x3csetting name\x3d"iconalpha" value\x3d"1"/\x3e\x3csetting name\x3d"iconalphaactive" value\x3d"1"/\x3e\x3csetting name\x3d"iconalphaover" value\x3d"1"/\x3e\x3c/settings\x3e\x3celements\x3e\x3celement name\x3d"button" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAgCAYAAABpRpp6AAAAxklEQVR4Ae2YsQ3CMBBF7+yIximQSERSMgYNI1AxJgswAaMkLREpEnQ2Z6Chooqwpf+k65+evhtzXW8LIjrp7fUcpcmod9U7v2Sbpjm2bVtaa5kSRERC13V13/ePIpatqk05zzOHEChFWImOKnyIwk7EMyXMJyTrOUOZAeGlKd4byUtYCZjEN9gwCuPRYRKYBCbx18JLJ0bh3IQJk/gFHh0Ko3BWwqOID8YYpoTx3ofoap0r18y0WymspCo7DLf7NE2X7L5bnyz7UgI6sO7WAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"buttonOver" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAgCAYAAABpRpp6AAAAzklEQVR4Ae2YMU7FMBAFx04osQvyRQIX4nfcgRZOAxW3oMqRkhKbBkWyjVfiCiD7a0dKPxq9dZHxdLq9Al6AB8DRJl/ACryOwPM8z0/LsvhhGCwNklLK27bd7fv+LcLnabrxx3HYUgotYoyx4liFH0XYpZQtDfMb0orrSGeo8L8Il9Jd4dL5JFRYN6xHp5PQSegkLuwd/uPEWrg3YXQSenRaWAtfVOGYUs62QsPkiriK8Brj571z3ot0q7IxhgB8iPBbCMHU7wxcN/679f0HQzRYj4Eg/3AAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"buttonActive" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAgCAYAAABpRpp6AAAAwUlEQVR4Ae2YsQ3CMBBFD8e0CVESUcFMpGMKapgAKvagymKWiF3RxMe/IUDn6J70I5dPX98u4odhvyWiG3JCdqSTiEzI3eNz7fv+0nVdW1WVI4VkEEI4IB8RHjXLCg6II4TPXmbgADOTZhwQV0+F4ekPmDBzcQ2zTcKEC9+wXTqbhE3CJrGyd5jpp1jDxb0SNgm7dNawNbyqhudlydkBUkwG4irCU0rzsa6bVqt0BinFN44vEX7EGDfIiHOj/Hfr8wvCZ0/Xf6TpeQAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"divider" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAAgCAYAAAA1zNleAAAAD0lEQVQoU2NgGAWjADcAAAIgAAEeEYatAAAAAElFTkSuQmCC"/\x3e\x3c/elements\x3e\x3c/component\x3e\x3ccomponent name\x3d"playlist"\x3e\x3csettings\x3e\x3csetting name\x3d"backgroundcolor" value\x3d"0x3c3c3e"/\x3e\x3csetting name\x3d"fontcolor" value\x3d"0x848489"/\x3e\x3csetting name\x3d"fontsize" value\x3d"11"/\x3e\x3csetting name\x3d"fontweight" value\x3d"normal"/\x3e\x3csetting name\x3d"activecolor" value\x3d"0xb2b2b6"/\x3e\x3csetting name\x3d"overcolor" value\x3d"0xb2b2b6"/\x3e\x3csetting name\x3d"titlecolor" value\x3d"0xb9b9be"/\x3e\x3csetting name\x3d"titlesize" value\x3d"12"/\x3e\x3csetting name\x3d"titleweight" value\x3d"bold"/\x3e\x3csetting name\x3d"titleactivecolor" value\x3d"0xececf4"/\x3e\x3csetting name\x3d"titleovercolor" value\x3d"0xececf4"/\x3e\x3c/settings\x3e\x3celements\x3e\x3celement name\x3d"item" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAABMAQMAAAASt2oTAAAAA1BMVEU8PD44mUV6AAAAFklEQVR4AWMYMmAUjIJRMApGwSgYBQAHuAABIqNCjAAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"itemActive" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAABMAQMAAAASt2oTAAAAA1BMVEUvLzHXqQRQAAAAFklEQVR4AWMYMmAUjIJRMApGwSgYBQAHuAABIqNCjAAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"itemImage" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAAA2CAMAAAAPkWzgAAAAk1BMVEU0NDcVFRcWFhgXFxknJyozMzYyMjUlJSgrKy4jIyYZGRssLC8YGBobGx0kJCcuLjAiIiQaGhwjIyUpKSwkJCYaGh0nJykiIiUgICIwMDMqKi0cHB8lJScdHSAtLTAuLjEdHR8VFRgxMTQvLzIvLzEoKCsZGRwqKiwbGx4gICMoKCofHyImJigmJikhISMeHiAhISRWJqoOAAAA/klEQVR4Ae3VNYLDMBQG4X8kme2QwwzLfP/TbeO0qfQ6zQW+coRxQqYl4HEJSEACEvA8NQamRkCoF40kNUxMgC3gc0lrtiZAB1BKuSOPDIzcXroB0EtL3hQXuIHLNboDC+aRgRnQ6GUAjtBEBmrgdcwA/OCyuMATraOvBiB3HBQTOJ8KZp5QwwXoA3xFBdrVjpPnHVgBfQfjqMChZSoAugDMwCsqUMFeAHwEwMFnXKDkshGAz5YAEOIC2fpbAqhUAMDG4AcO3HUAahkAHYykOQATC6Bsf7M7UNotswLwmR2wAviTHVAAHA2BMXCWIaDC7642wIMSkIAEJCABxv0D1B4Kmtm5dvAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"divider" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAANIAAAABCAIAAAAkUWeUAAAAEUlEQVR42mPQ1zccRaOIzggAmuR1T+nadMkAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"sliderRail" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAABCAYAAADErm6rAAAAHklEQVQI12NgIABERcX/Kymp/FdWVkXBIDGQHCH9AAmVCvfMHD66AAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"sliderCapTop" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAKCAYAAACuaZ5oAAAAEUlEQVQoU2NgGAWjYBQMfQAAA8oAAZphnjsAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"sliderCapBottom" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAKCAYAAACuaZ5oAAAAEUlEQVQoU2NgGAWjYBQMfQAAA8oAAZphnjsAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"sliderRailCapTop" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAECAYAAACUY/8YAAAAX0lEQVR42q2P4QqAIAyEewktLUy3pKevVwvpAdZO+q9Qgw+OO25jQ88YM2blUAp4dW71epfvyuXcLCGsFWh4yD4fsHY6vV8kRpKUGFQND9kfHxQsJNqEOYOq4Wl2t/oPXdoiX8vd60IAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"sliderRailCapBottom" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAECAYAAACUY/8YAAAAXElEQVQY02NgIADExCQ+KSmp/FdWVkXBIDGg3BcGSoG0tMxGWVl5DAtAYiA5ii2wsbE1ALr0A8hAkKtBGMQGiYHkKLbg////TK6uboYg1wIN/QzCIDZIDCRHSD8AB2YrZ5n2CLAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"sliderThumb" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAABCAAAAADhxTF3AAAAAnRSTlMA/1uRIrUAAAAUSURBVHjaY/oPA49unT+yaz2cCwAcKhapymVMMwAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"sliderThumbCapBottom" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAECAQAAAA+ajeTAAAAMElEQVQI12NgwACPPt76f/7/kf+7/q//yEAMeNQH19DHQBy41Xf+/ZH3u4hVjh8AAJAYGojU8tLHAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"sliderThumbCapTop" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAECAQAAAA+ajeTAAAANUlEQVQI12NgoAbY2rf+49KPs/uIVH54wrH/h/7v+L/y//QJRGm4/PHa/7NALdv+L/6MKQsAZV8ZczFGWjAAAAAASUVORK5CYII\x3d"/\x3e\x3c/elements\x3e\x3c/component\x3e\x3ccomponent name\x3d"tooltip"\x3e\x3csettings\x3e\x3csetting name\x3d"fontcase" value\x3d"normal"/\x3e\x3csetting name\x3d"fontcolor" value\x3d"0xacacac"/\x3e\x3csetting name\x3d"fontsize" value\x3d"11"/\x3e\x3csetting name\x3d"fontweight" value\x3d"normal"/\x3e\x3csetting name\x3d"activecolor" value\x3d"0xffffff"/\x3e\x3csetting name\x3d"overcolor" value\x3d"0xffffff"/\x3e\x3c/settings\x3e\x3celements\x3e\x3celement name\x3d"background" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAACCAYAAABsfz2XAAAAEUlEQVR4AWOwtnV8RgomWQMAWvcm6W7AcF8AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"arrow" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAADCAYAAACnI+4yAAAAEklEQVR42mP4//8/AymYgeYaABssa5WUTzsyAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"capTop" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAAECAYAAAC6Jt6KAAAAHUlEQVR42mMUFRU/wUACYHR1935GkgZrW0faagAAqHQGCWgiU9QAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"capBottom" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAAECAYAAAC6Jt6KAAAAGElEQVR42mOwtnV8RgpmoL0GUVHxE6RgAO7IRsl4Cw8cAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"capLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAcAAAACCAYAAACUn8ZgAAAAFklEQVR42mMQFRU/YW3r+AwbZsAnCQBUPRWHq8l/fAAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"capRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAcAAAACCAYAAACUn8ZgAAAAFklEQVR42mOwtnV8hg2LioqfYMAnCQBwXRWHw2Rr1wAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"capTopLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAcAAAAECAYAAABCxiV9AAAAPklEQVR4XmMQFRVnBeIiIN4FxCeQMQOQU6ijq3/VycXjiau79zNkDJLcZWvv9MTGzumZta0jCgZJnkAXhPEBnhkmTDF7/FAAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"capTopRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAcAAAAECAYAAABCxiV9AAAAPklEQVR42mMQFRU/gYZ3A3ERELMyuLp7P0PGTi4eT3R09a8CJbMYrG0dnyFjGzunZ7b2Tk+AkrswJGEYZAUA8XwmRnLnEVMAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"capBottomLeft" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAcAAAAECAYAAABCxiV9AAAAMUlEQVR4AWMQFRU/YW3r+AwbBknusrSye4JLslBdQ/uqpbX9E2ySrEBcBMS7QVYgYwAWViWcql/T2AAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"capBottomRight" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAcAAAAECAYAAABCxiV9AAAANUlEQVR42mOwtnV8hg2LioqfYMAmYWll9wQouQtD0tLa/om6hvZVoGQ2A0g7Gt4NxEVAzAoAZzolltlSH50AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"menuOption" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAARCAYAAADkIz3lAAAAcklEQVQoz2NgGLFAVFRcDoh3AfFnKC2HVaGYmMQeSUnp/7Kycv9BNJB/AJeJn+XlFf8rKir/V1BQ+g/k/8SqEGjKPhkZuf/Kyqr/QTSQfwirQm9vX3WQYqCVX0G0p6e3BlaF////ZwJiLiDmgdJMwzr2ANEWKw6VGUzBAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"menuOptionOver" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAARCAYAAADkIz3lAAAAcklEQVQoz2NgGLFAVFRcDoh3AfFnKC2HVaGYmMQeSUnp/7Kycv9BNJB/AJeJn+XlFf8rKir/V1BQ+g/k/8SqEGjKPhkZuf/Kyqr/QTSQfwirQm9vX3WQYqCVX0G0p6e3BlaF////ZwJiLiDmgdJMwzr2ANEWKw6VGUzBAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"menuOptionActive" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAARCAQAAABOKvVuAAAAdElEQVR4AWOgJ5BhcGQIBWIZhJCsW+6jS7+/P7rklssgBxN0un/59f+n/1//f3SVwQUmGPrs+6P/IPj8N0M4TNBl/+Vr/0Hw4FUGN5igkm3ursvnf+y6bJ/LoAwTZGZQY/BgCANiNSCbASHMwcANxMy09DcAxqMsxkMxUYIAAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"volumeCapTop" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAFCAYAAAB1j90SAAAAE0lEQVR42mP4//8/AzmYYQRoBADgm9EvDrkmuwAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"volumeCapBottom" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAFCAYAAAB1j90SAAAAE0lEQVR42mP4//8/AzmYYQRoBADgm9EvDrkmuwAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"volumeRailCapTop" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAECAYAAAC+0w63AAAAXklEQVR42n2NWwqAIBRE3YSmJT4KafW1tZAWMN2RPkSojwPDPO5VAFSP1lMRDqG+UJexN4524bJ2hvehQU2P2efQGHs6tyCEhBhzg5oes7+PlcWUVuS8Nah5QLK77z7Bcm/CZuJM1AAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"volumeRailCapBottom" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAECAYAAAC+0w63AAAAWklEQVQI12NgQAJiYhKfVFXV/6upaaBgkBhQ7gsDLiAtLbNRXl4RQyNIDCSHU6ONja0B0OQPIIUgW0AYxAaJgeRwavz//z+Tq6ubIch0oOLPIAxig8RAcshqARVfK+sjJ8UzAAAAAElFTkSuQmCC"/\x3e\x3celement name\x3d"volumeRail" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAA0CAYAAAC6qQkaAAAAXklEQVR42mP5//8/AwyIiUn85+bmZmBkZGRABiA1X79+ZXj16gVcgoUBDaBrwiWGoZFYMCg0MpKnkZFxCPlxVONw0MjIyDgaOCM7AdC7lBuNjtGiY1TjqMbRwooijQBUhw3jnmCdzgAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"volumeProgress" 
src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAA0CAAAAACfwlbGAAAAAnRSTlMA/1uRIrUAAAAmSURBVHgBY/gPBPdunT+yaw2IBeY+BHHXwbmPQNz1w5w7yh3lAgBeJpPWLirUWgAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"volumeProgressCapTop" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAECAQAAAAU2sY8AAAANElEQVQI12NgIA5s7Vv/cenH2X1YpA5POPb/0P8d/1f+nz4BQ/Lyx2v/zwKlt/1f/BkmBgDJshlzy7m4BgAAAABJRU5ErkJggg\x3d\x3d"/\x3e\x3celement name\x3d"volumeProgressCapBottom" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAECAQAAAAU2sY8AAAAL0lEQVQI12NggIJHH2/9P///yP9d/9d/ZkAHjybCJScyYIJbE85/OvJp1wQG4gAADBkams/Cpm0AAAAASUVORK5CYII\x3d"/\x3e\x3celement name\x3d"volumeThumb" src\x3d"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAQCAQAAACMnYaxAAAA/klEQVR4AYXQoW7CUBjF8f9IYWkgq2l2k8llrmJBTOBxsyQlJENs4236CDhEywNUIEGh12WZuYDC4W9A3B2zhTVLds8VJ+fnPv5/FzQIaHGptNQaWn4ooM0DA56VgVpbi1hEk2vSvNjbozu6vc0LUi1NCQFXDBflwW/9p7L1B78oGRJJCOnN8o3/OMvGz3J6EiLStdX0K2tLKiFm8n6qY3XiVYL5C98cLxL90dLWcWkZSYjpZ0Uds4K+hIg7nqblOU1LxlojCDF0GWfz1a5ylVvtsrmoi5EQ0OGGhEdNE2WslmjpSND5VAy3mu6VRM1o0fm+Dx8SEWOUWC3UIvoCCFqphCwr/x8AAAAASUVORK5CYII\x3d"/\x3e\x3c/elements\x3e\x3c/component\x3e\x3c/components\x3e\x3c/skin\x3e')}})(jwplayer);
-(function(g){var l=g.html5,c=g.utils,a=g.events,d=a.state,k=c.css,b=c.isMobile(),e=document,f=".jwpreview",C=!0,p=!1;l.display=function(g,v){function t(e){if(X&&(g.jwGetControls()||g.jwGetState()==d.PLAYING))X(e);else if((!b||!g.jwGetControls())&&T.sendEvent(a.JWPLAYER_DISPLAY_CLICK),g.jwGetControls()){var f=(new Date).getTime();$&&500>f-$?(g.jwSetFullscreen(),$=void 0):$=(new Date).getTime();var j=c.bounds(y.parentNode.querySelector(".jwcontrolbar")),h=c.bounds(y),f=j.left-10-h.left,k=j.left+30-
-h.left,D=h.bottom-40,m=h.bottom,x=j.right-30-h.left,j=j.right+10-h.left;if(b&&!(e.x>=f&&e.x<=k&&e.y>=D&&e.y<=m)){if(e.x>=x&&e.x<=j&&e.y>=D&&e.y<=m){g.jwSetFullscreen();return}T.sendEvent(a.JWPLAYER_DISPLAY_CLICK);if(J)return}switch(g.jwGetState()){case d.PLAYING:case d.BUFFERING:g.jwPause();break;default:g.jwPlay()}}}function h(a,b){O.showicons&&(a||b?(E.setRotation("buffer"==a?parseInt(O.bufferrotation,10):0,parseInt(O.bufferinterval,10)),E.setIcon(a),E.setText(b)):E.hide())}function m(a){B!=a?(B&&
-j(f,p),(B=a)?(a=new Image,a.addEventListener("load",n,p),a.src=B):(k("#"+y.id+" "+f,{"background-image":""}),j(f,p),K=u=0)):B&&!J&&j(f,C);w(g.jwGetState())}function D(a){clearTimeout(ha);ha=setTimeout(function(){w(a.newstate)},100)}function w(a){a=S?S:g?g.jwGetState():d.IDLE;if(a!=Y)switch(Y=a,E&&E.setRotation(0),a){case d.IDLE:!z&&!H&&(B&&!A&&j(f,C),a=!0,g._model&&!1===g._model.config.displaytitle&&(a=!1),h("play",L&&a?L.title:""));break;case d.BUFFERING:z=p;x.error&&x.error.setText();H=p;h("buffer");
-break;case d.PLAYING:h();break;case d.PAUSED:h("play")}}function n(){K=this.width;u=this.height;w(g.jwGetState());r();B&&k("#"+y.id+" "+f,{"background-image":"url("+B+")"})}function F(a){z=C;h("error",a.message)}function r(){0<y.clientWidth*y.clientHeight&&c.stretch(g.jwGetStretching(),s,y.clientWidth,y.clientHeight,K,u)}function j(a,b){k("#"+y.id+" "+a,{opacity:b?1:0,visibility:b?"visible":"hidden"})}var y,s,I,L,B,K,u,A=p,x={},z=p,H=p,J,W,E,S,Y,O=c.extend({showicons:C,bufferrotation:45,bufferinterval:100,
-fontcolor:"#ccc",overcolor:"#fff",fontsize:15,fontweight:""},g.skin.getComponentSettings("display"),v),T=new a.eventdispatcher,X,$;c.extend(this,T);this.clickHandler=t;var ha;this.forceState=function(a){S=a;w(a);this.show()};this.releaseState=function(a){S=null;w(a);this.show()};this.hidePreview=function(a){A=a;j(f,!a);a&&(J=!0)};this.setHiding=function(){J=!0};this.element=function(){return y};this.redraw=r;this.show=function(a){if(E&&(a||(S?S:g?g.jwGetState():d.IDLE)!=d.PLAYING))clearTimeout(W),
-W=void 0,y.style.display="block",E.show(),J=!1};this.hide=function(){E&&(E.hide(),J=!0)};this.setAlternateClickHandler=function(a){X=a};this.revertAlternateClickHandler=function(){X=void 0};y=e.createElement("div");y.id=g.id+"_display";y.className="jwdisplay";s=e.createElement("div");s.className="jwpreview jw"+g.jwGetStretching();y.appendChild(s);g.jwAddEventListener(a.JWPLAYER_PLAYER_STATE,D);g.jwAddEventListener(a.JWPLAYER_PLAYLIST_ITEM,function(){z=p;x.error&&x.error.setText();var a=(L=g.jwGetPlaylist()[g.jwGetPlaylistIndex()])?
-L.image:"";Y=void 0;m(a)});g.jwAddEventListener(a.JWPLAYER_PLAYLIST_COMPLETE,function(){H=C;h("replay");var a=g.jwGetPlaylist()[0];m(a.image)});g.jwAddEventListener(a.JWPLAYER_MEDIA_ERROR,F);g.jwAddEventListener(a.JWPLAYER_ERROR,F);b?(I=new c.touch(y),I.addEventListener(c.touchEvents.TAP,t)):y.addEventListener("click",t,p);I={font:O.fontweight+" "+O.fontsize+"px/"+(parseInt(O.fontsize,10)+3)+"px Arial, Helvetica, sans-serif",color:O.fontcolor};E=new l.displayicon(y.id+"_button",g,I,{color:O.overcolor});
-y.appendChild(E.element());D({newstate:d.IDLE})};k(".jwdisplay",{position:"absolute",width:"100%",height:"100%",overflow:"hidden"});k(".jwdisplay "+f,{position:"absolute",width:"100%",height:"100%",background:"#000 no-repeat center",overflow:"hidden",opacity:0});c.transitionStyle(".jwdisplay, .jwdisplay *","opacity .25s, color .25s")})(jwplayer);
-(function(g){var l=g.utils,c=l.css,a=document,d="none",k="100%";g.html5.displayicon=function(b,e,f,C){function p(b,c,d,e){var f=a.createElement("div");f.className=b;c&&c.appendChild(f);w&&q(f,b,"."+b,d,e);return f}function q(a,b,d,f,j){var h=v(b);"replayIcon"==b&&!h.src&&(h=v("playIcon"));h.src?(f=l.extend({},f),0<b.indexOf("Icon")&&(B=h.width|0),f.width=h.width,f["background-image"]="url("+h.src+")",f["background-size"]=h.width+"px "+h.height+"px",f["float"]="none",j=l.extend({},j),h.overSrc&&(j["background-image"]=
-"url("+h.overSrc+")"),l.isMobile()||c("#"+e.id+" .jwdisplay:hover "+d,j),c.style(w,{display:"table"})):c.style(w,{display:"none"});f&&c.style(a,f);L=h}function v(a){var b=D.getSkinElement("display",a);a=D.getSkinElement("display",a+"Over");return b?(b.overSrc=a&&a.src?a.src:"",b):{src:"",overSrc:"",width:0,height:0}}function t(){var a=j||0===B;c.style(y,{display:y.innerHTML&&a?"":d});u=a?30:0;h()}function h(){clearTimeout(K);0<u--&&(K=setTimeout(h,33));var a="px "+k,b=Math.ceil(Math.max(L.width,l.bounds(w).width-
-r.width-F.width)),a={"background-size":[F.width+a,b+a,r.width+a].join(", ")};w.parentNode&&(a.left=1==w.parentNode.clientWidth%2?"0.5px":"");c.style(w,a)}function m(){z=(z+x)%360;l.rotate(s,z)}var D=e.skin,w,n,F,r,j,y,s,I={},L,B=0,K=-1,u=0;this.element=function(){return w};this.setText=function(b){var c=y.style;y.innerHTML=b?b.replace(":",":\x3cbr\x3e"):"";c.height="0";c.display="block";if(b)for(;2<Math.floor(y.scrollHeight/a.defaultView.getComputedStyle(y,null).lineHeight.replace("px",""));)y.innerHTML=
-y.innerHTML.replace(/(.*) .*$/,"$1...");c.height="";c.display="";t()};this.setIcon=function(a){var b=I[a];b||(b=p("jwicon"),b.id=w.id+"_"+a);q(b,a+"Icon","#"+b.id);w.contains(s)?w.replaceChild(b,s):w.appendChild(b);s=b};var A,x=0,z;this.setRotation=function(a,b){clearInterval(A);z=0;x=a|0;0===x?m():A=setInterval(m,b)};var H=this.hide=function(){w.style.opacity=0;w.style.cursor=""};this.show=function(){w.style.opacity=1;w.style.cursor="pointer"};w=p("jwdisplayIcon");w.id=b;n=v("background");F=v("capLeft");
-r=v("capRight");j=0<F.width*r.width;var J={"background-image":"url("+F.src+"), url("+n.src+"), url("+r.src+")","background-position":"left,center,right","background-repeat":"no-repeat",padding:"0 "+r.width+"px 0 "+F.width+"px",height:n.height,"margin-top":n.height/-2};c("#"+b,J);l.isMobile()||(n.overSrc&&(J["background-image"]="url("+F.overSrc+"), url("+n.overSrc+"), url("+r.overSrc+")"),c(".jw-tab-focus #"+b+", #"+e.id+" .jwdisplay:hover "+("#"+b),J));y=p("jwtext",w,f,C);s=p("jwicon",w);e.jwAddEventListener(g.events.JWPLAYER_RESIZE,
-h);H();t()};c(".jwplayer .jwdisplayIcon",{display:"table",position:"relative","margin-left":"auto","margin-right":"auto",top:"50%","float":"none"});c(".jwplayer .jwdisplayIcon div",{position:"relative",display:"table-cell","vertical-align":"middle","background-repeat":"no-repeat","background-position":"center"});c(".jwplayer .jwdisplayIcon div",{"vertical-align":"middle"},!0);c(".jwplayer .jwdisplayIcon .jwtext",{color:"#fff",padding:"0 1px","max-width":"300px","overflow-y":"hidden","text-align":"center",
-"-webkit-user-select":d,"-moz-user-select":d,"-ms-user-select":d,"user-select":d})})(jwplayer);
-(function(g){var l=g.html5,c=g.utils,a=c.css,d=c.bounds,k=".jwdockbuttons",b=document,e="none",f="block";l.dock=function(g,p){function q(a){return!a||!a.src?{}:{background:"url("+a.src+") center","background-size":a.width+"px "+a.height+"px"}}function v(b,d){var e=m(b);a(t("."+b),c.extend(q(e),{width:e.width}));return h("div",b,d)}function t(a){return"#"+n+" "+(a?a:"")}function h(a,c,d){a=b.createElement(a);c&&(a.className=c);d&&d.appendChild(a);return a}function m(a){return(a=F.getSkinElement("dock",
-a))?a:{width:0,height:0,src:""}}function D(){a(k+" .capLeft, "+k+" .capRight",{display:r?f:e})}var w=c.extend({},{iconalpha:0.75,iconalphaactive:0.5,iconalphaover:1,margin:8},p),n=g.id+"_dock",F=g.skin,r=0,j={},y={},s,I,L,B=this;B.redraw=function(){d(s)};B.element=function(){return s};B.offset=function(b){a(t(),{"margin-left":b})};B.hide=function(){B.visible&&(B.visible=!1,s.style.opacity=0,clearTimeout(L),L=setTimeout(function(){s.style.display=e},250))};B.showTemp=function(){B.visible||(s.style.opacity=
-0,s.style.display=f)};B.hideTemp=function(){B.visible||(s.style.display=e)};B.show=function(){!B.visible&&r&&(B.visible=!0,s.style.display=f,clearTimeout(L),L=setTimeout(function(){s.style.opacity=1},0))};B.addButton=function(b,e,f,g){if(!j[g]){var k=h("div","divider",I),m=h("div","button",I),A=h("div",null,m);A.id=n+"_"+g;A.innerHTML="\x26nbsp;";a("#"+A.id,{"background-image":b});"string"==typeof f&&(f=new Function(f));c.isMobile()?(new c.touch(m)).addEventListener(c.touchEvents.TAP,function(a){f(a)}):
-m.addEventListener("click",function(a){f(a);a.preventDefault()});j[g]={element:m,label:e,divider:k,icon:A};if(e){var u=new l.overlay(A.id+"_tooltip",F,!0);b=h("div");b.id=A.id+"_label";b.innerHTML=e;a("#"+b.id,{padding:3});u.setContents(b);if(!c.isMobile()){var w;m.addEventListener("mouseover",function(){clearTimeout(w);var b=y[g],e,f;e=d(j[g].icon);b.offsetX(0);f=d(s);a("#"+b.element().id,{left:e.left-f.left+e.width/2});e=d(b.element());f.left>e.left&&b.offsetX(f.left-e.left+8);u.show();c.foreach(y,
-function(a,b){a!=g&&b.hide()})},!1);m.addEventListener("mouseout",function(){w=setTimeout(u.hide,100)},!1);s.appendChild(u.element());y[g]=u}}r++;D()}};B.removeButton=function(a){if(j[a]){I.removeChild(j[a].element);I.removeChild(j[a].divider);var b=document.getElementById(""+n+"_"+a+"_tooltip");b&&s.removeChild(b);delete j[a];r--;D()}};B.numButtons=function(){return r};B.visible=!1;s=h("div","jwdock");I=h("div","jwdockbuttons");s.appendChild(I);s.id=n;var K=m("button"),u=m("buttonOver"),A=m("buttonActive");
-K&&(a(t(),{height:K.height,padding:w.margin}),a(k,{height:K.height}),a(t("div.button"),c.extend(q(K),{width:K.width,cursor:"pointer",border:e})),a(t("div.button:hover"),q(u)),a(t("div.button:active"),q(A)),a(t("div.button\x3ediv"),{opacity:w.iconalpha}),a(t("div.button:hover\x3ediv"),{opacity:w.iconalphaover}),a(t("div.button:active\x3ediv"),{opacity:w.iconalphaactive}),a(t(".jwoverlay"),{top:w.margin+K.height}),v("capLeft",I),v("capRight",I),v("divider"));setTimeout(function(){d(s)})};a(".jwdock",
-{opacity:0,display:e});a(".jwdock \x3e *",{height:"100%","float":"left"});a(".jwdock \x3e .jwoverlay",{height:"auto","float":e,"z-index":99});a(k+" div.button",{position:"relative"});a(k+" \x3e *",{height:"100%","float":"left"});a(k+" .divider",{display:e});a(k+" div.button ~ .divider",{display:f});a(k+" .capLeft, "+k+" .capRight",{display:e});a(k+" .capRight",{"float":"right"});a(k+" div.button \x3e div",{left:0,right:0,top:0,bottom:0,margin:5,position:"absolute","background-position":"center","background-repeat":"no-repeat"});
-c.transitionStyle(".jwdock","background .25s, opacity .25s");c.transitionStyle(".jwdock .jwoverlay","opacity .25s");c.transitionStyle(k+" div.button div","opacity .25s")})(jwplayer);
-(function(g){var l=g.html5,c=g.utils,a=g.events,d=a.state,k=g.playlist;l.instream=function(b,e,f,g){function p(a){D(a.type,a);J&&b.jwInstreamDestroy(!1,E)}function q(a){D(a.type,a);h()}function v(a){D(a.type,a)}function t(){z&&z.releaseState(E.jwGetState());A.play()}function h(){if(r&&j+1<r.length){j++;var d=r[j];F=new k.item(d);J.setPlaylist([d]);var e;y&&(e=y[j]);s=c.extend(n,e);A.load(J.playlist[0]);I.reset(s.skipoffset||-1);W=setTimeout(function(){D(a.JWPLAYER_PLAYLIST_ITEM,{index:j},!0)},0)}else W=
-setTimeout(function(){D(a.JWPLAYER_PLAYLIST_COMPLETE,{},!0);b.jwInstreamDestroy(!0,E)},0)}function m(a){a.width&&a.height&&(z&&z.releaseState(E.jwGetState()),f.resizeMedia())}function D(a,b){b=b||{};n.tag&&!b.tag&&(b.tag=n.tag);E.sendEvent(a,b)}function w(){x&&x.redraw();z&&z.redraw()}var n={controlbarseekable:"never",controlbarpausable:!0,controlbarstoppable:!0,loadingmessage:"Loading ad",playlistclickable:!0,skipoffset:null,tag:null},F,r,j=0,y,s={controlbarseekable:"never",controlbarpausable:!1,
-controlbarstoppable:!1},I,L,B,K,u,A,x,z,H,J,W=-1,E=c.extend(this,new a.eventdispatcher);b.jwAddEventListener(a.JWPLAYER_RESIZE,w);b.jwAddEventListener(a.JWPLAYER_FULLSCREEN,function(a){w();!a.fullscreen&&c.isIPad()&&(J.state===d.PAUSED?z.show(!0):J.state===d.PLAYING&&z.hide())});E.init=function(){L=g.detachMedia();A=new l.video(L,"instream");A.addGlobalListener(v);A.addEventListener(a.JWPLAYER_MEDIA_META,m);A.addEventListener(a.JWPLAYER_MEDIA_COMPLETE,h);A.addEventListener(a.JWPLAYER_MEDIA_BUFFER_FULL,
-t);A.addEventListener(a.JWPLAYER_MEDIA_ERROR,p);A.addEventListener(a.JWPLAYER_MEDIA_TIME,function(a){I&&I.updateSkipTime(a.position,a.duration)});A.attachMedia();A.mute(e.mute);A.volume(e.volume);J=new l.model({},A);J.setVolume(e.volume);J.setMute(e.mute);u=e.playlist[e.item];B=L.currentTime;g.checkBeforePlay()||0===B?(B=0,K=d.PLAYING):K=b.jwGetState()===d.IDLE||e.getVideo().checkComplete()?d.IDLE:d.PLAYING;K==d.PLAYING&&L.pause();z=new l.display(E);z.forceState(d.BUFFERING);H=document.createElement("div");
-H.id=E.id+"_instream_container";c.css.style(H,{width:"100%",height:"100%"});H.appendChild(z.element());x=new l.controlbar(E);x.instreamMode(!0);H.appendChild(x.element());b.jwGetControls()?(x.show(),z.show()):(x.hide(),z.hide());f.setupInstream(H,x,z,J);w();E.jwInstreamSetText(n.loadingmessage)};E.load=function(e,h){if(c.isAndroid(2.3))p({type:a.JWPLAYER_ERROR,message:"Error loading instream: Cannot play instream on Android 2.3"});else{D(a.JWPLAYER_PLAYLIST_ITEM,{index:j},!0);var g=10+c.bounds(H.parentNode).bottom-
-c.bounds(x.element()).top;"array"===c.typeOf(e)&&(h&&(y=h,h=h[j]),r=e,e=r[j]);s=c.extend(n,h);F=new k.item(e);J.setPlaylist([e]);I=new l.adskipbutton(b.id,g,s.skipMessage,s.skipText);I.addEventListener(a.JWPLAYER_AD_SKIPPED,q);I.reset(s.skipoffset||-1);b.jwGetControls()?I.show():I.hide();g=I.element();H.appendChild(g);J.addEventListener(a.JWPLAYER_ERROR,p);z.setAlternateClickHandler(function(e){e=e||{};e.hasControls=!!b.jwGetControls();D(a.JWPLAYER_INSTREAM_CLICK,e);e.hasControls?J.state===d.PAUSED?
-E.jwInstreamPlay():E.jwInstreamPause():c.isAndroid()&&J.state!==d.PAUSED&&E.jwInstreamPause()});c.isMSIE()&&L.parentElement.addEventListener("click",z.clickHandler);f.addEventListener(a.JWPLAYER_AD_SKIPPED,q);A.load(J.playlist[0])}};E.jwInstreamDestroy=function(b){if(J){clearTimeout(W);W=-1;A.detachMedia();g.attachMedia();if(K!==d.IDLE){var h=c.extend({},u);h.starttime=B;e.getVideo().load(h)}else e.getVideo().stop();E.resetEventListeners();A.resetEventListeners();J.resetEventListeners();if(x)try{x.element().parentNode.removeChild(x.element())}catch(j){}z&&
-(L&&L.parentElement&&L.parentElement.removeEventListener("click",z.clickHandler),z.revertAlternateClickHandler());D(a.JWPLAYER_INSTREAM_DESTROYED,{reason:b?"complete":"destroyed"},!0);K==d.PLAYING&&L.play();f.destroyInstream(A.audioMode());J=null}};E.jwInstreamAddEventListener=function(a,b){E.addEventListener(a,b)};E.jwInstreamRemoveEventListener=function(a,b){E.removeEventListener(a,b)};E.jwInstreamPlay=function(){A.play(!0);e.state=d.PLAYING;z.show()};E.jwInstreamPause=function(){A.pause(!0);e.state=
-d.PAUSED;b.jwGetControls()&&z.show()};E.jwInstreamSeek=function(a){A.seek(a)};E.jwInstreamSetText=function(a){x.setText(a)};E.jwInstreamState=function(){return e.state};E.setControls=function(a){a?I.show():I.hide()};E.jwPlay=function(){"true"==s.controlbarpausable.toString().toLowerCase()&&E.jwInstreamPlay()};E.jwPause=function(){"true"==s.controlbarpausable.toString().toLowerCase()&&E.jwInstreamPause()};E.jwStop=function(){"true"==s.controlbarstoppable.toString().toLowerCase()&&(b.jwInstreamDestroy(!1,
-E),b.jwStop())};E.jwSeek=function(a){switch(s.controlbarseekable.toLowerCase()){case "always":E.jwInstreamSeek(a);break;case "backwards":J.position>a&&E.jwInstreamSeek(a)}};E.jwSeekDrag=function(a){J.seekDrag(a)};E.jwGetPosition=function(){};E.jwGetDuration=function(){};E.jwGetWidth=b.jwGetWidth;E.jwGetHeight=b.jwGetHeight;E.jwGetFullscreen=b.jwGetFullscreen;E.jwSetFullscreen=b.jwSetFullscreen;E.jwGetVolume=function(){return e.volume};E.jwSetVolume=function(a){J.setVolume(a);b.jwSetVolume(a)};E.jwGetMute=
-function(){return e.mute};E.jwSetMute=function(a){J.setMute(a);b.jwSetMute(a)};E.jwGetState=function(){return!J?d.IDLE:J.state};E.jwGetPlaylist=function(){return[F]};E.jwGetPlaylistIndex=function(){return 0};E.jwGetStretching=function(){return e.config.stretching};E.jwAddEventListener=function(a,b){E.addEventListener(a,b)};E.jwRemoveEventListener=function(a,b){E.removeEventListener(a,b)};E.jwSetCurrentQuality=function(){};E.jwGetQualityLevels=function(){return[]};E.jwGetControls=function(){return b.jwGetControls()};
-E.skin=b.skin;E.id=b.id+"_instream";return E}})(window.jwplayer);
-(function(g){var l=g.utils,c=l.css,a=g.events.state,d=g.html5.logo=function(k,b){function e(b){l.exists(b)&&b.stopPropagation&&b.stopPropagation();if(!t||!p.link)f.jwGetState()==a.IDLE||f.jwGetState()==a.PAUSED?f.jwPlay():f.jwPause();t&&p.link&&(f.jwPause(),f.jwSetFullscreen(!1),window.open(p.link,p.linktarget))}var f=k,C=f.id+"_logo",p,q,v=d.defaults,t=!1;this.resize=function(){};this.element=function(){return q};this.offset=function(a){c("#"+C+" ",{"margin-bottom":a})};this.position=function(){return p.position};
-this.margin=function(){return parseInt(p.margin)};this.hide=function(a){if(p.hide||a)t=!1,q.style.visibility="hidden",q.style.opacity=0};this.show=function(){t=!0;q.style.visibility="visible";q.style.opacity=1};var h="o";f.edition&&(h=f.edition(),h="pro"==h?"p":"premium"==h?"r":"ads"==h?"a":"free"==h?"f":"o");if("o"==h||"f"==h)v.link="http://www.longtailvideo.com/jwpabout/?a\x3dl\x26v\x3d"+g.version+"\x26m\x3dh\x26e\x3d"+h;p=l.extend({},v,b);p.hide="true"==p.hide.toString();q=document.createElement("img");
-q.className="jwlogo";q.id=C;if(p.file){var v=/(\w+)-(\w+)/.exec(p.position),h={},m=p.margin;3==v.length?(h[v[1]]=m,h[v[2]]=m):h.top=h.right=m;c("#"+C+" ",h);q.src=(p.prefix?p.prefix:"")+p.file;l.isMobile()?(new l.touch(q)).addEventListener(l.touchEvents.TAP,e):q.onclick=e}else q.style.display="none";return this};d.defaults={prefix:l.repo(),file:"logo.png",linktarget:"_top",margin:8,hide:!1,position:"top-right"};c(".jwlogo",{cursor:"pointer",position:"absolute"})})(jwplayer);
-(function(g){var l=g.html5,c=g.utils,a=c.css;l.menu=function(d,g,b,e){function f(a){return!a||!a.src?{}:{background:"url("+a.src+") no-repeat left","background-size":a.width+"px "+a.height+"px"}}function C(a,b){return function(){D(a);v&&v(b)}}function p(a,b){var c=document.createElement("div");a&&(c.className=a);b&&b.appendChild(c);return c}function q(a){return(a=b.getSkinElement("tooltip",a))?a:{width:0,height:0,src:void 0}}var v=e,t=new l.overlay(g+"_overlay",b);e=c.extend({fontcase:void 0,fontcolor:"#cccccc",
-fontsize:11,fontweight:void 0,activecolor:"#ffffff",overcolor:"#ffffff"},b.getComponentSettings("tooltip"));var h,m=[];this.element=function(){return t.element()};this.addOption=function(a,b){var d=p("jwoption",h);d.id=g+"_option_"+b;d.innerHTML=a;c.isMobile()?(new c.touch(d)).addEventListener(c.touchEvents.TAP,C(m.length,b)):d.addEventListener("click",C(m.length,b));m.push(d)};this.clearOptions=function(){for(;0<m.length;)h.removeChild(m.pop())};var D=this.setActive=function(a){for(var b=0;b<m.length;b++){var c=
-m[b];c.className=c.className.replace(" active","");b==a&&(c.className+=" active")}};this.show=t.show;this.hide=t.hide;this.offsetX=t.offsetX;this.positionX=t.positionX;this.constrainX=t.constrainX;h=p("jwmenu");h.id=g;var w=q("menuTop"+d);d=q("menuOption");var n=q("menuOptionOver"),F=q("menuOptionActive");if(w&&w.image){var r=new Image;r.src=w.src;r.width=w.width;r.height=w.height;h.appendChild(r)}d&&(w="#"+g+" .jwoption",a(w,c.extend(f(d),{height:d.height,color:e.fontcolor,"padding-left":d.width,
-font:e.fontweight+" "+e.fontsize+"px Arial,Helvetica,sans-serif","line-height":d.height,"text-transform":"upper"==e.fontcase?"uppercase":void 0})),a(w+":hover",c.extend(f(n),{color:e.overcolor})),a(w+".active",c.extend(f(F),{color:e.activecolor})));t.setContents(h)};a("."+"jwmenu jwoption".replace(/ /g," ."),{cursor:"pointer","white-space":"nowrap",position:"relative"})})(jwplayer);
-(function(g){var l=g.html5,c=g.utils,a=g.events;l.model=function(d,k){function b(a){var b=t[a.type];if(b&&b.length){for(var c=!1,d=0;d<b.length;d++){var f=b[d].split("-\x3e"),h=f[0],f=f[1]||h;e[f]!==a[h]&&(e[f]=a[h],c=!0)}c&&e.sendEvent(a.type,a)}else e.sendEvent(a.type,a)}var e=this,f,C={html5:k||new l.video(null,"default")},p=c.getCookies(),q={controlbar:{},display:{}},v={autostart:!1,controls:!0,fullscreen:!1,height:320,mobilecontrols:!1,mute:!1,playlist:[],playlistposition:"none",playlistsize:180,
-playlistlayout:"extended",repeat:!1,stretching:c.stretching.UNIFORM,width:480,volume:90},t={};t[a.JWPLAYER_MEDIA_MUTE]=["mute"];t[a.JWPLAYER_MEDIA_VOLUME]=["volume"];t[a.JWPLAYER_PLAYER_STATE]=["newstate-\x3estate"];t[a.JWPLAYER_MEDIA_BUFFER]=["bufferPercent-\x3ebuffer"];t[a.JWPLAYER_MEDIA_TIME]=["position","duration"];e.setVideo=function(a){if(a!==f){if(f){f.removeGlobalListener(b);var c=f.getContainer();c&&(f.remove(),a.setContainer(c))}f=a;f.volume(e.volume);f.mute(e.mute);f.addGlobalListener(b)}};
-e.destroy=function(){f&&(f.removeGlobalListener(b),f.destroy())};e.getVideo=function(){return f};e.seekDrag=function(a){f.seekDrag(a)};e.setFullscreen=function(b){b=!!b;b!=e.fullscreen&&(e.fullscreen=b,e.sendEvent(a.JWPLAYER_FULLSCREEN,{fullscreen:b}))};e.setPlaylist=function(b){e.playlist=c.filterPlaylist(b,!1,e.androidhls);0===e.playlist.length?e.sendEvent(a.JWPLAYER_ERROR,{message:"Error loading playlist: No playable sources found"}):(e.sendEvent(a.JWPLAYER_PLAYLIST_LOADED,{playlist:g(e.id).getPlaylist()}),
-e.item=-1,e.setItem(0))};e.setItem=function(b){var d=!1;b==e.playlist.length||-1>b?(b=0,d=!0):b=-1==b||b>e.playlist.length?e.playlist.length-1:b;if(d||b!==e.item){e.item=b;e.sendEvent(a.JWPLAYER_PLAYLIST_ITEM,{index:e.item});d=e.playlist[b];b=C.html5;if(e.playlist.length){var h=d.sources[0];if("youtube"===h.type||c.isYouTube(h.file))b=C.youtube,b!==f&&(b&&b.destroy(),b=C.youtube=new l.youtube(e.id))}e.setVideo(b);b.init&&b.init(d)}};e.setVolume=function(d){e.mute&&0<d&&e.setMute(!1);d=Math.round(d);
-e.mute||c.saveCookie("volume",d);b({type:a.JWPLAYER_MEDIA_VOLUME,volume:d});f.volume(d)};e.setMute=function(d){c.exists(d)||(d=!e.mute);c.saveCookie("mute",d);b({type:a.JWPLAYER_MEDIA_MUTE,mute:d});f.mute(d)};e.componentConfig=function(a){return q[a]};c.extend(e,new a.eventdispatcher);var h=e,m=c.extend({},v,p,d);c.foreach(m,function(a,b){m[a]=c.serialize(b)});h.config=m;c.extend(e,{id:d.id,state:a.state.IDLE,duration:-1,position:0,buffer:0},e.config);e.playlist=[];e.setItem(0)}})(jwplayer);
-(function(g){var l=g.utils,c=l.css,a=l.transitionStyle,d="top",k="bottom",b="right",e="left",f=document,C={fontcase:void 0,fontcolor:"#ffffff",fontsize:12,fontweight:void 0,activecolor:"#ffffff",overcolor:"#ffffff"};g.html5.overlay=function(a,g,v){function t(a){return"#"+F+(a?" ."+a:"")}function h(a,b){var c=f.createElement("div");a&&(c.className=a);b&&b.appendChild(c);return c}function m(a,b){var d;d=(d=r.getSkinElement("tooltip",a))?d:{width:0,height:0,src:"",image:void 0,ready:!1};var e=h(b,y);
-c.style(e,D(d));return[e,d]}function D(a){return{background:"url("+a.src+") center","background-size":a.width+"px "+a.height+"px"}}function w(a,f){f||(f="");var h=m("cap"+a+f,"jwborder jw"+a+(f?f:"")),g=h[0],h=h[1],s=l.extend(D(h),{width:a==e||f==e||a==b||f==b?h.width:void 0,height:a==d||f==d||a==k||f==k?h.height:void 0});s[a]=a==k&&!j||a==d&&j?I.height:0;f&&(s[f]=0);c.style(g,s);g={};s={};h={left:h.width,right:h.width,top:(j?I.height:0)+h.height,bottom:(j?0:I.height)+h.height};f&&(g[f]=h[f],g[a]=
-0,s[a]=h[a],s[f]=0,c(t("jw"+a),g),c(t("jw"+f),s),B[a]=h[a],B[f]=h[f])}var n=this,F=a,r=g,j=v,y,s,I,L;a=l.extend({},C,r.getComponentSettings("tooltip"));var B={};n.element=function(){return y};n.setContents=function(a){l.empty(s);s.appendChild(a)};n.positionX=function(a){c.style(y,{left:Math.round(a)})};n.constrainX=function(a,b){if(n.showing&&0!==a.width&&n.offsetX(0)){b&&c.unblock();var d=l.bounds(y);0!==d.width&&(d.right>a.right?n.offsetX(a.right-d.right):d.left<a.left&&n.offsetX(a.left-d.left))}};
-n.offsetX=function(a){a=Math.round(a);var b=y.clientWidth;0!==b&&(c.style(y,{"margin-left":Math.round(-b/2)+a}),c.style(L,{"margin-left":Math.round(-I.width/2)-a}));return b};n.borderWidth=function(){return B.left};n.show=function(){n.showing=!0;c.style(y,{opacity:1,visibility:"visible"})};n.hide=function(){n.showing=!1;c.style(y,{opacity:0,visibility:"hidden"})};y=h(".jwoverlay".replace(".",""));y.id=F;g=m("arrow","jwarrow");L=g[0];I=g[1];c.style(L,{position:"absolute",bottom:j?void 0:0,top:j?0:
-void 0,width:I.width,height:I.height,left:"50%"});w(d,e);w(k,e);w(d,b);w(k,b);w(e);w(b);w(d);w(k);g=m("background","jwback");c.style(g[0],{left:B.left,right:B.right,top:B.top,bottom:B.bottom});s=h("jwcontents",y);c(t("jwcontents")+" *",{color:a.fontcolor,font:a.fontweight+" "+a.fontsize+"px Arial,Helvetica,sans-serif","text-transform":"upper"==a.fontcase?"uppercase":void 0});j&&l.transform(t("jwarrow"),"rotate(180deg)");c.style(y,{padding:B.top+1+"px "+B.right+"px "+(B.bottom+1)+"px "+B.left+"px"});
-n.showing=!1};c(".jwoverlay",{position:"absolute",visibility:"hidden",opacity:0});c(".jwoverlay .jwcontents",{position:"relative","z-index":1});c(".jwoverlay .jwborder",{position:"absolute","background-size":"100% 100%"},!0);c(".jwoverlay .jwback",{position:"absolute","background-size":"100% 100%"});a(".jwoverlay","opacity .25s, visibility .25s")})(jwplayer);
-(function(g){var l=g.html5,c=g.utils;l.player=function(a){function d(){for(var a=C.playlist,b=[],c=0;c<a.length;c++)b.push(k(a[c]));return b}function k(a){var b={description:a.description,file:a.file,image:a.image,mediaid:a.mediaid,title:a.title};c.foreach(a,function(a,c){b[a]=c});b.sources=[];b.tracks=[];0<a.sources.length&&c.foreach(a.sources,function(a,c){b.sources.push({file:c.file,type:c.type?c.type:void 0,label:c.label,"default":c["default"]?!0:!1})});0<a.tracks.length&&c.foreach(a.tracks,function(a,
-c){b.tracks.push({file:c.file,kind:c.kind?c.kind:void 0,label:c.label,"default":c["default"]?!0:!1})});!a.file&&0<a.sources.length&&(b.file=a.sources[0].file);return b}function b(){f.jwPlay=q.play;f.jwPause=q.pause;f.jwStop=q.stop;f.jwSeek=q.seek;f.jwSetVolume=q.setVolume;f.jwSetMute=q.setMute;f.jwLoad=function(a){q.load(a)};f.jwPlaylistNext=q.next;f.jwPlaylistPrev=q.prev;f.jwPlaylistItem=q.item;f.jwSetFullscreen=q.setFullscreen;f.jwResize=p.resize;f.jwSeekDrag=C.seekDrag;f.jwGetQualityLevels=q.getQualityLevels;
-f.jwGetCurrentQuality=q.getCurrentQuality;f.jwSetCurrentQuality=q.setCurrentQuality;f.jwGetCaptionsList=q.getCaptionsList;f.jwGetCurrentCaptions=q.getCurrentCaptions;f.jwSetCurrentCaptions=q.setCurrentCaptions;f.jwGetSafeRegion=p.getSafeRegion;f.jwForceState=p.forceState;f.jwReleaseState=p.releaseState;f.jwGetPlaylistIndex=e("item");f.jwGetPosition=e("position");f.jwGetDuration=e("duration");f.jwGetBuffer=e("buffer");f.jwGetWidth=e("width");f.jwGetHeight=e("height");f.jwGetFullscreen=e("fullscreen");
-f.jwGetVolume=e("volume");f.jwGetMute=e("mute");f.jwGetState=e("state");f.jwGetStretching=e("stretching");f.jwGetPlaylist=d;f.jwGetControls=e("controls");f.jwDetachMedia=q.detachMedia;f.jwAttachMedia=q.attachMedia;f.jwPlayAd=function(a){var b=g(f.id).plugins;b.vast&&b.vast.jwPlayAd(a)};f.jwPauseAd=function(){var a=g(f.id).plugins;a.googima&&a.googima.jwPauseAd()};f.jwDestroyGoogima=function(){var a=g(f.id).plugins;a.googima&&a.googima.jwDestroyGoogima()};f.jwInitInstream=function(){f.jwInstreamDestroy();
-t=new l.instream(f,C,p,q);t.init()};f.jwLoadItemInstream=function(a,b){if(!t)throw"Instream player undefined";t.load(a,b)};f.jwLoadArrayInstream=function(a,b){if(!t)throw"Instream player undefined";t.load(a,b)};f.jwSetControls=function(a){p.setControls(a);t&&t.setControls(a)};f.jwInstreamPlay=function(){t&&t.jwInstreamPlay()};f.jwInstreamPause=function(){t&&t.jwInstreamPause()};f.jwInstreamState=function(){return t?t.jwInstreamState():""};f.jwInstreamDestroy=function(a,b){if(b=b||t)b.jwInstreamDestroy(a||
-!1),b===t&&(t=void 0)};f.jwInstreamAddEventListener=function(a,b){t&&t.jwInstreamAddEventListener(a,b)};f.jwInstreamRemoveEventListener=function(a,b){t&&t.jwInstreamRemoveEventListener(a,b)};f.jwPlayerDestroy=function(){p&&p.destroy();C&&C.destroy();v&&v.resetEventListeners()};f.jwInstreamSetText=function(a){t&&t.jwInstreamSetText(a)};f.jwIsBeforePlay=function(){return q.checkBeforePlay()};f.jwIsBeforeComplete=function(){return C.getVideo().checkComplete()};f.jwSetCues=p.addCues;f.jwAddEventListener=
-q.addEventListener;f.jwRemoveEventListener=q.removeEventListener;f.jwDockAddButton=p.addButton;f.jwDockRemoveButton=p.removeButton}function e(a){return function(){return C[a]}}var f=this,C,p,q,v,t;C=new l.model(a);f.id=C.id;f._model=C;c.css.block(f.id);p=new l.view(f,C);q=new l.controller(C,p);b();f.initializeAPI=b;v=new l.setup(C,p);v.addEventListener(g.events.JWPLAYER_READY,function(a){q.playerReady(a);c.css.unblock(f.id)});v.addEventListener(g.events.JWPLAYER_ERROR,function(a){c.log("There was a problem setting up the player: ",
-a);c.css.unblock(f.id)});v.start()}})(window.jwplayer);
-(function(g){var l={size:180,backgroundcolor:"#333333",fontcolor:"#999999",overcolor:"#CCCCCC",activecolor:"#CCCCCC",titlecolor:"#CCCCCC",titleovercolor:"#FFFFFF",titleactivecolor:"#FFFFFF",fontweight:"normal",titleweight:"normal",fontsize:11,titlesize:13},c=jwplayer.events,a=jwplayer.utils,d=a.css,k=a.isMobile(),b=document;g.playlistcomponent=function(e,f){function C(a){return"#"+m.id+(a?" ."+a:"")}function p(a,c){var d=b.createElement(a);c&&(d.className=c);return d}function q(a){return function(){r=
-a;v.jwPlaylistItem(a);v.jwPlay(!0)}}var v=e,t=v.skin,h=a.extend({},l,v.skin.getComponentSettings("playlist"),f),m,D,w,n,F=-1,r,j,y=76,s={background:void 0,divider:void 0,item:void 0,itemOver:void 0,itemImage:void 0,itemActive:void 0},I,L=this;L.element=function(){return m};L.redraw=function(){j&&j.redraw()};L.show=function(){a.show(m)};L.hide=function(){a.hide(m)};m=p("div","jwplaylist");m.id=v.id+"_jwplayer_playlistcomponent";I="basic"==v._model.playlistlayout;D=p("div","jwlistcontainer");m.appendChild(D);
-a.foreach(s,function(a){s[a]=t.getSkinElement("playlist",a)});I&&(y=32);s.divider&&(y+=s.divider.height);var B=0,K=0,u=0;a.clearCss(C());d(C(),{"background-color":h.backgroundcolor});d(C("jwlist"),{"background-image":s.background?" url("+s.background.src+")":""});d(C("jwlist *"),{color:h.fontcolor,font:h.fontweight+" "+h.fontsize+"px Arial, Helvetica, sans-serif"});s.itemImage?(B=(y-s.itemImage.height)/2+"px ",K=s.itemImage.width,u=s.itemImage.height):(K=4*y/3,u=y);s.divider&&d(C("jwplaylistdivider"),
-{"background-image":"url("+s.divider.src+")","background-size":"100% "+s.divider.height+"px",width:"100%",height:s.divider.height});d(C("jwplaylistimg"),{height:u,width:K,margin:B?B+"0 "+B+B:"0 5px 0 0"});d(C("jwlist li"),{"background-image":s.item?"url("+s.item.src+")":"",height:y,overflow:"hidden","background-size":"100% "+y+"px",cursor:"pointer"});B={overflow:"hidden"};""!==h.activecolor&&(B.color=h.activecolor);s.itemActive&&(B["background-image"]="url("+s.itemActive.src+")");d(C("jwlist li.active"),
-B);d(C("jwlist li.active .jwtitle"),{color:h.titleactivecolor});d(C("jwlist li.active .jwdescription"),{color:h.activecolor});B={overflow:"hidden"};""!==h.overcolor&&(B.color=h.overcolor);s.itemOver&&(B["background-image"]="url("+s.itemOver.src+")");k||(d(C("jwlist li:hover"),B),d(C("jwlist li:hover .jwtitle"),{color:h.titleovercolor}),d(C("jwlist li:hover .jwdescription"),{color:h.overcolor}));d(C("jwtextwrapper"),{height:y,position:"relative"});d(C("jwtitle"),{overflow:"hidden",display:"inline-block",
-height:I?y:20,color:h.titlecolor,"font-size":h.titlesize,"font-weight":h.titleweight,"margin-top":I?"0 10px":10,"margin-left":10,"margin-right":10,"line-height":I?y:20});d(C("jwdescription"),{display:"block","font-size":h.fontsize,"line-height":18,"margin-left":10,"margin-right":10,overflow:"hidden",height:36,position:"relative"});v.jwAddEventListener(c.JWPLAYER_PLAYLIST_LOADED,function(){D.innerHTML="";for(var b=v.jwGetPlaylist(),c=[],e=0;e<b.length;e++)b[e]["ova.hidden"]||c.push(b[e]);if(w=c){b=
-p("ul","jwlist");b.id=m.id+"_ul"+Math.round(1E7*Math.random());n=b;for(b=0;b<w.length;b++){var f=b,c=w[f],e=p("li","jwitem"),h=void 0;e.id=n.id+"_item_"+f;0<f?(h=p("div","jwplaylistdivider"),e.appendChild(h)):(f=s.divider?s.divider.height:0,e.style.height=y-f+"px",e.style["background-size"]="100% "+(y-f)+"px");f=p("div","jwplaylistimg jwfill");h=void 0;c["playlist.image"]&&s.itemImage?h=c["playlist.image"]:c.image&&s.itemImage?h=c.image:s.itemImage&&(h=s.itemImage.src);h&&!I&&(d("#"+e.id+" .jwplaylistimg",
-{"background-image":h}),e.appendChild(f));f=p("div","jwtextwrapper");h=p("span","jwtitle");h.innerHTML=c&&c.title?c.title:"";f.appendChild(h);c.description&&!I&&(h=p("span","jwdescription"),h.innerHTML=c.description,f.appendChild(h));e.appendChild(f);c=e;k?(new a.touch(c)).addEventListener(a.touchEvents.TAP,q(b)):c.onclick=q(b);n.appendChild(c)}F=v.jwGetPlaylistIndex();D.appendChild(n);j=new g.playlistslider(m.id+"_slider",v.skin,m,n)}});v.jwAddEventListener(c.JWPLAYER_PLAYLIST_ITEM,function(a){0<=
-F&&(b.getElementById(n.id+"_item_"+F).className="jwitem",F=a.index);b.getElementById(n.id+"_item_"+a.index).className="jwitem active";a=v.jwGetPlaylistIndex();a!=r&&(r=-1,j&&j.visible()&&j.thumbPosition(a/(v.jwGetPlaylist().length-1)))});v.jwAddEventListener(c.JWPLAYER_RESIZE,function(){L.redraw()});return this};d(".jwplaylist",{position:"absolute",width:"100%",height:"100%"});a.dragStyle(".jwplaylist","none");d(".jwplaylist .jwplaylistimg",{position:"relative",width:"100%","float":"left",margin:"0 5px 0 0",
-background:"#000",overflow:"hidden"});d(".jwplaylist .jwlist",{position:"absolute",width:"100%","list-style":"none",margin:0,padding:0,overflow:"hidden"});d(".jwplaylist .jwlistcontainer",{position:"absolute",overflow:"hidden",width:"100%",height:"100%"});d(".jwplaylist .jwlist li",{width:"100%"});d(".jwplaylist .jwtextwrapper",{overflow:"hidden"});d(".jwplaylist .jwplaylistdivider",{position:"absolute"});k&&a.transitionStyle(".jwplaylist .jwlist","top .35s")})(jwplayer.html5);
-(function(g){function l(){var a=[],b;for(b=0;b<arguments.length;b++)a.push(".jwplaylist ."+arguments[b]);return a.join(",")}var c=jwplayer.utils,a=c.touchEvents,d=c.css,k=document,b=window;g.playlistslider=function(e,f,g,l){function q(a){return"#"+y.id+(a?" ."+a:"")}function v(a,b,c,e){var f=k.createElement("div");a&&(f.className=a,b&&d(q(a),{"background-image":b.src?b.src:void 0,"background-repeat":e?"repeat-y":"no-repeat",height:e?void 0:b.height}));c&&c.appendChild(f);return f}function t(a){return(a=
-r.getSkinElement("playlist",a))?a:{width:0,height:0,src:void 0}}function h(a){if(A)return a=a?a:b.event,X(B-(a.detail?-1*a.detail:a.wheelDelta/40)/10),a.stopPropagation&&a.stopPropagation(),a.preventDefault?a.preventDefault():a.returnValue=!1,a.cancelBubble=!0,a.cancel=!0,!1}function m(a){0==a.button&&(L=!0);k.onselectstart=function(){return!1};b.addEventListener("mousemove",w,!1);b.addEventListener("mouseup",F,!1)}function D(a){X(B-2*a.deltaY/j.clientHeight)}function w(a){if(L||"click"==a.type){var b=
-c.bounds(s),d=I.clientHeight/2;X((a.pageY-b.top-d)/(b.height-d-d))}}function n(a){return function(b){0<b.button||(X(B+0.05*a),K=setTimeout(function(){u=setInterval(function(){X(B+0.05*a)},50)},500))}}function F(){L=!1;b.removeEventListener("mousemove",w);b.removeEventListener("mouseup",F);k.onselectstart=void 0;clearTimeout(K);clearInterval(u)}var r=f,j=l,y,s,I,L,B=0,K,u;f=c.isMobile();var A=!0,x,z,H,J,W,E,S,Y,O;this.element=function(){return y};this.visible=function(){return A};var T=this.redraw=
-function(){clearTimeout(O);O=setTimeout(function(){if(j&&j.clientHeight){var a=j.parentNode.clientHeight/j.clientHeight;0>a&&(a=0);1<a?A=!1:(A=!0,d(q("jwthumb"),{height:Math.max(s.clientHeight*a,W.height+E.height)}));d(q(),{visibility:A?"visible":"hidden"});j&&(j.style.width=A?j.parentElement.clientWidth-H.width+"px":"")}else O=setTimeout(T,10)},0)},X=this.thumbPosition=function(a){isNaN(a)&&(a=0);B=Math.max(0,Math.min(1,a));d(q("jwthumb"),{top:S+(s.clientHeight-I.clientHeight)*B});l&&(l.style.top=
-Math.min(0,y.clientHeight-l.scrollHeight)*B+"px")};y=v("jwslider",null,g);y.id=e;e=new c.touch(j);f?e.addEventListener(a.DRAG,D):(y.addEventListener("mousedown",m,!1),y.addEventListener("click",w,!1));x=t("sliderCapTop");z=t("sliderCapBottom");H=t("sliderRail");e=t("sliderRailCapTop");g=t("sliderRailCapBottom");J=t("sliderThumb");W=t("sliderThumbCapTop");E=t("sliderThumbCapBottom");S=x.height;Y=z.height;d(q(),{width:H.width});d(q("jwrail"),{top:S,bottom:Y});d(q("jwthumb"),{top:S});x=v("jwslidertop",
-x,y);z=v("jwsliderbottom",z,y);s=v("jwrail",null,y);I=v("jwthumb",null,y);f||(x.addEventListener("mousedown",n(-1),!1),z.addEventListener("mousedown",n(1),!1));v("jwrailtop",e,s);v("jwrailback",H,s,!0);v("jwrailbottom",g,s);d(q("jwrailback"),{top:e.height,bottom:g.height});v("jwthumbtop",W,I);v("jwthumbback",J,I,!0);v("jwthumbbottom",E,I);d(q("jwthumbback"),{top:W.height,bottom:E.height});T();j&&!f&&(j.addEventListener("mousewheel",h,!1),j.addEventListener("DOMMouseScroll",h,!1));return this};d(l("jwslider"),
-{position:"absolute",height:"100%",visibility:"hidden",right:0,top:0,cursor:"pointer","z-index":1,overflow:"hidden"});d(l("jwslider")+" *",{position:"absolute",width:"100%","background-position":"center","background-size":"100% 100%",overflow:"hidden"});d(l("jwslidertop","jwrailtop","jwthumbtop"),{top:0});d(l("jwsliderbottom","jwrailbottom","jwthumbbottom"),{bottom:0})})(jwplayer.html5);
-(function(g){var l=jwplayer.utils,c=l.css,a=document,d="none";g.rightclick=function(c,b){function e(b){var c=a.createElement("div");c.className=b.replace(".","");return c}function f(){q||(v.style.display=d)}var C,p=l.extend({aboutlink:"http://www.longtailvideo.com/jwpabout/?a\x3dr\x26v\x3d"+g.version+"\x26m\x3dh\x26e\x3do",abouttext:"About JW Player "+g.version+"..."},b),q=!1,v,t;this.element=function(){return v};this.destroy=function(){a.removeEventListener("mousedown",f,!1)};C=a.getElementById(c.id);
-v=e(".jwclick");v.id=c.id+"_menu";v.style.display=d;C.oncontextmenu=function(a){if(!q){null==a&&(a=window.event);var b=null!=a.target?a.target:a.srcElement,c=l.bounds(C),b=l.bounds(b);v.style.display=d;v.style.left=(a.offsetX?a.offsetX:a.layerX)+b.left-c.left+"px";v.style.top=(a.offsetY?a.offsetY:a.layerY)+b.top-c.top+"px";v.style.display="block";a.preventDefault()}};v.onmouseover=function(){q=!0};v.onmouseout=function(){q=!1};a.addEventListener("mousedown",f,!1);t=e(".jwclick_item");t.innerHTML=
-p.abouttext;t.onclick=function(){window.top.location=p.aboutlink};v.appendChild(t);C.appendChild(v)};c(".jwclick",{"background-color":"#FFF","-webkit-border-radius":5,"-moz-border-radius":5,"border-radius":5,height:"auto",border:"1px solid #bcbcbc","font-family":'"MS Sans Serif", "Geneva", sans-serif',"font-size":10,width:320,"-webkit-box-shadow":"5px 5px 7px rgba(0,0,0,.10), 0px 1px 0px rgba(255,255,255,.3) inset","-moz-box-shadow":"5px 5px 7px rgba(0,0,0,.10), 0px 1px 0px rgba(255,255,255,.3) inset",
-"box-shadow":"5px 5px 7px rgba(0,0,0,.10), 0px 1px 0px rgba(255,255,255,.3) inset",position:"absolute","z-index":999},!0);c(".jwclick div",{padding:"8px 21px",margin:"0px","background-color":"#FFF",border:"none","font-family":'"MS Sans Serif", "Geneva", sans-serif',"font-size":10,color:"inherit"},!0);c(".jwclick_item",{padding:"8px 21px","text-align":"left",cursor:"pointer"},!0);c(".jwclick_item:hover",{"background-color":"#595959",color:"#FFF"},!0);c(".jwclick_item a",{"text-decoration":d,color:"#000"},
-!0);c(".jwclick hr",{width:"100%",padding:0,margin:0,border:"1px #e9e9e9 solid"},!0)})(jwplayer.html5);
-(function(g){var l=g.html5,c=g.utils,a=g.events,d=2,k=4;l.setup=function(b,e){function f(){for(var a=0;a<F.length;a++){var b=F[a],c;a:{if(c=b.depends){c=c.toString().split(",");for(var d=0;d<c.length;d++)if(!h[c[d]]){c=!1;break a}}c=!0}if(c){F.splice(a,1);try{b.method(),f()}catch(e){v(e.message)}return}}0<F.length&&!w&&setTimeout(f,500)}function C(){h[d]=!0}function p(a){v("Error loading skin: "+a)}function q(){n&&(n.onload=null,n=n.onerror=null);clearTimeout(r);h[k]=!0}function v(b){w=!0;D.sendEvent(a.JWPLAYER_ERROR,
-{message:b});t.setupError(b)}var t=e,h={},m,D=new a.eventdispatcher,w=!1,n,F=[{name:1,method:function(){b.edition&&"invalid"==b.edition()?v("Error setting up player: Invalid license key"):h[1]=!0},depends:!1},{name:d,method:function(){m=new l.skin;m.load(b.config.skin,C,p)},depends:1},{name:3,method:function(){var a=c.typeOf(b.config.playlist);"array"===a?(a=new g.playlist(b.config.playlist),b.setPlaylist(a),0===b.playlist.length||0===b.playlist[0].sources.length?v("Error loading playlist: No playable sources found"):
-h[3]=!0):v("Playlist type not supported: "+a)},depends:1},{name:k,method:function(){var a=b.playlist[b.item].image;a?(n=new Image,n.onload=q,n.onerror=q,n.src=a,clearTimeout(r),r=setTimeout(q,500)):q()},depends:3},{name:5,method:function(){t.setup(m);h[5]=!0},depends:k+","+d},{name:6,method:function(){h[6]=!0},depends:"5,3"},{name:7,method:function(){D.sendEvent(a.JWPLAYER_READY);h[7]=!0},depends:6}],r=-1;c.extend(this,D);this.start=f}})(jwplayer);
-(function(g){g.skin=function(){var l={},c=!1;this.load=function(a,d,k){new g.skinloader(a,function(a){c=!0;l=a;"function"==typeof d&&d()},function(a){"function"==typeof k&&k(a)})};this.getSkinElement=function(a,d){a=a.toLowerCase();d=d.toLowerCase();if(c)try{return l[a].elements[d]}catch(g){jwplayer.utils.log("No such skin component / element: ",[a,d])}return null};this.getComponentSettings=function(a){a=a.toLowerCase();return c&&l&&l[a]?l[a].settings:null};this.getComponentLayout=function(a){a=a.toLowerCase();
-if(c){var d=l[a].layout;if(d&&(d.left||d.right||d.center))return l[a].layout}return null}}})(jwplayer.html5);
-(function(g){var l=jwplayer.utils,c=l.foreach,a="Skin formatting error";g.skinloader=function(d,k,b){function e(b){t=b;l.ajax(l.getAbsolutePath(n),function(b){try{l.exists(b.responseXML)&&C(b.responseXML)}catch(c){m(a)}},function(a){m(a)})}function f(a,b){return a?a.getElementsByTagName(b):null}function C(a){var b=f(a,"skin")[0];a=f(b,"component");var c=b.getAttribute("target"),b=parseFloat(b.getAttribute("pixelratio"));0<b&&(j=b);(!c||parseFloat(c)>parseFloat(jwplayer.version))&&m("Incompatible player version");
-if(0===a.length)h(t);else for(c=0;c<a.length;c++){var d=v(a[c].getAttribute("name")),b={settings:{},elements:{},layout:{}},e=f(f(a[c],"elements")[0],"element");t[d]=b;for(var g=0;g<e.length;g++)q(e[g],d);if((d=f(a[c],"settings")[0])&&0<d.childNodes.length){d=f(d,"setting");for(e=0;e<d.length;e++){var g=d[e].getAttribute("name"),k=d[e].getAttribute("value");/color$/.test(g)&&(k=l.stringToColor(k));b.settings[v(g)]=k}}if((d=f(a[c],"layout")[0])&&0<d.childNodes.length){d=f(d,"group");for(e=0;e<d.length;e++){k=
-d[e];g={elements:[]};b.layout[v(k.getAttribute("position"))]=g;for(var n=0;n<k.attributes.length;n++){var r=k.attributes[n];g[r.name]=r.value}k=f(k,"*");for(n=0;n<k.length;n++){r=k[n];g.elements.push({type:r.tagName});for(var w=0;w<r.attributes.length;w++){var F=r.attributes[w];g.elements[n][v(F.name)]=F.value}l.exists(g.elements[n].name)||(g.elements[n].name=r.tagName)}}}D=!1;p()}}function p(){clearInterval(w);F||(w=setInterval(function(){var a=!0;c(t,function(b,d){"properties"!=b&&c(d.elements,
-function(c){(t[v(b)]?t[v(b)].elements[v(c)]:null).ready||(a=!1)})});a&&!D&&(clearInterval(w),h(t))},100))}function q(a,b){b=v(b);var c=new Image,d=v(a.getAttribute("name")),e=a.getAttribute("src");if(0!==e.indexOf("data:image/png;base64,"))var f=l.getAbsolutePath(n),e=[f.substr(0,f.lastIndexOf("/")),b,e].join("/");t[b].elements[d]={height:0,width:0,src:"",ready:!1,image:c};c.onload=function(){var a=b,e=t[v(a)]?t[v(a)].elements[v(d)]:null;e?(e.height=Math.round(c.height/j*r),e.width=Math.round(c.width/
-j*r),e.src=c.src,e.ready=!0,p()):l.log("Loaded an image for a missing element: "+a+"."+d)};c.onerror=function(){F=!0;p();m("Skin image not found: "+this.src)};c.src=e}function v(a){return a?a.toLowerCase():""}var t={},h=k,m=b,D=!0,w,n=d,F=!1,r=(jwplayer.utils.isMobile(),1),j=1;"string"!=typeof n||""===n?C(g.defaultskin()):"xml"!=l.extension(n)?m("Skin not a valid file type"):new g.skinloader("",e,m)}})(jwplayer.html5);
-(function(g){var l=g.utils,c=g.events,a=l.css;g.html5.thumbs=function(d){function k(a){q=null;try{a=(new g.parsers.srt).parse(a.responseText,!0)}catch(c){b(c.message);return}if("array"!==l.typeOf(a))return b("Invalid data");C=a}function b(a){q=null;l.log("Thumbnails could not be loaded: "+a)}function e(b,c,d){b.onload=null;c.width||(c.width=b.width,c.height=b.height);c["background-image"]=b.src;a.style(f,c);d&&d(c.width)}var f,C,p,q,v,t={},h,m=new c.eventdispatcher;l.extend(this,m);f=document.createElement("div");
-f.id=d;this.load=function(c){a.style(f,{display:"none"});q&&(q.onload=null,q.onreadystatechange=null,q.onerror=null,q.abort&&q.abort(),q=null);h&&(h.onload=null);c?(p=c.split("?")[0].split("/").slice(0,-1).join("/"),q=l.ajax(c,k,b,!0)):(C=v=h=null,t={})};this.element=function(){return f};this.updateTimeline=function(a,c){if(C){for(var d=0;d<C.length&&a>C[d].end;)d++;d===C.length&&d--;d=C[d].text;a:{var f=d;if(f&&f!==v){v=f;0>f.indexOf("://")&&(f=p?p+"/"+f:f);var g={display:"block",margin:"0 auto",
-"background-position":"0 0",width:0,height:0};if(0<f.indexOf("#xywh"))try{var j=/(.+)\#xywh=(\d+),(\d+),(\d+),(\d+)/.exec(f),f=j[1];g["background-position"]=-1*j[2]+"px "+-1*j[3]+"px";g.width=j[4];g.height=j[5]}catch(k){b("Could not parse thumbnail");break a}var l=t[f];l?e(l,g,c):(l=new Image,l.onload=function(){e(l,g,c)},t[f]=l,l.src=f);h&&(h.onload=null);h=l}}return d}}}})(jwplayer);
-(function(g){var l=g.jwplayer,c=l.html5,a=l.utils,d=l.events,k=d.state,b=a.css,e=a.bounds,f=a.isMobile(),C=a.isIPad(),p=a.isIPod(),q=document,v="aspectMode",t=["fullscreenchange","webkitfullscreenchange","mozfullscreenchange","MSFullscreenChange"],h=!0,m=!h,D=m,w="hidden",n="none",F="block";c.view=function(r,j){function y(b){b=a.between(j.position+b,0,this.getDuration());this.seek(b)}function s(b){b=a.between(this.getVolume()+b,0,100);this.setVolume(b)}function I(a){var b;b=a.ctrlKey||a.metaKey?!1:
-j.controls?!0:!1;if(!b)return!0;M.adMode()||(ma(),H());b=l(r.id);switch(a.keyCode){case 27:b.setFullscreen(m);break;case 13:case 32:b.play();break;case 37:M.adMode()||y.call(b,-5);break;case 39:M.adMode()||y.call(b,5);break;case 38:s.call(b,10);break;case 40:s.call(b,-10);break;case 77:b.setMute();break;case 70:b.setFullscreen();break;default:if(48<=a.keyCode&&59>=a.keyCode){var c=(a.keyCode-48)/10*b.getDuration();b.seek(c)}}if(/13|32|37|38|39|40/.test(a.keyCode))return a.preventDefault(),!1}function L(){var a=
-!Ma;Ma=!1;a&&fa.sendEvent(d.JWPLAYER_VIEW_TAB_FOCUS,{hasFocus:!0});M.adMode()||(ma(),H())}function B(){Ma=!1;fa.sendEvent(d.JWPLAYER_VIEW_TAB_FOCUS,{hasFocus:!1})}function K(){var a=e(N),b=Math.round(a.width),c=Math.round(a.height);if(q.body.contains(N)){if(b&&c&&(b!==Za||c!==Ia))Za=b,Ia=c,Q&&Q.redraw(),clearTimeout(ra),ra=setTimeout($,50),fa.sendEvent(d.JWPLAYER_RESIZE,{width:b,height:c})}else g.removeEventListener("resize",K),f&&g.removeEventListener("orientationchange",K);return a}function u(a){a&&
-(a.element().addEventListener("mousemove",W,m),a.element().addEventListener("mouseout",E,m))}function A(){}function x(){clearTimeout(za);za=setTimeout(Aa,10)}function z(a,b){var c=q.createElement(a);b&&(c.className=b);return c}function H(){clearTimeout(za);za=setTimeout(Aa,$a)}function J(){clearTimeout(za);if(r.jwGetState()==k.PAUSED||r.jwGetState()==k.PLAYING)Da(),ia||(za=setTimeout(Aa,$a))}function W(){clearTimeout(za);ia=h}function E(){ia=m}function S(a){fa.sendEvent(a.type,a)}function Y(a){if(a.done)O();
-else{if(!a.complete){M.adMode()||(M.instreamMode(!0),M.adMode(!0),M.show(!0));M.setText(a.message);var b=a.onClick;void 0!==b&&Q.setAlternateClickHandler(function(){b(a)});void 0!==a.onSkipAd&&da&&da.setSkipoffset(a,a.onSkipAd)}da&&da.adChanged(a)}}function O(){M.setText("");M.adMode(!1);M.instreamMode(!1);M.show(!0);da&&(da.adsEnded(),da.setState(r.jwGetState()));Q.revertAlternateClickHandler()}function T(c,d,e){var f=N.className,g,k,l=r.id+"_view";b.block(l);if(e=!!e)f=f.replace(/\s*aspectMode/,
-""),N.className!==f&&(N.className=f),b.style(N,{display:F},e);a.exists(c)&&a.exists(d)&&(j.width=c,j.height=d);e={width:c};-1==f.indexOf(v)&&(e.height=d);b.style(N,e,!0);Q&&Q.redraw();M&&M.redraw(h);Z&&(Z.offset(M&&0<=Z.position().indexOf("bottom")?M.height()+M.margin():0),setTimeout(function(){ea&&ea.offset("top-left"==Z.position()?Z.element().clientWidth+Z.margin():0)},500));X(d);g=j.playlistsize;k=j.playlistposition;if(la&&g&&("right"==k||"bottom"==k))la.redraw(),f={display:F},e={},f[k]=0,e[k]=
-g,"right"==k?f.width=g:f.height=g,b.style(Pa,f),b.style(ya,e);$(c,d);b.unblock(l)}function X(a){var b=e(N);ca=0<a.toString().indexOf("%")||0===b.height?m:"bottom"==j.playlistposition?b.height<=40+j.playlistsize:40>=b.height;M&&(ca?(M.audioMode(h),Da(),Q.hidePreview(h),Q&&Q.hide(),Ea(m)):(M.audioMode(m),Na(r.jwGetState())));Z&&ca&&Ra();N.style.backgroundColor=ca?"transparent":"#000"}function $(a,b){if(!a||isNaN(Number(a))){if(!ba)return;a=ba.clientWidth}if(!b||isNaN(Number(b))){if(!ba)return;b=ba.clientHeight}j.getVideo().resize(a,
-b,j.stretching)&&(clearTimeout(ra),ra=setTimeout($,250))}function ha(a){if(a.target===N||N.contains(a.target))void 0!==a.jwstate?a=a.jwstate:Ga?(a=q.currentFullScreenElement||q.webkitCurrentFullScreenElement||q.mozFullScreenElement||q.msFullscreenElement,a=!!(a&&a.id===r.id)):a=j.getVideo().getFullScreen(),Ga?ka(N,a):Ca(a)}function ka(c,d){a.removeClass(c,"jwfullscreen");d?(a.addClass(c,"jwfullscreen"),b.style(q.body,{"overflow-y":w}),H()):b.style(q.body,{"overflow-y":""});M&&M.redraw();Q&&Q.redraw();
-ea&&ea.redraw();$();Ca(d)}function Ca(a){j.setFullscreen(a);a?(clearTimeout(ra),ra=setTimeout($,200)):C&&r.jwGetState()==k.PAUSED&&setTimeout(Qa,500)}function ma(){(!p||ca)&&M&&j.controls&&M.show()}function R(){P!==h&&M&&(!ca&&!j.getVideo().audioMode())&&M.hide()}function Ba(){ea&&(!ca&&j.controls)&&ea.show()}function wa(){ea&&(!aa&&!j.getVideo().audioMode())&&ea.hide()}function Ra(){Z&&(!j.getVideo().audioMode()||ca)&&Z.hide(ca)}function Qa(){Q&&j.controls&&!ca&&(!p||r.jwGetState()==k.IDLE)&&Q.show();
-(!f||!j.fullscreen)&&j.getVideo().setControls(m)}function Aa(){clearTimeout(za);if(P!==h){Ha=m;var b=r.jwGetState();(!j.controls||b!=k.PAUSED)&&R();j.controls||wa();b!=k.IDLE&&b!=k.PAUSED&&(wa(),Ra());a.addClass(N,"jw-user-inactive")}}function Da(){if(P!==m){Ha=h;if((j.controls||ca)&&!(p&&Ta==k.PAUSED))ma(),Ba();rb.hide&&Z&&!ca&&Z.show();a.removeClass(N,"jw-user-inactive")}}function Ea(a){a=a&&!ca;j.getVideo().setVisibility(a)}function cb(){aa=h;Ja(m);j.controls&&Ba()}function vb(){da&&da.setState(r.jwGetState())}
-function jb(){}function Oa(a){aa=m;clearTimeout(Xa);Xa=setTimeout(function(){Na(a.newstate)},100)}function wb(){R()}function Na(a){Ta=a;if(j.getVideo().isCaster)Q&&(Q.show(),Q.hidePreview(m)),b.style(ba,{visibility:w,opacity:0}),M&&(M.show(),M.hideFullscreen(h));else{switch(a){case k.PLAYING:P=j.getVideo().isCaster!==h?null:h;(V?Fa:j).getVideo().audioMode()?(Ea(m),Q.hidePreview(ca),Q.setHiding(h),M&&(Da(),M.hideFullscreen(h)),Ba()):(Ea(h),$(),Q.hidePreview(h),M&&M.hideFullscreen(!j.getVideo().supportsFullscreen()));
-break;case k.IDLE:Ea(m);ca||(Q.hidePreview(m),Qa(),Ba(),M&&M.hideFullscreen(m));break;case k.BUFFERING:Qa();Aa();f&&Ea(h);break;case k.PAUSED:Qa(),Da()}Z&&!ca&&Z.show()}}function Va(a){return"#"+r.id+(a?" ."+a:"")}function Wa(a,c){b(a,{display:c?F:n})}var N,ya,pa,qb,Pa,za=-1,$a=f?4E3:2E3,ba,Za,Ia,va,Ya,Sa,Fa,V=m,M,Q,da,ea,Z,rb=a.extend({},j.componentConfig("logo")),ta,la,ca,G=m,Ha=m,P=null,aa,qa,ra=-1,ia=m,Ta,ga,La,Ga=!1,Ma=!1,fa=a.extend(this,new d.eventdispatcher);this.getCurrentCaptions=function(){return ta.getCurrentCaptions()};
-this.setCurrentCaptions=function(a){ta.setCurrentCaptions(a)};this.getCaptionsList=function(){return ta.getCaptionsList()};this.setup=function(e){if(!G){r.skin=e;ya=z("span","jwmain");ya.id=r.id+"_view";ba=z("span","jwvideo");ba.id=r.id+"_media";pa=z("span","jwcontrols");va=z("span","jwinstream");Pa=z("span","jwplaylistcontainer");qb=z("span","jwaspect");e=j.height;var s=j.componentConfig("controlbar"),w=j.componentConfig("display");X(e);ta=new c.captions(r,j.captions);ta.addEventListener(d.JWPLAYER_CAPTIONS_LIST,
-S);ta.addEventListener(d.JWPLAYER_CAPTIONS_CHANGED,S);ta.addEventListener(d.JWPLAYER_CAPTIONS_LOADED,A);pa.appendChild(ta.element());Q=new c.display(r,w);Q.addEventListener(d.JWPLAYER_DISPLAY_CLICK,function(a){S(a);f?Ha?Aa():Da():Oa({newstate:r.jwGetState()});Ha&&H()});ca&&Q.hidePreview(h);pa.appendChild(Q.element());Z=new c.logo(r,rb);pa.appendChild(Z.element());ea=new c.dock(r,j.componentConfig("dock"));pa.appendChild(ea.element());r.edition&&!f?qa=new c.rightclick(r,{abouttext:j.abouttext,aboutlink:j.aboutlink}):
-f||(qa=new c.rightclick(r,{}));j.playlistsize&&(j.playlistposition&&j.playlistposition!=n)&&(la=new c.playlistcomponent(r,{}),Pa.appendChild(la.element()));M=new c.controlbar(r,s);M.addEventListener(d.JWPLAYER_USER_ACTION,H);pa.appendChild(M.element());p&&R();D&&fa.forceControls(h);ya.appendChild(ba);ya.appendChild(pa);ya.appendChild(va);N.appendChild(ya);N.appendChild(qb);N.appendChild(Pa);j.getVideo().setContainer(ba);j.addEventListener("fullscreenchange",ha);for(e=t.length;e--;)q.addEventListener(t[e],
-ha,m);g.removeEventListener("resize",K);g.addEventListener("resize",K,m);f&&(g.removeEventListener("orientationchange",K),g.addEventListener("orientationchange",K,m));l(r.id).onAdPlay(function(){M.adMode(!0);Na(k.PLAYING)});l(r.id).onAdSkipped(function(){M.adMode(!1)});l(r.id).onAdComplete(function(){M.adMode(!1)});l(r.id).onAdError(function(){M.adMode(!1)});r.jwAddEventListener(d.JWPLAYER_PLAYER_READY,jb);r.jwAddEventListener(d.JWPLAYER_PLAYER_STATE,Oa);r.jwAddEventListener(d.JWPLAYER_MEDIA_ERROR,
-wb);r.jwAddEventListener(d.JWPLAYER_PLAYLIST_COMPLETE,cb);r.jwAddEventListener(d.JWPLAYER_PLAYLIST_ITEM,vb);r.jwAddEventListener(d.JWPLAYER_CAST_AVAILABLE,function(a){a.available?(fa.forceControls(h),D=h):fa.releaseControls()});r.jwAddEventListener(d.JWPLAYER_CAST_SESSION,function(a){da||(da=new l.html5.castDisplay(r.id),da.statusDelegate=function(a){da.setState(a.newstate)});a.active?(b.style(ta.element(),{display:"none"}),fa.forceControls(h),da.setState("connecting").setName(a.deviceName).show(),
-r.jwAddEventListener(d.JWPLAYER_PLAYER_STATE,da.statusDelegate),r.jwAddEventListener(d.JWPLAYER_CAST_AD_CHANGED,Y)):(r.jwRemoveEventListener(d.JWPLAYER_PLAYER_STATE,da.statusDelegate),r.jwRemoveEventListener(d.JWPLAYER_CAST_AD_CHANGED,Y),da.hide(),M.adMode()&&O(),b.style(ta.element(),{display:null}),Oa({newstate:r.jwGetState()}),K())});Oa({newstate:k.IDLE});f||(pa.addEventListener("mouseout",x,m),pa.addEventListener("mousemove",J,m),a.isMSIE()&&(ba.addEventListener("mousemove",J,m),ba.addEventListener("click",
-Q.clickHandler)));u(M);u(ea);u(Z);b("#"+N.id+"."+v+" .jwaspect",{"margin-top":j.aspectratio,display:F});e=a.exists(j.aspectratio)?parseFloat(j.aspectratio):100;s=j.playlistsize;b("#"+N.id+".playlist-right .jwaspect",{"margin-bottom":-1*s*(e/100)+"px"});b("#"+N.id+".playlist-right .jwplaylistcontainer",{width:s+"px",right:0,top:0,height:"100%"});b("#"+N.id+".playlist-bottom .jwaspect",{"padding-bottom":s+"px"});b("#"+N.id+".playlist-bottom .jwplaylistcontainer",{width:"100%",height:s+"px",bottom:0});
-b("#"+N.id+".playlist-right .jwmain",{right:s+"px"});b("#"+N.id+".playlist-bottom .jwmain",{bottom:s+"px"});setTimeout(function(){T(j.width,j.height)},0)}};var Ja=this.fullscreen=function(b){a.exists(b)||(b=!j.fullscreen);b=!!b;b!==j.fullscreen&&(Ga?(b?ga.apply(N):La.apply(q),ka(N,b)):j.getVideo().setFullScreen(b))};this.resize=function(a,b){T(a,b,h);K()};this.resizeMedia=$;var pb=this.completeSetup=function(){b.style(N,{opacity:1});g.onbeforeunload=function(){j.getVideo().isCaster||r.jwStop()}},
-Xa;this.setupInstream=function(a,c,d,e){b.unblock();Wa(Va("jwinstream"),h);Wa(Va("jwcontrols"),m);va.appendChild(a);Ya=c;Sa=d;Fa=e;Oa({newstate:k.PLAYING});V=h;va.addEventListener("mousemove",J);va.addEventListener("mouseout",x)};this.destroyInstream=function(){b.unblock();Wa(Va("jwinstream"),m);Wa(Va("jwcontrols"),h);va.innerHTML="";va.removeEventListener("mousemove",J);va.removeEventListener("mouseout",x);V=m};this.setupError=function(a){G=h;l.embed.errorScreen(N,a,j);pb()};this.addButton=function(a,
-b,c,d){ea&&(ea.addButton(a,b,c,d),r.jwGetState()==k.IDLE&&Ba())};this.removeButton=function(a){ea&&ea.removeButton(a)};this.setControls=function(a){var b=j.controls,c=!!a;j.controls=c;c!=b&&(V?a?(Ya.show(),Sa.show()):(Ya.hide(),Sa.hide()):c?Oa({newstate:r.jwGetState()}):(Aa(),Q&&Q.hide()),fa.sendEvent(d.JWPLAYER_CONTROLS,{controls:c}))};this.forceControls=function(a){P=!!a;a?Da():Aa()};this.releaseControls=function(){P=null;Na(r.jwGetState())};this.addCues=function(a){M&&M.addCues(a)};this.forceState=
-function(a){Q.forceState(a)};this.releaseState=function(){Q.releaseState(r.jwGetState())};this.getSafeRegion=function(){var a={x:0,y:0,width:0,height:0};if(!j.controls)return a;M.showTemp();ea.showTemp();var b=e(ya),c=b.top,d=V?e(q.getElementById(r.id+"_instream_controlbar")):e(M.element()),f=V?m:0<ea.numButtons(),g=0===Z.position().indexOf("top"),h=e(Z.element());f&&(f=e(ea.element()),a.y=Math.max(0,f.bottom-c));g&&(a.y=Math.max(a.y,h.bottom-c));a.width=b.width;a.height=d.height?(g?d.top:h.top)-
-c-a.y:b.height-a.y;M.hideTemp();ea.hideTemp();return a};this.destroy=function(){g.removeEventListener("resize",K);g.removeEventListener("orientationchange",K);for(var a=t.length;a--;)q.removeEventListener(t[a],ha,m);j.removeEventListener("fullscreenchange",ha);N.removeEventListener("keydown",I,m);qa&&qa.destroy();da&&(r.jwRemoveEventListener(d.JWPLAYER_PLAYER_STATE,da.statusDelegate),da.destroy(),da=null);pa&&(pa.removeEventListener("mousemove",J),pa.removeEventListener("mouseout",x));ba&&(ba.removeEventListener("mousemove",
-J),ba.removeEventListener("click",Q.clickHandler));V&&this.destroyInstream()};N=z("div","jwplayer playlist-"+j.playlistposition);N.id=r.id;N.tabIndex=0;N.onmousedown=function(){Ma=!0;fa.sendEvent(d.JWPLAYER_VIEW_TAB_FOCUS,{hasFocus:!1})};N.onfocusin=L;N.addEventListener("focus",L);N.onfocusout=B;N.addEventListener("blur",B);N.addEventListener("keydown",I);ga=N.requestFullscreen||N.requestFullScreen||N.webkitRequestFullscreen||N.webkitRequestFullScreen||N.webkitEnterFullscreen||N.webkitEnterFullScreen||
-N.mozRequestFullScreen||N.msRequestFullscreen;La=q.exitFullscreen||q.cancelFullScreen||q.webkitExitFullscreen||q.webkitCancelFullScreen||q.mozCancelFullScreen||q.msExitFullscreen;Ga=ga&&La;j.aspectratio&&(b.style(N,{display:"inline-block"}),N.className=N.className.replace("jwplayer","jwplayer "+v));T(j.width,j.height);var ja=q.getElementById(r.id);ja.parentNode.replaceChild(N,ja)};b(".jwplayer",{position:"relative",display:"block",opacity:0,"min-height":0,"-webkit-transition":"opacity .25s ease",
-"-moz-transition":"opacity .25s ease","-o-transition":"opacity .25s ease"});b(".jwmain",{position:"absolute",left:0,right:0,top:0,bottom:0,"-webkit-transition":"opacity .25s ease","-moz-transition":"opacity .25s ease","-o-transition":"opacity .25s ease"});b(".jwvideo, .jwcontrols",{position:"absolute",height:"100%",width:"100%","-webkit-transition":"opacity .25s ease","-moz-transition":"opacity .25s ease","-o-transition":"opacity .25s ease"});b(".jwvideo",{overflow:w,visibility:w,opacity:0});b(".jwvideo video",
-{background:"transparent",height:"100%",width:"100%",position:"absolute",margin:"auto",right:0,left:0,top:0,bottom:0});b(".jwplaylistcontainer",{position:"absolute",height:"100%",width:"100%",display:n});b(".jwinstream",{position:"absolute",top:0,left:0,bottom:0,right:0,display:"none"});b(".jwaspect",{display:"none"});b(".jwplayer."+v,{height:"auto"});b(".jwplayer.jwfullscreen",{width:"100%",height:"100%",left:0,right:0,top:0,bottom:0,"z-index":1E3,margin:0,position:"fixed"},h);b(".jwplayer.jwfullscreen.jw-user-inactive",
-{cursor:"none","-webkit-cursor-visibility":"auto-hide"});b(".jwplayer.jwfullscreen .jwmain",{left:0,right:0,top:0,bottom:0},h);b(".jwplayer.jwfullscreen .jwplaylistcontainer",{display:n},h);b(".jwplayer .jwuniform",{"background-size":"contain !important"});b(".jwplayer .jwfill",{"background-size":"cover !important","background-position":"center"});b(".jwplayer .jwexactfit",{"background-size":"100% 100% !important"})})(window);
-(function(g,l){function c(a){return"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA"+q[a]}function a(a,b){var c=l.createElement(a);b&&d(c,b);return c}function d(a,b){b.join||(b=[b]);for(var c=0;c<b.length;c++)b[c]&&(b[c]="jwcast-"+b[c]);a.className=b.join(" ")}function k(a,b){b.join||(b=[b]);for(var c=0;c<b.length;c++)a.appendChild(b[c])}var b=g.utils,e=g.html5,f=g.events,C=f.state,p=b.css,q={wheel:"DgAAAA4CAYAAACohjseAAACiUlEQVR42u3aP2sTYRzAcZ87Md6mhE5GhRqli0NC22yNKO1iaStSY+ggdKggal6BDXRoUuwbEG1LpE4B30LAxEGbKYgO7SVoUhJD04hOusRv4ZlCwP5LevfDgw9kCnzD5Z4/95xqtVqideNLTQzjKV4gCxtNtNwaqBBGCg3UkcYz3EUIV+F1W6AHj7CFb1hAEIbbb1GFByjjAyZgSvkPXkMGW7gt7SETwQ8swpL0FFV4jjpuShsmTiOFz7gobRxUWEceXokDfQKf0CdxJhNFFT6JU7Ur2MUtiXNRhXdYlDrZnkERZyUGerCNcanLpYfISV0PGtjEpNTAGyjBkBq4ggWpWxYmGghIDRzEDgypgTG8lbyrtoZ5yYFZ3JccWMKg5MCfGJAcuHf5/ge6xwX8lnyLDmCn/SEzJChwCKX2YSIqKDCKbPtAHxcUGAdNOhBPkBYUmAZNOhDXUYMSEKdQBU06EAp1BAUEBnWLgg4EXmJJQOASXnVa0YdRcfma0NAN4U6BCpu44+LASd2g0BYIPEbexYHvdQOfOwdaqLh063AcFVj73bq3XBRnoYiZ/b58ySDposAkMlD/DNT8aGLUBXGjaMJ/0Beg9/Dd4etEH2qIHOUVdgHnHRh3DgUkjnoIIYUNh0V6sYHXUIcO1Eyso4BLDoi7jC94A/O4DgIZWEYdYycYN4YalmF04yjXNJpIwOrxOJdAE9PdPoznRxZFTPUgbgI2svD38jjlLMrI61DjmFcFU/iICmZhnMSB2DOYg41tJBGAOuSPFkASZdiYg8cpR5pHsIIGqkgjjghC6Eef1o8QIphHGlU0sIYRGE4/lB7DKnL4il/Yu/5gFzZyWEUMwzC7sXUv2l9q1CPRZSGkLwAAAABJRU5ErkJggg\x3d\x3d",
-display:"UAAAAC4AQMAAACo6KcpAAAABlBMVEV6enp6enqEWMsmAAAAAXRSTlMAQObYZgAAAEdJREFUeF7t2bEJACAMRcGAg7j/Fo6VTkvbIKSRe/XBH+DHLlaHK0qN7yAIgiAIgiAIgiAIgiAIgiAIgiAIgg0PZHfzbuUjPCPnO5qQcE/AAAAAAElFTkSuQmCC",pause:"CoAAAA2CAQAAAAb3sMwAAAAMElEQVR4Ae3MMQEAMAzDsIY/6AxB9/aRfyvt7GX2Ph8UCoVCoVAo9AiFQqFQKBQKfdYvoctOjDeGAAAAAElFTkSuQmCC",play:"DYAAAA2BAMAAAB+a3fuAAAAFVBMVEX///////////////////////////9nSIHRAAAABnRSTlMAP79AwMFfxd6iAAAAX0lEQVR4Xn3JQQGAABAEoaliFiPYYftHMMHBl55uQw455JBDDjnkkEMOOeSQQw455JBDDjnkkEMOOeSQQ+5O3HffW6hQoUKFChUqVKhQoUKFChUqVKhQoUKFChUqVKgfWHsiYI6VycIAAAAASUVORK5CYII\x3d",
-replay:"DQAAAA8CAYAAAApK5mGAAADkklEQVRoBd3BW2iVBRwA8P/cWHMsv9QilLCITLCU0khpST6JCEXrQbKMCgrKFwsfZq/LMnRRIdkFvBQUvmShgg9iV02zB7FScyWlqNHNqbCJ7PKLkFHp952dnZ3tfOv3ixgGSLAVt8b/ARIX9WADJsVIhsR/daIV42MkQiJdO5ZjdIwkSBR2Ek+gJkYCJIpzEE2Rd0gMzB7MibxCojRbcEtUGsZgJu7HYixVuh6sx6QYLrgSD+Fd/GhodKIV42Ko4B68h07Dpx3NGB3lgnnYpbJOYFoMBm7ANpW3D3NjMPAgzqqsn7EIVVEqVGOtymrHMtTGYKAeWxSvB3vxIh7ANIzFNUpzAa0YF4OFWuxUnFNYjkmRAomB6cX7uDHKAdX4QP/asRRXRAFIFO8TzI5yQov+bcO1UQQk+ncITVFumIce2XqxHFVRJCSy/YolqIlyQwOOy9aNR2KAkLhcJ1agIYYKVsvWi6eiBEj8owfrMDEGAVVYiMcjDa7HBdlejhIhcdF2TI9BQiP2uOgsro5LYa1sX6M2SoQ6zItBwmRsdrnn498wDuel68aMqDBMQZd0v6Mu+mCJbBsiJ7BdtkXRB7ul68HNkRNolO3D+BvGoke6HZEz+Fa6c6gJNMn2WOQMmmW7K/CSbBMiZ3CbbM8EPpKuLXIIo3BWujcCh6TbEjmFr6TbGfhDulcip7BJugOBbulaIqfwlnRHQ7bnIqewVrpjgU7pVkZOYaN0hwOnpFsfOYWt0u0LfCnd55FT+EG6zYEN0p1BdeQMEnRLtzKwTLZZkTO4V7bFgTtka4mcwTrZrgtU47R0P6E6cgINOCfdkeiDjbItipzAs7K1Rh/Mle0gaqLC0IBTsk2PPhiFI7ItiwrDKtl2xaXwqGwdmBoVgrvRJdv8uBRq0CbbISQxzDARJ2TbG1kwX2GfoT6GCa7CN7J1Y0YUgk0K+wJjY4hhAg4o7LXoD8bjuMIOY1oMETTiuMIOoj6KgTvRobDzaEZtlAnq8QK6FHYGU2IgcB+69e97LEJNlAh1eBrH9K8DjVEKPIxuxTmJVZiFmugHajEHa/Cb4nRiQQwGmtBpYM7hU7yNFjSjGSuwDrvRYWD+RGOUA25Hm8rZj8lRThiDd9Br+PTgVdTFUMFcfGfo7cHMGA4YhYXYr/x2YQGqohIwG2vwi9Idw2pMjzzBVCzBm/gYR3EaXbiA02jDDryOJ3FTlNFfAO8ENqnn13UAAAAASUVORK5CYII\x3d"},
-v=!1,t=316/176;e.castDisplay=function(h){function q(){if(z){var a=z.element();a.parentNode&&a.parentNode.removeChild(a);z.resetEventListeners();z=null}}function w(){J&&(J.parentNode&&J.parentNode.removeChild(J),J=null)}function n(){H&&(H.parentNode&&H.parentNode.removeChild(H),H=null)}v||(p(".jwplayer .jwcast-display",{display:"none",position:"absolute",width:"100%",height:"100%","background-repeat":"no-repeat","background-size":"auto","background-position":"50% 50%","background-image":c("display")}),
-p(".jwplayer .jwcast-label",{position:"absolute",left:10,right:10,bottom:"50%","margin-bottom":100,"text-align":"center"}),p(".jwplayer .jwcast-label span",{"font-family":'"Karbon", "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "Lucida Grande", sans-serif',"font-size":20,"font-weight":300,color:"#7a7a7a"}),p(".jwplayer span.jwcast-name",{color:"#ccc"}),p(".jwcast-button",{position:"absolute",width:"100%",height:"100%",opacity:0,"background-repeat":"no-repeat",
-"background-size":"auto","background-position":"50% 50%"}),p(".jwcast-wheel",{"background-image":c("wheel")}),p(".jwcast-pause",{"background-image":c("pause")}),p(".jwcast-play",{"background-image":c("play")}),p(".jwcast-replay",{"background-image":c("replay")}),p(".jwcast-paused .jwcast-play",{opacity:1}),p(".jwcast-playing .jwcast-pause",{opacity:1}),p(".jwcast-idle .jwcast-replay",{opacity:1}),b.cssKeyframes("spin","from {transform: rotate(0deg);} to {transform: rotate(360deg);}"),p(".jwcast-connecting .jwcast-wheel, .jwcast-buffering .jwcast-wheel",
-{opacity:1,"-webkit-animation":"spin 1.5s linear infinite",animation:"spin 1.5s linear infinite"}),p(".jwcast-companion",{position:"absolute","background-position":"50% 50%","background-size":"316px 176px","background-repeat":"no-repeat",top:0,left:0,right:0,bottom:4}),p(".jwplayer .jwcast-click-label",{"font-family":'"Karbon", "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "Lucida Grande", sans-serif',"font-size":14,"font-weight":300,"text-align":"center",position:"absolute",
-left:10,right:10,top:"50%",color:"#ccc","margin-top":100,"-webkit-user-select":"none","user-select":"none",cursor:"pointer"}),p(".jwcast-paused .jwcast-click-label",{color:"#7a7a7a",cursor:"default"}),v=!0);var F=l.getElementById(h+"_display_button"),r=a("div","display"),j=a("div",["pause","button"]),y=a("div",["play","button"]),s=a("div",["replay","button"]),I=a("div",["wheel","button"]),L=a("div","label"),B=a("span"),K=a("span","name"),u="#"+h+"_display.jwdisplay",A=-1,x=null,z=null,H=null,J=null;
-k(r,[I,j,y,s,L]);k(L,[B,K]);F.parentNode.insertBefore(r,F);this.statusDelegate=null;this.setName=function(a){K.innerText=a||"Google Cast";return this};this.setState=function(a){var b="Casting on ";if(null===x)if("connecting"===a)b="Connecting to ";else if(a!==C.IDLE){var c=g(h).getPlaylistItem().title||"";c&&(b=b.replace("on",c+" on"))}B.innerText=b;clearTimeout(A);a===C.IDLE&&(A=setTimeout(function(){d(r,["display","idle"])},3E3),a="");d(r,["display",(a||"").toLowerCase()]);return this};this.show=
-function(){p(u+" .jwpreview",{"background-size":"316px 176px !important",opacity:0.6,"margin-top":-2});p(u+" .jwdisplayIcon",{display:"none !important"});p.style(r,{display:"block"});return this};this.hide=function(){b.clearCss(u+" .jwpreview");p(u+" .jwdisplayIcon",{display:""});p.style(r,{display:"none"});return this};this.setSkipoffset=function(a,c){if(null===z){var d=l.getElementById(h+"_controlbar"),g=10+b.bounds(r).bottom-b.bounds(d).top;z=new e.adskipbutton(h,g|0,a.skipMessage,a.skipText);
-z.addEventListener(f.JWPLAYER_AD_SKIPPED,function(){c(a)});z.reset(a.skipoffset||-1);z.show();d.parentNode.insertBefore(z.element(),d)}else z.reset(a.skipoffset||-1)};this.setCompanions=function(b){var c,d,e,f=Number.MAX_VALUE,g=null;for(d=b.length;d--;)if(c=b[d],c.width&&c.height&&c.source)switch(c.type){case "html":case "iframe":case "application/x-shockwave-flash":break;default:e=Math.abs(c.width/c.height-t),e<f&&(f=e,0.75>e&&(g=c))}(b=g)?(null===H&&(H=a("div","companion"),k(r,H)),b.width/b.height>
-t?(c=316,d=b.height*c/b.width):(d=176,c=b.width*d/b.height),p.style(H,{"background-image":b.source,"background-size":c+"px "+d+"px"})):n()};this.adChanged=function(b){if(b.complete)z&&z.reset(-1),x=null;else{z&&(void 0===b.skipoffset?q():(b.position||b.duration)&&z.updateSkipTime(b.position|0,b.duration|0));var c=b.tag+b.sequence;c!==x&&(p(u+" .jwpreview",{opacity:0}),b.companions?this.setCompanions(b.companions):n(),b.clickthrough?null===J&&(J=a("div","click-label"),J.innerText="Click here to learn more \x3e",
-k(r,J)):w(),x=c,this.setState(b.newstate))}};this.adsEnded=function(){q();n();w();p(u+" .jwpreview",{opacity:0.6});x=null};this.destroy=function(){this.hide();r.parentNode&&r.parentNode.removeChild(r)}};var h=".jwcast button";p(h,{opacity:1});p(h+":hover",{opacity:0.75});h+=".off";p(h,{opacity:0.75});p(h+":hover",{opacity:1})})(jwplayer,document);
-(function(g){var l=jwplayer.utils.extend,c=g.logo;c.defaults.prefix="";c.defaults.file="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHoAAAAyCAMAAACkjD/XAAACnVBMVEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJCQkSEhIAAAAaGhoAAAAiIiIrKysAAAAxMTEAAAA4ODg+Pj4AAABEREQAAABJSUkAAABOTk5TU1NXV1dcXFxiYmJmZmZqamptbW1xcXF0dHR3d3d9fX2AgICHh4eKioqMjIyOjo6QkJCSkpKUlJSWlpaYmJidnZ2enp6ioqKjo6OlpaWmpqanp6epqamqqqqurq6vr6+wsLCxsbG0tLS1tbW2tra3t7e6urq7u7u8vLy9vb2+vr6/v7/AwMDCwsLFxcXFxcXHx8fIyMjJycnKysrNzc3Ozs7Ozs7Pz8/Pz8/Q0NDR0dHR0dHS0tLU1NTV1dXW1tbW1tbW1tbX19fX19fa2trb29vb29vc3Nzc3Nzf39/f39/f39/f39/g4ODh4eHj4+Pj4+Pk5OTk5OTk5OTk5OTl5eXn5+fn5+fn5+fn5+fn5+fo6Ojo6Ojq6urq6urq6urr6+vr6+vr6+vt7e3t7e3t7e3t7e3u7u7u7u7v7+/v7+/w8PDw8PDw8PDw8PDy8vLy8vLy8vLy8vLy8vLy8vLy8vLy8vL09PT09PT09PT09PT09PT09PT09PT29vb29vb29vb29vb29vb29vb29vb29vb39/f39/f39/f39/f39/f4+Pj4+Pj4+Pj5+fn5+fn5+fn5+fn5+fn5+fn5+fn6+vr6+vr6+vr6+vr6+vr6+vr8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz9/f39/f39/f39/f39/f39/f39/f39/f39/f3+/v7+/v7+/v7+/v7+/v7+/v7+/v7+/v7///////////////9kpi5JAAAA33RSTlMAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhYWFxcYGBgZGRoaGhsbHBwdHR4eHx8gISIiIyQmJicoKSoqKywtLi4uMDEyMjM0NTU2Njc5Ojo7Ozw9Pj5AQUJCQ0ZGSElKSktMTU5PUFFRUlRVVlZXWFpbXV5eX2BhYmVmZ2hpamtsbW5vcHFyc3R2d3h5enx9fn+BgoKDhIWGiYmKi4yNjo+QkZKTlJWWl5eYmZqbnJ2enp+goaKkpaamp6ipqqusra6vsLKzs7W2t7i5uru8vb6/wMHCwsPExcbHyMnJysvMVK8y+QAAB5FJREFUeNrFmP2f3EQdx8kmm2yy2WQzmZkjl3bJ2Rb12mtp8SiKiBUUxVKFVisIihV62CKCIoK0UvVK1bP07mitBeVJUVso0Duw1Xo9ET0f6JN47bV3u9+/xe83kyzr0+vlL7t8Xq9ubpLpvHfm+7i54P+UVkBp2gWdFpGNYtFA+NtALpYcxzZ1rSM0TSvgv5xse0wwu1joxDYLulE0dKTTSLcqfOvMQ1WzoHXAtCadsGXqBCsUnWDxNBzmlq51wLSuz0LmOcTWClZFfA1ghLUbrUwbdq396kAvK5s6HoFdlb8FuLONB66RlGnD5S8BwKkNoVMsFEw3XIOj97hmoX2updP5kml7jgLp/Ec8yzBKntwDMCnwa7TPtUrkWLrliW2gtC+0TdNhvdMAu1hJ19plYNcP0LGKiJp/HJTeEI5V8sjJ4PZ2mTp1rb7Pf5C5JbvCN0Cuha7jpE5WX9oeU6us8YlTUH8grFQC+QzkWuKVvdTJXuWO0Z5Nk2tNkWNdzgLed+4tdNWrkpPBI20ytVYwK+LrQLpPcHk3vIVm1ZCcDD7jt8fUGmYNoeLpJzKW+1vQYSjJyc72ZKbWSOqqhpn+99r/rn99WDDLbJViHZbJi
rkWtJDkZPArbhta2jFg7LdKV1ID9aWaz5CTzTD0pvB2aypB9xYPKtaUXEC7bKKjeA1dHyJTU+xbFgY/RiAKP2lYsm28RaJmAtfTs6c4xP9g0gycUqKpeDGLegZPl3MqTL6oWCdl9EIrOol20/U6zyzgVJzpeV6l7Dhl18VP1/N8v1r1vQoNSziH1nPKKMdBChbAiprheygfL65tZmxazguYXDoL8BcyqlhRb0W/M3Wy412YRTUd7SKEFIKzIBQ8DBhHewgSjkLB7GwS54wxwcoORqYQ+QyhFGA9VIYxnfCKq2VtE3k3wTB1taLx+FVCNTRyxnU4YQ/8WEY9M7PvkvJHsEsAam5srRRwH0YBhml14Zv7pRz62+LAD/jWE0vHINU6OUGXyc0Mt5GiLW/+6blV8eO4tY8B6t3qvBsZOnUy+HJgFaiuMELfhQ6RrAe4JZGvwxcFPLx69YZDZ1ciOrB03ayEd52vr0x6/zokhbxs+p5o7Oc3kfrkxFOrV392d+NWFaeaXvK652Cw+xTAo9cS5ar0vKcfy9BrgNRfMVN0SOh+gPfWtgN8L7kM6pcI2FSrJUtm7kc0KxlF2xcHd/1xWxxvmv1QLB9/5cJobDiKIxklcmI4ShJ5eJ/qOTSqU6/BBC4JN6boQSAN71Doi1Mnm+B0Rjlavgabo/GZ2V/LL8FRSehkkfzzYIouoqXf31jz3de7kq5DB6JP1a+vSUQnOXrRoujpn2XogumJpwCeBfhDV4qeAdK1QwqdOhkMqdAyyyk6HoHR3tmD4/UlI/DDBNFxHK1tDBDaNrHODU7KDzTW16Lr6nccHZGxHNt3Jao/RrSU8pPTeX+JPYj4NpAGkxsg16FoWP1xP5Bu8UwdYxSXJXRyJ0zeCtsegdsm4QsLBBwcHf3l+fF5hHbscnDh1LeSaGwvModnTl7ChVRuNiblxIkjR6bq+9+R9RzkO7cBadWCdZBroDaq/jgDqHMLMYtSr8jkpwl9aaOxF9bdDHsb9T5Ev/rkk6N398SIDj3X5zfDzi1bDpxdHNWWwcOchS27funeR+EOyTI0RcyKLIM20VPzyOObeh4LJsZ/hYnaRpgRsTwG9TPzLz5XhyOSDlzykDEKLsEYl08cG0W9eW+U4B1eZZmtY7J13PXCeHeg0MrPjlH8yLiJ/mYtfqIFvQVNTaez/cMrfwHHpJC7APZH0csAP5ARokPPwXyIoEjKaOnM7UIIOfKKrJEJvEAguhZHUY1sHb3vH1tCxyS0OvGtAL+/iMubQOlMXyKfA6U8i+I0PqWyecA3AmyVEmPhczxEdBUbOKwCsHsAtfNUDyZNdiNcLQld8cTYgQHScjExjNPvOf9RSsrZtt3uB3f2s0Dku35MyiY6z6LYjbMdx+HvO7pd11/egBtCvh7mFvs+P70Rl8L0yU8r7WROyXb5b77Dxemv+I7L82wmxoeY53U9+/K8HE1ZvBq4eGQfh1SNa0Keo5tZVCXwXs7KluUwIZjrMsrHTsB95f4B50JwztGURtHywsBjvGphtIUiFeb9Kn4pjzHXUOhmlXPI3Ug/5QH6BjS1uWpRRdLNku3YWPNw4RKVSSqfpKLq3k3bIZXMvFha+NjQqXqlhYxKa9EgFJGVqKCrqD2ZloJrql7Qgq4vw9DKfn0ahp73B+ln3hPQY/xKJEO1CC2P6T49UOP/fD+R5qphSBvAslttQb8YZr1os7/5ry0P8VDNoZK6T8pnZpdW4bb9ZWPQ2NPtlhxf/A5yPUApt+0/MP2uqy5nLkaKLyZycuOKCp13u9mWXXasol4staAPYyprN1p5CvkR1nD5pxz9jQDPu1Pvbii3yklQmr2U/LtDUr9Fngelp0NqwDsmirPtoLRWJdxOiQrp9Yr8XGiTk3XyxF2eFuw3+ju5aRJl1Yu+f+LMM1eiexc6/lK0QuWpYhkd3XT+UsfOXhd2WKpO6W/TO3BUO8H/BB7RwuB6W7b7AAAAAElFTkSuQmCC";g.logo=
-function(a,d){"free"==a.edition()?d=null:(c.defaults.file="",c.defaults.prefix="");l(this,new c(a,d))}})(jwplayer.html5);(function(g){var l=g.html5,c=l.model;l.model=function(a,d){var k=new g.utils.key(a.key),b=new c(a,d),e=b.componentConfig;b.edition=function(){return k.edition()};b.componentConfig=function(a){return"logo"==a?b.logo:e(a)};return b}})(jwplayer);
-(function(g){var l=g.html5,c=l.player;l.player=function(a){a=new c(a);var d;d=a._model.edition();if("enterprise"===d||"ads"===d)d=new g.cast.controller(a,a._model),a.jwStartCasting=d.startCasting,a.jwStopCasting=d.stopCasting;return a};c.prototype.edition=function(){return this._model.edition()}})(jwplayer);
-(function(g){var l=jwplayer.utils.extend,c=g.rightclick;g.rightclick=function(a,d){if("free"==a.edition())d.aboutlink="http://www.longtailvideo.com/jwpabout/?a\x3dr\x26v\x3d"+g.version+"\x26m\x3dh\x26e\x3df",delete d.abouttext;else{if(!d.aboutlink){var k="http://www.longtailvideo.com/jwpabout/?a\x3dr\x26v\x3d"+g.version+"\x26m\x3dh\x26e\x3d",b=a.edition();d.aboutlink=k+("pro"==b?"p":"premium"==b?"r":"enterprise"==b?"e":"ads"==b?"a":"f")}d.abouttext?d.abouttext+=" ...":(k=a.edition(),k=k.charAt(0).toUpperCase()+
-k.substr(1),d.abouttext="About JW Player "+g.version+" ("+k+" edition)")}l(this,new c(a,d))}})(jwplayer.html5);(function(g){var l=g.view;g.view=function(c,a){var d=new l(c,a),g=d.setup,b=a.edition();d.setup=function(a){g(a)};"invalid"==b&&d.setupError("Error setting up player: Invalid license key");return d}})(window.jwplayer.html5);
\ No newline at end of file
diff --git a/gui/slick/js/jwplayer/jwplayer.js b/gui/slick/js/jwplayer/jwplayer.js
deleted file mode 100644
index 9b4fd93ef7b8f7561b57a5cf54a85c5f2c6bc40f..0000000000000000000000000000000000000000
--- a/gui/slick/js/jwplayer/jwplayer.js
+++ /dev/null
@@ -1,128 +0,0 @@
-"undefined"==typeof jwplayer&&(jwplayer=function(h){if(jwplayer.api)return jwplayer.api.selectPlayer(h)},jwplayer.version="6.9.4867",jwplayer.vid=document.createElement("video"),jwplayer.audio=document.createElement("audio"),jwplayer.source=document.createElement("source"),function(h){function d(c){return function(){return a(c)}}function m(c,a,k,l,b){return function(){var d,f;if(b)k(c);else{try{if(d=c.responseXML)if(f=d.firstChild,d.lastChild&&"parsererror"===d.lastChild.nodeName){l&&l("Invalid XML",
-a,c);return}}catch(g){}if(d&&f)return k(c);(d=e.parseXML(c.responseText))&&d.firstChild?(c=e.extend({},c,{responseXML:d}),k(c)):l&&l(c.responseText?"Invalid XML":a,a,c)}}}var j=document,g=window,b=navigator,e=h.utils={};e.exists=function(c){switch(typeof c){case "string":return 0<c.length;case "object":return null!==c;case "undefined":return!1}return!0};e.styleDimension=function(c){return c+(0<c.toString().indexOf("%")?"":"px")};e.getAbsolutePath=function(c,a){e.exists(a)||(a=j.location.href);if(e.exists(c)){var k;
-if(e.exists(c)){k=c.indexOf("://");var l=c.indexOf("?");k=0<k&&(0>l||l>k)}else k=void 0;if(k)return c;k=a.substring(0,a.indexOf("://")+3);var l=a.substring(k.length,a.indexOf("/",k.length+1)),b;0===c.indexOf("/")?b=c.split("/"):(b=a.split("?")[0],b=b.substring(k.length+l.length+1,b.lastIndexOf("/")),b=b.split("/").concat(c.split("/")));for(var d=[],f=0;f<b.length;f++)b[f]&&(e.exists(b[f])&&"."!=b[f])&&(".."==b[f]?d.pop():d.push(b[f]));return k+l+"/"+d.join("/")}};e.extend=function(){var c=Array.prototype.slice.call(arguments,
-0);if(1<c.length){for(var a=c[0],k=function(c,l){void 0!==l&&null!==l&&(a[c]=l)},l=1;l<c.length;l++)e.foreach(c[l],k);return a}return null};var p=window.console=window.console||{log:function(){}};e.log=function(){var c=Array.prototype.slice.call(arguments,0);"object"===typeof p.log?p.log(c):p.log.apply(p,c)};var a=e.userAgentMatch=function(c){return null!==b.userAgent.toLowerCase().match(c)};e.isFF=d(/firefox/i);e.isChrome=d(/chrome/i);e.isIPod=d(/iP(hone|od)/i);e.isIPad=d(/iPad/i);e.isSafari602=
-d(/Macintosh.*Mac OS X 10_8.*6\.0\.\d* Safari/i);e.isIETrident=function(c){return c?(c=parseFloat(c).toFixed(1),a(RegExp("trident/.+rv:\\s*"+c,"i"))):a(/trident/i)};e.isMSIE=function(c){return c?(c=parseFloat(c).toFixed(1),a(RegExp("msie\\s*"+c,"i"))):a(/msie/i)};e.isIE=function(c){return c?(c=parseFloat(c).toFixed(1),11<=c?e.isIETrident(c):e.isMSIE(c)):e.isMSIE()||e.isIETrident()};e.isSafari=function(){return a(/safari/i)&&!a(/chrome/i)&&!a(/chromium/i)&&!a(/android/i)};e.isIOS=function(c){return c?
-a(RegExp("iP(hone|ad|od).+\\sOS\\s"+c,"i")):a(/iP(hone|ad|od)/i)};e.isAndroidNative=function(c){return e.isAndroid(c,!0)};e.isAndroid=function(c,b){return b&&a(/chrome\/[123456789]/i)&&!a(/chrome\/18/)?!1:c?(e.isInt(c)&&!/\./.test(c)&&(c=""+c+"."),a(RegExp("Android\\s*"+c,"i"))):a(/Android/i)};e.isMobile=function(){return e.isIOS()||e.isAndroid()};e.saveCookie=function(c,a){j.cookie="jwplayer."+c+"\x3d"+a+"; path\x3d/"};e.getCookies=function(){for(var c={},a=j.cookie.split("; "),k=0;k<a.length;k++){var l=
-a[k].split("\x3d");0===l[0].indexOf("jwplayer.")&&(c[l[0].substring(9,l[0].length)]=l[1])}return c};e.isInt=function(c){return 0===c%1};e.typeOf=function(c){var a=typeof c;return"object"===a?!c?"null":c instanceof Array?"array":a:a};e.translateEventResponse=function(c,a){var k=e.extend({},a);if(c==h.events.JWPLAYER_FULLSCREEN&&!k.fullscreen)k.fullscreen="true"===k.message,delete k.message;else if("object"==typeof k.data){var l=k.data;delete k.data;k=e.extend(k,l)}else"object"==typeof k.metadata&&
-e.deepReplaceKeyName(k.metadata,["__dot__","__spc__","__dsh__","__default__"],["."," ","-","default"]);e.foreach(["position","duration","offset"],function(c,l){k[l]&&(k[l]=Math.round(1E3*k[l])/1E3)});return k};e.flashVersion=function(){if(e.isAndroid())return 0;var c=b.plugins,a;try{if("undefined"!==c&&(a=c["Shockwave Flash"]))return parseInt(a.description.replace(/\D+(\d+)\..*/,"$1"),10)}catch(k){}if("undefined"!=typeof g.ActiveXObject)try{if(a=new g.ActiveXObject("ShockwaveFlash.ShockwaveFlash"))return parseInt(a.GetVariable("$version").split(" ")[1].split(",")[0],
-10)}catch(l){}return 0};e.getScriptPath=function(c){for(var a=j.getElementsByTagName("script"),k=0;k<a.length;k++){var l=a[k].src;if(l&&0<=l.indexOf(c))return l.substr(0,l.indexOf(c))}return""};e.deepReplaceKeyName=function(c,a,k){switch(h.utils.typeOf(c)){case "array":for(var l=0;l<c.length;l++)c[l]=h.utils.deepReplaceKeyName(c[l],a,k);break;case "object":e.foreach(c,function(l,b){var e;if(a instanceof Array&&k instanceof Array){if(a.length!=k.length)return;e=a}else e=[a];for(var d=l,f=0;f<e.length;f++)d=
-d.replace(RegExp(a[f],"g"),k[f]);c[d]=h.utils.deepReplaceKeyName(b,a,k);l!=d&&delete c[l]})}return c};var f=e.pluginPathType={ABSOLUTE:0,RELATIVE:1,CDN:2};e.getPluginPathType=function(c){if("string"==typeof c){c=c.split("?")[0];var a=c.indexOf("://");if(0<a)return f.ABSOLUTE;var k=c.indexOf("/");c=e.extension(c);return 0>a&&0>k&&(!c||!isNaN(c))?f.CDN:f.RELATIVE}};e.getPluginName=function(c){return c.replace(/^(.*\/)?([^-]*)-?.*\.(swf|js)$/,"$2")};e.getPluginVersion=function(c){return c.replace(/[^-]*-?([^\.]*).*$/,
-"$1")};e.isYouTube=function(c){return/^(http|\/\/).*(youtube\.com|youtu\.be)\/.+/.test(c)};e.youTubeID=function(c){try{return/v[=\/]([^?&]*)|youtu\.be\/([^?]*)|^([\w-]*)$/i.exec(c).slice(1).join("").replace("?","")}catch(a){return""}};e.isRtmp=function(c,a){return 0===c.indexOf("rtmp")||"rtmp"==a};e.foreach=function(c,a){var k,l;for(k in c)"function"==e.typeOf(c.hasOwnProperty)?c.hasOwnProperty(k)&&(l=c[k],a(k,l)):(l=c[k],a(k,l))};e.isHTTPS=function(){return 0===g.location.href.indexOf("https")};
-e.repo=function(){var c="http://p.jwpcdn.com/"+h.version.split(/\W/).splice(0,2).join("/")+"/";try{e.isHTTPS()&&(c=c.replace("http://","https://ssl."))}catch(a){}return c};e.ajax=function(c,a,k,l){var b,d=!1;0<c.indexOf("#")&&(c=c.replace(/#.*$/,""));if(c&&0<=c.indexOf("://")&&c.split("/")[2]!=g.location.href.split("/")[2]&&e.exists(g.XDomainRequest))b=new g.XDomainRequest,b.onload=m(b,c,a,k,l),b.ontimeout=b.onprogress=function(){},b.timeout=5E3;else if(e.exists(g.XMLHttpRequest)){var f=b=new g.XMLHttpRequest,
-j=c;b.onreadystatechange=function(){if(4===f.readyState)switch(f.status){case 200:m(f,j,a,k,l)();break;case 404:k("File not found",j,f)}}}else return k&&k("",c,b),b;b.overrideMimeType&&b.overrideMimeType("text/xml");var p=c,h=b;b.onerror=function(){k("Error loading file",p,h)};try{b.open("GET",c,!0)}catch(F){d=!0}setTimeout(function(){if(d)k&&k(c,c,b);else try{b.send()}catch(a){k&&k(c,c,b)}},0);return b};e.parseXML=function(c){var a;try{if(g.DOMParser){if(a=(new g.DOMParser).parseFromString(c,"text/xml"),
-a.childNodes&&a.childNodes.length&&"parsererror"==a.childNodes[0].firstChild.nodeName)return}else a=new g.ActiveXObject("Microsoft.XMLDOM"),a.async="false",a.loadXML(c)}catch(b){return}return a};e.filterPlaylist=function(a,b,k){var l=[],d,f,g,j;for(d=0;d<a.length;d++)if(f=e.extend({},a[d]),f.sources=e.filterSources(f.sources,!1,k),0<f.sources.length){for(g=0;g<f.sources.length;g++)j=f.sources[g],j.label||(j.label=g.toString());l.push(f)}if(b&&0===l.length)for(d=0;d<a.length;d++)if(f=e.extend({},a[d]),
-f.sources=e.filterSources(f.sources,!0,k),0<f.sources.length){for(g=0;g<f.sources.length;g++)j=f.sources[g],j.label||(j.label=g.toString());l.push(f)}return l};e.between=function(a,b,k){return Math.max(Math.min(a,k),b)};e.filterSources=function(a,b,k){var l,d;if(a){d=[];for(var f=0;f<a.length;f++){var g=e.extend({},a[f]),j=g.file,p=g.type;j&&(g.file=j=e.trim(""+j),p||(p=e.extension(j),g.type=p=e.extensionmap.extType(p)),b?h.embed.flashCanPlay(j,p)&&(l||(l=p),p==l&&d.push(g)):h.embed.html5CanPlay(j,
-p,k)&&(l||(l=p),p==l&&d.push(g)))}}return d};e.canPlayHTML5=function(a){a=e.extensionmap.types[a];return!!a&&!!h.vid.canPlayType&&!!h.vid.canPlayType(a)};e.seconds=function(a){a=a.replace(",",".");var b=a.split(":"),k=0;"s"==a.slice(-1)?k=parseFloat(a):"m"==a.slice(-1)?k=60*parseFloat(a):"h"==a.slice(-1)?k=3600*parseFloat(a):1<b.length?(k=parseFloat(b[b.length-1]),k+=60*parseFloat(b[b.length-2]),3==b.length&&(k+=3600*parseFloat(b[b.length-3]))):k=parseFloat(a);return k};e.serialize=function(a){return null===
-a?null:"true"==a.toString().toLowerCase()?!0:"false"==a.toString().toLowerCase()?!1:isNaN(Number(a))||5<a.length||0===a.length?a:Number(a)};e.addClass=function(a,b){a.className=a.className+" "+b};e.removeClass=function(a,b){a.className=a.className.replace(RegExp(" *"+b,"g")," ")}}(jwplayer),function(h){function d(a){var b=document.createElement("style");a&&b.appendChild(document.createTextNode(a));b.type="text/css";document.getElementsByTagName("head")[0].appendChild(b);return b}function m(a,l,c){if(!b.exists(l))return"";
-c=c?" !important":"";return"string"===typeof l&&isNaN(l)?/png|gif|jpe?g/i.test(l)&&0>l.indexOf("url")?"url("+l+")":l+c:0===l||"z-index"===a||"opacity"===a?""+l+c:/color/i.test(a)?"#"+b.pad(l.toString(16).replace(/^0x/i,""),6)+c:Math.ceil(l)+"px"+c}function j(a,b){for(var c=0;c<a.length;c++){var e=a[c],d,f;if(void 0!==e&&null!==e)for(d in b){f=d;f=f.split("-");for(var g=1;g<f.length;g++)f[g]=f[g].charAt(0).toUpperCase()+f[g].slice(1);f=f.join("");e.style[f]!==b[d]&&(e.style[f]=b[d])}}}function g(b){var l=
-e[b].sheet,d,f,g;if(l){d=l.cssRules;f=c[b];g=b;var j=a[g];g+=" { ";for(var p in j)g+=p+": "+j[p]+"; ";g+="}";if(void 0!==f&&f<d.length&&d[f].selectorText===b){if(g===d[f].cssText)return;l.deleteRule(f)}else f=d.length,c[b]=f;try{l.insertRule(g,f)}catch(h){}}}var b=h.utils,e={},p,a={},f=null,c={};b.cssKeyframes=function(a,b){var c=e.keyframes;c||(c=d(),e.keyframes=c);var c=c.sheet,f="@keyframes "+a+" { "+b+" }";try{c.insertRule(f,c.cssRules.length)}catch(g){}f=f.replace(/(keyframes|transform)/g,"-webkit-$1");
-try{c.insertRule(f,c.cssRules.length)}catch(j){}};var n=b.css=function(b,c,j){a[b]||(a[b]={});var h=a[b];j=j||!1;var u=!1,n,q;for(n in c)q=m(n,c[n],j),""!==q?q!==h[n]&&(h[n]=q,u=!0):void 0!==h[n]&&(delete h[n],u=!0);if(u){if(!e[b]){c=p&&p.sheet&&p.sheet.cssRules&&p.sheet.cssRules.length||0;if(!p||5E4<c)p=d();e[b]=p}null!==f?f.styleSheets[b]=a[b]:g(b)}};n.style=function(a,b,c){if(!(void 0===a||null===a)){void 0===a.length&&(a=[a]);var d={},e;for(e in b)d[e]=m(e,b[e]);if(null!==f&&!c){b=(b=a.__cssRules)||
-{};for(var g in d)b[g]=d[g];a.__cssRules=b;0>f.elements.indexOf(a)&&f.elements.push(a)}else j(a,d)}};n.block=function(a){null===f&&(f={id:a,styleSheets:{},elements:[]})};n.unblock=function(a){if(f&&(!a||f.id===a)){for(var b in f.styleSheets)g(b);for(a=0;a<f.elements.length;a++)b=f.elements[a],j(b,b.__cssRules);f=null}};b.clearCss=function(b){for(var c in a)0<=c.indexOf(b)&&delete a[c];for(var f in e)0<=f.indexOf(b)&&g(f)};b.transform=function(a,b){var c={};b=b||"";c.transform=b;c["-webkit-transform"]=
-b;c["-ms-transform"]=b;c["-moz-transform"]=b;c["-o-transform"]=b;"string"===typeof a?n(a,c):n.style(a,c)};b.dragStyle=function(a,b){n(a,{"-webkit-user-select":b,"-moz-user-select":b,"-ms-user-select":b,"-webkit-user-drag":b,"user-select":b,"user-drag":b})};b.transitionStyle=function(a,b){navigator.userAgent.match(/5\.\d(\.\d)? safari/i)||n(a,{"-webkit-transition":b,"-moz-transition":b,"-o-transition":b,transition:b})};b.rotate=function(a,c){b.transform(a,"rotate("+c+"deg)")};b.rgbHex=function(a){a=
-String(a).replace("#","");3===a.length&&(a=a[0]+a[0]+a[1]+a[1]+a[2]+a[2]);return"#"+a.substr(-6)};b.hexToRgba=function(a,b){var c="rgb",f=[parseInt(a.substr(1,2),16),parseInt(a.substr(3,2),16),parseInt(a.substr(5,2),16)];void 0!==b&&100!==b&&(c+="a",f.push(b/100));return c+"("+f.join(",")+")"}}(jwplayer),function(h){var d=h.foreach,m={mp4:"video/mp4",ogg:"video/ogg",oga:"audio/ogg",vorbis:"audio/ogg",webm:"video/webm",aac:"audio/mp4",mp3:"audio/mpeg",hls:"application/vnd.apple.mpegurl"},j={mp4:m.mp4,
-f4v:m.mp4,m4v:m.mp4,mov:m.mp4,m4a:m.aac,f4a:m.aac,aac:m.aac,mp3:m.mp3,ogv:m.ogg,ogg:m.ogg,oga:m.vorbis,vorbis:m.vorbis,webm:m.webm,m3u8:m.hls,m3u:m.hls,hls:m.hls},g=h.extensionmap={};d(j,function(b,d){g[b]={html5:d}});d({flv:"video",f4v:"video",mov:"video",m4a:"video",m4v:"video",mp4:"video",aac:"video",f4a:"video",mp3:"sound",smil:"rtmp",m3u8:"hls",hls:"hls"},function(b,d){g[b]||(g[b]={});g[b].flash=d});g.types=m;g.mimeType=function(b){var e;d(m,function(d,a){!e&&a==b&&(e=d)});return e};g.extType=
-function(b){return g.mimeType(j[b])}}(jwplayer.utils),function(h){var d=h.loaderstatus={NEW:0,LOADING:1,ERROR:2,COMPLETE:3},m=document;h.scriptloader=function(j){function g(b){a=d.ERROR;p.sendEvent(e.ERROR,b)}function b(b){a=d.COMPLETE;p.sendEvent(e.COMPLETE,b)}var e=jwplayer.events,p=h.extend(this,new e.eventdispatcher),a=d.NEW;this.load=function(){if(a==d.NEW){var f=h.scriptloader.loaders[j];if(f&&(a=f.getStatus(),2>a)){f.addEventListener(e.ERROR,g);f.addEventListener(e.COMPLETE,b);return}var c=
-m.createElement("script");c.addEventListener?(c.onload=b,c.onerror=g):c.readyState&&(c.onreadystatechange=function(a){("loaded"==c.readyState||"complete"==c.readyState)&&b(a)});m.getElementsByTagName("head")[0].appendChild(c);c.src=j;a=d.LOADING;h.scriptloader.loaders[j]=this}};this.getStatus=function(){return a}};h.scriptloader.loaders={}}(jwplayer.utils),function(h){h.trim=function(d){return d.replace(/^\s*/,"").replace(/\s*$/,"")};h.pad=function(d,h,j){for(j||(j="0");d.length<h;)d=j+d;return d};
-h.xmlAttribute=function(d,h){for(var j=0;j<d.attributes.length;j++)if(d.attributes[j].name&&d.attributes[j].name.toLowerCase()==h.toLowerCase())return d.attributes[j].value.toString();return""};h.extension=function(d){if(!d||"rtmp"==d.substr(0,4))return"";var h;h=d.match(/manifest\(format=(.*),audioTrack/);h=!h||!h[1]?!1:h[1].split("-")[0];if(h)return h;d=d.substring(d.lastIndexOf("/")+1,d.length).split("?")[0].split("#")[0];if(-1<d.lastIndexOf("."))return d.substr(d.lastIndexOf(".")+1,d.length).toLowerCase()};
-h.stringToColor=function(d){d=d.replace(/(#|0x)?([0-9A-F]{3,6})$/gi,"$2");3==d.length&&(d=d.charAt(0)+d.charAt(0)+d.charAt(1)+d.charAt(1)+d.charAt(2)+d.charAt(2));return parseInt(d,16)}}(jwplayer.utils),function(h){var d="touchmove",m="touchstart";h.touch=function(j){function g(f){f.type==m?(a=!0,c=e(k.DRAG_START,f)):f.type==d?a&&(n||(b(k.DRAG_START,f,c),n=!0),b(k.DRAG,f)):(a&&(n?b(k.DRAG_END,f):(f.cancelBubble=!0,b(k.TAP,f))),a=n=!1,c=null)}function b(a,b,c){if(f[a]&&(b.preventManipulation&&b.preventManipulation(),
-b.preventDefault&&b.preventDefault(),b=c?c:e(a,b)))f[a](b)}function e(a,b){var f=null;b.touches&&b.touches.length?f=b.touches[0]:b.changedTouches&&b.changedTouches.length&&(f=b.changedTouches[0]);if(!f)return null;var d=p.getBoundingClientRect(),f={type:a,target:p,x:f.pageX-window.pageXOffset-d.left,y:f.pageY,deltaX:0,deltaY:0};a!=k.TAP&&c&&(f.deltaX=f.x-c.x,f.deltaY=f.y-c.y);return f}var p=j,a=!1,f={},c=null,n=!1,k=h.touchEvents;document.addEventListener(d,g);document.addEventListener("touchend",
-function(f){a&&n&&b(k.DRAG_END,f);a=n=!1;c=null});document.addEventListener("touchcancel",g);j.addEventListener(m,g);j.addEventListener("touchend",g);this.addEventListener=function(a,b){f[a]=b};this.removeEventListener=function(a){delete f[a]};return this}}(jwplayer.utils),function(h){h.touchEvents={DRAG:"jwplayerDrag",DRAG_START:"jwplayerDragStart",DRAG_END:"jwplayerDragEnd",TAP:"jwplayerTap"}}(jwplayer.utils),function(h){h.key=function(d){var m,j,g;this.edition=function(){return g&&g.getTime()<
-(new Date).getTime()?"invalid":m};this.token=function(){return j};h.exists(d)||(d="");try{d=h.tea.decrypt(d,"36QXq4W@GSBV^teR");var b=d.split("/");(m=b[0])?/^(free|pro|premium|enterprise|ads)$/i.test(m)?(j=b[1],b[2]&&0<parseInt(b[2])&&(g=new Date,g.setTime(String(b[2])))):m="invalid":m="free"}catch(e){m="invalid"}}}(jwplayer.utils),function(h){var d=h.tea={};d.encrypt=function(g,b){if(0==g.length)return"";var e=d.strToLongs(j.encode(g));1>=e.length&&(e[1]=0);for(var h=d.strToLongs(j.encode(b).slice(0,
-16)),a=e.length,f=e[a-1],c=e[0],n,k=Math.floor(6+52/a),l=0;0<k--;){l+=2654435769;n=l>>>2&3;for(var r=0;r<a;r++)c=e[(r+1)%a],f=(f>>>5^c<<2)+(c>>>3^f<<4)^(l^c)+(h[r&3^n]^f),f=e[r]+=f}e=d.longsToStr(e);return m.encode(e)};d.decrypt=function(g,b){if(0==g.length)return"";for(var e=d.strToLongs(m.decode(g)),h=d.strToLongs(j.encode(b).slice(0,16)),a=e.length,f=e[a-1],c=e[0],n,k=2654435769*Math.floor(6+52/a);0!=k;){n=k>>>2&3;for(var l=a-1;0<=l;l--)f=e[0<l?l-1:a-1],f=(f>>>5^c<<2)+(c>>>3^f<<4)^(k^c)+(h[l&3^
-n]^f),c=e[l]-=f;k-=2654435769}e=d.longsToStr(e);e=e.replace(/\0+$/,"");return j.decode(e)};d.strToLongs=function(d){for(var b=Array(Math.ceil(d.length/4)),e=0;e<b.length;e++)b[e]=d.charCodeAt(4*e)+(d.charCodeAt(4*e+1)<<8)+(d.charCodeAt(4*e+2)<<16)+(d.charCodeAt(4*e+3)<<24);return b};d.longsToStr=function(d){for(var b=Array(d.length),e=0;e<d.length;e++)b[e]=String.fromCharCode(d[e]&255,d[e]>>>8&255,d[e]>>>16&255,d[e]>>>24&255);return b.join("")};var m={code:"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\x3d",
-encode:function(d,b){var e,h,a,f,c=[],n="",k,l,r=m.code;l=("undefined"==typeof b?0:b)?j.encode(d):d;k=l.length%3;if(0<k)for(;3>k++;)n+="\x3d",l+="\x00";for(k=0;k<l.length;k+=3)e=l.charCodeAt(k),h=l.charCodeAt(k+1),a=l.charCodeAt(k+2),f=e<<16|h<<8|a,e=f>>18&63,h=f>>12&63,a=f>>6&63,f&=63,c[k/3]=r.charAt(e)+r.charAt(h)+r.charAt(a)+r.charAt(f);c=c.join("");return c=c.slice(0,c.length-n.length)+n},decode:function(d,b){b="undefined"==typeof b?!1:b;var e,h,a,f,c,n=[],k,l=m.code;k=b?j.decode(d):d;for(var r=
-0;r<k.length;r+=4)e=l.indexOf(k.charAt(r)),h=l.indexOf(k.charAt(r+1)),f=l.indexOf(k.charAt(r+2)),c=l.indexOf(k.charAt(r+3)),a=e<<18|h<<12|f<<6|c,e=a>>>16&255,h=a>>>8&255,a&=255,n[r/4]=String.fromCharCode(e,h,a),64==c&&(n[r/4]=String.fromCharCode(e,h)),64==f&&(n[r/4]=String.fromCharCode(e));f=n.join("");return b?j.decode(f):f}},j={encode:function(d){d=d.replace(/[\u0080-\u07ff]/g,function(b){b=b.charCodeAt(0);return String.fromCharCode(192|b>>6,128|b&63)});return d=d.replace(/[\u0800-\uffff]/g,function(b){b=
-b.charCodeAt(0);return String.fromCharCode(224|b>>12,128|b>>6&63,128|b&63)})},decode:function(d){d=d.replace(/[\u00e0-\u00ef][\u0080-\u00bf][\u0080-\u00bf]/g,function(b){b=(b.charCodeAt(0)&15)<<12|(b.charCodeAt(1)&63)<<6|b.charCodeAt(2)&63;return String.fromCharCode(b)});return d=d.replace(/[\u00c0-\u00df][\u0080-\u00bf]/g,function(b){b=(b.charCodeAt(0)&31)<<6|b.charCodeAt(1)&63;return String.fromCharCode(b)})}}}(jwplayer.utils),function(h){h.events={COMPLETE:"COMPLETE",ERROR:"ERROR",API_READY:"jwplayerAPIReady",
-JWPLAYER_READY:"jwplayerReady",JWPLAYER_FULLSCREEN:"jwplayerFullscreen",JWPLAYER_RESIZE:"jwplayerResize",JWPLAYER_ERROR:"jwplayerError",JWPLAYER_SETUP_ERROR:"jwplayerSetupError",JWPLAYER_MEDIA_BEFOREPLAY:"jwplayerMediaBeforePlay",JWPLAYER_MEDIA_BEFORECOMPLETE:"jwplayerMediaBeforeComplete",JWPLAYER_COMPONENT_SHOW:"jwplayerComponentShow",JWPLAYER_COMPONENT_HIDE:"jwplayerComponentHide",JWPLAYER_MEDIA_BUFFER:"jwplayerMediaBuffer",JWPLAYER_MEDIA_BUFFER_FULL:"jwplayerMediaBufferFull",JWPLAYER_MEDIA_ERROR:"jwplayerMediaError",
-JWPLAYER_MEDIA_LOADED:"jwplayerMediaLoaded",JWPLAYER_MEDIA_COMPLETE:"jwplayerMediaComplete",JWPLAYER_MEDIA_SEEK:"jwplayerMediaSeek",JWPLAYER_MEDIA_TIME:"jwplayerMediaTime",JWPLAYER_MEDIA_VOLUME:"jwplayerMediaVolume",JWPLAYER_MEDIA_META:"jwplayerMediaMeta",JWPLAYER_MEDIA_MUTE:"jwplayerMediaMute",JWPLAYER_MEDIA_LEVELS:"jwplayerMediaLevels",JWPLAYER_MEDIA_LEVEL_CHANGED:"jwplayerMediaLevelChanged",JWPLAYER_CAPTIONS_CHANGED:"jwplayerCaptionsChanged",JWPLAYER_CAPTIONS_LIST:"jwplayerCaptionsList",JWPLAYER_CAPTIONS_LOADED:"jwplayerCaptionsLoaded",
-JWPLAYER_PLAYER_STATE:"jwplayerPlayerState",state:{BUFFERING:"BUFFERING",IDLE:"IDLE",PAUSED:"PAUSED",PLAYING:"PLAYING"},JWPLAYER_PLAYLIST_LOADED:"jwplayerPlaylistLoaded",JWPLAYER_PLAYLIST_ITEM:"jwplayerPlaylistItem",JWPLAYER_PLAYLIST_COMPLETE:"jwplayerPlaylistComplete",JWPLAYER_DISPLAY_CLICK:"jwplayerViewClick",JWPLAYER_VIEW_TAB_FOCUS:"jwplayerViewTabFocus",JWPLAYER_CONTROLS:"jwplayerViewControls",JWPLAYER_USER_ACTION:"jwplayerUserAction",JWPLAYER_INSTREAM_CLICK:"jwplayerInstreamClicked",JWPLAYER_INSTREAM_DESTROYED:"jwplayerInstreamDestroyed",
-JWPLAYER_AD_TIME:"jwplayerAdTime",JWPLAYER_AD_ERROR:"jwplayerAdError",JWPLAYER_AD_CLICK:"jwplayerAdClicked",JWPLAYER_AD_COMPLETE:"jwplayerAdComplete",JWPLAYER_AD_IMPRESSION:"jwplayerAdImpression",JWPLAYER_AD_COMPANIONS:"jwplayerAdCompanions",JWPLAYER_AD_SKIPPED:"jwplayerAdSkipped",JWPLAYER_AD_PLAY:"jwplayerAdPlay",JWPLAYER_AD_PAUSE:"jwplayerAdPause",JWPLAYER_AD_META:"jwplayerAdMeta",JWPLAYER_CAST_AVAILABLE:"jwplayerCastAvailable",JWPLAYER_CAST_SESSION:"jwplayerCastSession",JWPLAYER_CAST_AD_CHANGED:"jwplayerCastAdChanged"}}(jwplayer),
-function(h){var d=h.utils;h.events.eventdispatcher=function(m,j){function g(b,a,f){if(b)for(var c=0;c<b.length;c++){var e=b[c];if(e){null!==e.count&&0===--e.count&&delete b[c];try{e.listener(a)}catch(g){d.log('Error handling "'+f+'" event listener ['+c+"]: "+g.toString(),e.listener,a)}}}}var b,e;this.resetEventListeners=function(){b={};e=[]};this.resetEventListeners();this.addEventListener=function(e,a,f){try{d.exists(b[e])||(b[e]=[]),"string"==d.typeOf(a)&&(a=(new Function("return "+a))()),b[e].push({listener:a,
-count:f||null})}catch(c){d.log("error",c)}return!1};this.removeEventListener=function(e,a){if(b[e]){try{if(void 0===a){b[e]=[];return}for(var f=0;f<b[e].length;f++)if(b[e][f].listener.toString()==a.toString()){b[e].splice(f,1);break}}catch(c){d.log("error",c)}return!1}};this.addGlobalListener=function(b,a){try{"string"==d.typeOf(b)&&(b=(new Function("return "+b))()),e.push({listener:b,count:a||null})}catch(f){d.log("error",f)}return!1};this.removeGlobalListener=function(b){if(b){try{for(var a=e.length;a--;)e[a].listener.toString()==
-b.toString()&&e.splice(a,1)}catch(f){d.log("error",f)}return!1}};this.sendEvent=function(p,a){d.exists(a)||(a={});d.extend(a,{id:m,version:h.version,type:p});j&&d.log(p,a);g(b[p],a,p);g(e,a,p)}}}(window.jwplayer),function(h){var d={},m={};h.plugins=function(){};h.plugins.loadPlugins=function(j,g){m[j]=new h.plugins.pluginloader(new h.plugins.model(d),g);return m[j]};h.plugins.registerPlugin=function(j,g,b,e){var p=h.utils.getPluginName(j);d[p]||(d[p]=new h.plugins.plugin(j));d[p].registerPlugin(j,
-g,b,e)}}(jwplayer),function(h){h.plugins.model=function(d){this.addPlugin=function(m){var j=h.utils.getPluginName(m);d[j]||(d[j]=new h.plugins.plugin(m));return d[j]};this.getPlugins=function(){return d}}}(jwplayer),function(h){var d=jwplayer.utils,m=jwplayer.events;h.pluginmodes={FLASH:0,JAVASCRIPT:1,HYBRID:2};h.plugin=function(j){function g(){switch(d.getPluginPathType(j)){case d.pluginPathType.ABSOLUTE:return j;case d.pluginPathType.RELATIVE:return d.getAbsolutePath(j,window.location.href)}}function b(){n=
-setTimeout(function(){p=d.loaderstatus.COMPLETE;k.sendEvent(m.COMPLETE)},1E3)}function e(){p=d.loaderstatus.ERROR;k.sendEvent(m.ERROR)}var p=d.loaderstatus.NEW,a,f,c,n,k=new m.eventdispatcher;d.extend(this,k);this.load=function(){if(p==d.loaderstatus.NEW)if(0<j.lastIndexOf(".swf"))a=j,p=d.loaderstatus.COMPLETE,k.sendEvent(m.COMPLETE);else if(d.getPluginPathType(j)==d.pluginPathType.CDN)p=d.loaderstatus.COMPLETE,k.sendEvent(m.COMPLETE);else{p=d.loaderstatus.LOADING;var c=new d.scriptloader(g());c.addEventListener(m.COMPLETE,
-b);c.addEventListener(m.ERROR,e);c.load()}};this.registerPlugin=function(b,e,g,j){n&&(clearTimeout(n),n=void 0);c=e;g&&j?(a=j,f=g):"string"==typeof g?a=g:"function"==typeof g?f=g:!g&&!j&&(a=b);p=d.loaderstatus.COMPLETE;k.sendEvent(m.COMPLETE)};this.getStatus=function(){return p};this.getPluginName=function(){return d.getPluginName(j)};this.getFlashPath=function(){if(a)switch(d.getPluginPathType(a)){case d.pluginPathType.ABSOLUTE:return a;case d.pluginPathType.RELATIVE:return 0<j.lastIndexOf(".swf")?
-d.getAbsolutePath(a,window.location.href):d.getAbsolutePath(a,g())}return null};this.getJS=function(){return f};this.getTarget=function(){return c};this.getPluginmode=function(){if("undefined"!=typeof a&&"undefined"!=typeof f)return h.pluginmodes.HYBRID;if("undefined"!=typeof a)return h.pluginmodes.FLASH;if("undefined"!=typeof f)return h.pluginmodes.JAVASCRIPT};this.getNewInstance=function(a,b,c){return new f(a,b,c)};this.getURL=function(){return j}}}(jwplayer.plugins),function(h){var d=h.utils,m=
-h.events,j=d.foreach;h.plugins.pluginloader=function(g,b){function e(){c&&l.sendEvent(m.ERROR,{message:n});f||(f=!0,a=d.loaderstatus.COMPLETE,l.sendEvent(m.COMPLETE))}function p(){k||e();if(!f&&!c){var a=0,b=g.getPlugins();d.foreach(k,function(f){f=d.getPluginName(f);var g=b[f];f=g.getJS();var l=g.getTarget(),g=g.getStatus();if(g==d.loaderstatus.LOADING||g==d.loaderstatus.NEW)a++;else if(f&&(!l||parseFloat(l)>parseFloat(h.version)))c=!0,n="Incompatible player version",e()});0===a&&e()}}var a=d.loaderstatus.NEW,
-f=!1,c=!1,n,k=b,l=new m.eventdispatcher;d.extend(this,l);this.setupPlugins=function(a,b,c){var f={length:0,plugins:{}},e=0,l={},k=g.getPlugins();j(b.plugins,function(g,j){var h=d.getPluginName(g),n=k[h],p=n.getFlashPath(),m=n.getJS(),L=n.getURL();p&&(f.plugins[p]=d.extend({},j),f.plugins[p].pluginmode=n.getPluginmode(),f.length++);try{if(m&&b.plugins&&b.plugins[L]){var r=document.createElement("div");r.id=a.id+"_"+h;r.style.position="absolute";r.style.top=0;r.style.zIndex=e+10;l[h]=n.getNewInstance(a,
-d.extend({},b.plugins[L]),r);e++;a.onReady(c(l[h],r,!0));a.onResize(c(l[h],r))}}catch(O){d.log("ERROR: Failed to load "+h+".")}});a.plugins=l;return f};this.load=function(){if(!(d.exists(b)&&"object"!=d.typeOf(b))){a=d.loaderstatus.LOADING;j(b,function(a){d.exists(a)&&(a=g.addPlugin(a),a.addEventListener(m.COMPLETE,p),a.addEventListener(m.ERROR,r))});var c=g.getPlugins();j(c,function(a,b){b.load()})}p()};this.destroy=function(){l&&(l.resetEventListeners(),l=null)};var r=this.pluginFailed=function(){c||
-(c=!0,n="File not found",e())};this.getStatus=function(){return a}}}(jwplayer),function(h){h.parsers={localName:function(d){return d?d.localName?d.localName:d.baseName?d.baseName:"":""},textContent:function(d){return d?d.textContent?h.utils.trim(d.textContent):d.text?h.utils.trim(d.text):"":""},getChildNode:function(d,h){return d.childNodes[h]},numChildren:function(d){return d.childNodes?d.childNodes.length:0}}}(jwplayer),function(h){var d=h.parsers;(d.jwparser=function(){}).parseEntry=function(m,
-j){for(var g=[],b=[],e=h.utils.xmlAttribute,p=0;p<m.childNodes.length;p++){var a=m.childNodes[p];if("jwplayer"==a.prefix){var f=d.localName(a);"source"==f?(delete j.sources,g.push({file:e(a,"file"),"default":e(a,"default"),label:e(a,"label"),type:e(a,"type")})):"track"==f?(delete j.tracks,b.push({file:e(a,"file"),"default":e(a,"default"),kind:e(a,"kind"),label:e(a,"label")})):(j[f]=h.utils.serialize(d.textContent(a)),"file"==f&&j.sources&&delete j.sources)}j.file||(j.file=j.link)}if(g.length){j.sources=
-[];for(p=0;p<g.length;p++)0<g[p].file.length&&(g[p]["default"]="true"==g[p]["default"]?!0:!1,g[p].label.length||delete g[p].label,j.sources.push(g[p]))}if(b.length){j.tracks=[];for(p=0;p<b.length;p++)0<b[p].file.length&&(b[p]["default"]="true"==b[p]["default"]?!0:!1,b[p].kind=!b[p].kind.length?"captions":b[p].kind,b[p].label.length||delete b[p].label,j.tracks.push(b[p]))}return j}}(jwplayer),function(h){var d=jwplayer.utils,m=d.xmlAttribute,j=h.localName,g=h.textContent,b=h.numChildren,e=h.mediaparser=
-function(){};e.parseGroup=function(h,a){var f,c,n=[];for(c=0;c<b(h);c++)if(f=h.childNodes[c],"media"==f.prefix&&j(f))switch(j(f).toLowerCase()){case "content":m(f,"duration")&&(a.duration=d.seconds(m(f,"duration")));0<b(f)&&(a=e.parseGroup(f,a));m(f,"url")&&(a.sources||(a.sources=[]),a.sources.push({file:m(f,"url"),type:m(f,"type"),width:m(f,"width"),label:m(f,"label")}));break;case "title":a.title=g(f);break;case "description":a.description=g(f);break;case "guid":a.mediaid=g(f);break;case "thumbnail":a.image||
-(a.image=m(f,"url"));break;case "group":e.parseGroup(f,a);break;case "subtitle":var k={};k.file=m(f,"url");k.kind="captions";if(0<m(f,"lang").length){var l=k;f=m(f,"lang");var r={zh:"Chinese",nl:"Dutch",en:"English",fr:"French",de:"German",it:"Italian",ja:"Japanese",pt:"Portuguese",ru:"Russian",es:"Spanish"};f=r[f]?r[f]:f;l.label=f}n.push(k)}a.hasOwnProperty("tracks")||(a.tracks=[]);for(c=0;c<n.length;c++)a.tracks.push(n[c]);return a}}(jwplayer.parsers),function(h){function d(b){for(var a={},f=0;f<
-b.childNodes.length;f++){var c=b.childNodes[f],d=e(c);if(d)switch(d.toLowerCase()){case "enclosure":a.file=m.xmlAttribute(c,"url");break;case "title":a.title=j(c);break;case "guid":a.mediaid=j(c);break;case "pubdate":a.date=j(c);break;case "description":a.description=j(c);break;case "link":a.link=j(c);break;case "category":a.tags=a.tags?a.tags+j(c):j(c)}}a=h.mediaparser.parseGroup(b,a);a=h.jwparser.parseEntry(b,a);return new jwplayer.playlist.item(a)}var m=jwplayer.utils,j=h.textContent,g=h.getChildNode,
-b=h.numChildren,e=h.localName;h.rssparser={};h.rssparser.parse=function(j){for(var a=[],f=0;f<b(j);f++){var c=g(j,f);if("channel"==e(c).toLowerCase())for(var h=0;h<b(c);h++){var k=g(c,h);"item"==e(k).toLowerCase()&&a.push(d(k))}}return a}}(jwplayer.parsers),function(h){h.playlist=function(d){var m=[];if("array"==h.utils.typeOf(d))for(var j=0;j<d.length;j++)m.push(new h.playlist.item(d[j]));else m.push(new h.playlist.item(d));return m}}(jwplayer),function(h){var d=h.item=function(m){var j=jwplayer.utils,
-g=j.extend({},d.defaults,m),b,e;g.tracks=m&&j.exists(m.tracks)?m.tracks:[];0===g.sources.length&&(g.sources=[new h.source(g)]);for(b=0;b<g.sources.length;b++)e=g.sources[b]["default"],g.sources[b]["default"]=e?"true"==e.toString():!1,g.sources[b]=new h.source(g.sources[b]);if(g.captions&&!j.exists(m.tracks)){for(m=0;m<g.captions.length;m++)g.tracks.push(g.captions[m]);delete g.captions}for(b=0;b<g.tracks.length;b++)g.tracks[b]=new h.track(g.tracks[b]);return g};d.defaults={description:void 0,image:void 0,
-mediaid:void 0,title:void 0,sources:[],tracks:[]}}(jwplayer.playlist),function(h){var d=jwplayer,m=d.utils,j=d.events,g=d.parsers;h.loader=function(){function b(b){try{var c=b.responseXML.childNodes;b="";for(var d=0;d<c.length&&!(b=c[d],8!=b.nodeType);d++);"xml"==g.localName(b)&&(b=b.nextSibling);if("rss"!=g.localName(b))p("Not a valid RSS feed");else{var e=new h(g.rssparser.parse(b));a.sendEvent(j.JWPLAYER_PLAYLIST_LOADED,{playlist:e})}}catch(l){p()}}function d(a){p(a.match(/invalid/i)?"Not a valid RSS feed":
-"")}function p(b){a.sendEvent(j.JWPLAYER_ERROR,{message:b?b:"Error loading file"})}var a=new j.eventdispatcher;m.extend(this,a);this.load=function(a){m.ajax(a,b,d)}}}(jwplayer.playlist),function(h){var d=jwplayer.utils,m={file:void 0,label:void 0,type:void 0,"default":void 0};h.source=function(j){var g=d.extend({},m);d.foreach(m,function(b){d.exists(j[b])&&(g[b]=j[b],delete j[b])});g.type&&0<g.type.indexOf("/")&&(g.type=d.extensionmap.mimeType(g.type));"m3u8"==g.type&&(g.type="hls");"smil"==g.type&&
-(g.type="rtmp");return g}}(jwplayer.playlist),function(h){var d=jwplayer.utils,m={file:void 0,label:void 0,kind:"captions","default":!1};h.track=function(j){var g=d.extend({},m);j||(j={});d.foreach(m,function(b){d.exists(j[b])&&(g[b]=j[b],delete j[b])});return g}}(jwplayer.playlist),function(h){var d=h.cast={},m=h.utils;d.adprovider=function(j,g){function b(){c={message:n,position:0,duration:-1}}function e(a,b){var c={command:a};void 0!==b&&(c.args=b);g.sendMessage(j,c,p,function(a){d.error("message send error",
-a)})}function p(){}var a=new d.provider(j,g),f=m.extend(this,a),c,n="Loading ad",k=0;f.init=function(){a.init();b()};f.destroy=function(){a.destroy()};f.updateModel=function(f,e){(f.tag||f.newstate||f.sequence||f.companions)&&d.log("received ad change:",f);f.tag&&(c.tag&&f.tag!==c.tag)&&(d.error("ad messages not received in order. new model:",f,"old model:",c),b());h.utils.extend(c,f);a.updateModel(f,e)};f.getAdModel=function(){var a=m.extend({},c);a.message=0<c.duration?this.getAdMessage():n;return a};
-f.resetAdModel=function(){b()};f.getAdMessage=function(){var a=c.message.replace(/xx/gi,""+Math.min(c.duration|0,Math.ceil(c.duration-c.position)));c.podMessage&&1<c.podcount&&(a=c.podMessage.replace(/__AD_POD_CURRENT__/g,""+c.sequence).replace(/__AD_POD_LENGTH__/g,""+c.podcount)+a);return a};f.skipAd=function(a){e("skipAd",{tag:a.tag})};f.clickAd=function(a){k=1*new Date;e("clickAd",{tag:a.tag})};f.timeSinceClick=function(){return 1*new Date-k}}}(window.jwplayer),function(h,d){function m(a,b){a[b]&&
-(a[b]=g.getAbsolutePath(a[b]))}var j=d.cast,g=d.utils,b=d.events,e=b.state,p={};j.NS="urn:x-cast:com.longtailvideo.jwplayer";j.controller=function(a,f){var c,n;function k(a){a=a.availability===h.chrome.cast.ReceiverAvailability.AVAILABLE;M.available!==a&&(M.available=a,u(b.JWPLAYER_CAST_AVAILABLE))}function l(a){j.log("existing session",a);w||(J=a.session,J.addMessageListener(j.NS,r))}function r(c,e){var g=JSON.parse(e);if(!g)throw"Message not proper JSON";if(g.reconcile){J.removeMessageListener(j.NS,
-r);var h=g.diff,l=J;if(!h.id||!g.appid||!g.pageUrl)h.id=d().id,g.appid=I.appid,g.pageUrl=P,J=w=null;h.id===a.id&&(g.appid===I.appid&&g.pageUrl===P)&&(w||(a.jwInstreamState()&&a.jwInstreamDestroy(!0),F(l),f.sendEvent(b.JWPLAYER_PLAYER_STATE,{oldstate:h.oldstate,newstate:h.newstate})),D(g));J=null}}function x(a){M.active=!!a;a=M;var c;c=w&&w.receiver?w.receiver.friendlyName:"";a.deviceName=c;u(b.JWPLAYER_CAST_SESSION,{})}function u(a){var b=g.extend({},M);f.sendEvent(a,b)}function t(a){var b=h.chrome;
-a.code!==b.cast.ErrorCode.CANCEL&&(j.log("Cast Session Error:",a,w),a.code===b.cast.ErrorCode.SESSION_ERROR&&q())}function q(){w?(G(),w.stop(A,E)):A()}function E(a){j.error("Cast Session Stop error:",a,w);A()}function F(l){w=l;w.addMessageListener(j.NS,B);w.addUpdateListener(v);a.jwPause(!0);a.jwSetFullscreen(!1);N=f.getVideo();c=f.volume;n=f.mute;C=new j.provider(j.NS,w);C.init();f.setVideo(C);a.jwPlay=function(a){!1===a?C.pause():C.play()};a.jwPause=function(b){a.jwPlay(!!b)};a.jwLoad=function(a){"number"===
-g.typeOf(a)&&f.setItem(a);C.load(a)};a.jwPlaylistItem=function(a){"number"===g.typeOf(a)&&f.setItem(a);C.playlistItem(a)};a.jwPlaylistNext=function(){a.jwPlaylistItem(f.item+1)};a.jwPlaylistPrev=function(){a.jwPlaylistItem(f.item-1)};a.jwSetVolume=function(a){g.exists(a)&&(a=Math.min(Math.max(0,a),100)|0,L(a)&&(a=Math.max(0,Math.min(a/100,1)),w.setReceiverVolumeLevel(a,y,function(a){j.error("set volume error",a);y()})))};a.jwSetMute=function(a){g.exists(a)||(a=!K.mute);Q(a)&&w.setReceiverMuted(!!a,
-y,function(a){j.error("set muted error",a);y()})};a.jwGetVolume=function(){return K.volume|0};a.jwGetMute=function(){return!!K.mute};a.jwIsBeforePlay=function(){return!1};var k=a.jwSetCurrentCaptions;a.jwSetCurrentCaptions=function(a){k(a)};a.jwSkipAd=function(a){z&&(z.skipAd(a),a=z.getAdModel(),a.complete=!0,f.sendEvent(b.JWPLAYER_CAST_AD_CHANGED,a))};a.jwClickAd=function(c){if(z&&300<z.timeSinceClick()&&(z.clickAd(c),f.state!==e.PAUSED)){var g={tag:c.tag};c.sequence&&(g.sequence=c.sequence);c.podcount&&
-(g.podcount=c.podcount);d(a.id).dispatchEvent(b.JWPLAYER_AD_CLICK,g);h.open(c.clickthrough)}};a.jwPlayAd=a.jwPauseAd=a.jwSetControls=a.jwForceState=a.jwReleaseState=a.jwSetFullscreen=a.jwDetachMedia=a.jwAttachMedia=O;var p=d(a.id).plugins;p.vast&&p.vast.jwPauseAd!==O&&(R={jwPlayAd:p.vast.jwPlayAd,jwPauseAd:p.vast.jwPauseAd},p.vast.jwPlayAd=p.vast.jwPauseAd=O);y();x(!0);l!==J&&C.setup(H(),f)}function v(a){j.log("Cast Session status",a);a?y():(C.sendEvent(b.JWPLAYER_PLAYER_STATE,{oldstate:f.state,newstate:e.BUFFERING}),
-A())}function A(){w&&(G(),w=null);if(N){delete a.jwSkipAd;delete a.jwClickAd;a.initializeAPI();var j=d(a.id).plugins;j.vast&&g.extend(j.vast,R);f.volume=c;f.mute=n;f.setVideo(N);f.duration=0;C&&(C.destroy(),C=null);z&&(z.destroy(),z=null);f.state!==e.IDLE?(f.state=e.IDLE,a.jwPlay(!0),a.jwSeek(f.position)):N.sendEvent(b.JWPLAYER_PLAYER_STATE,{oldstate:e.BUFFERING,newstate:e.IDLE});N=null}x(!1)}function G(){w.removeUpdateListener(v);w.removeMessageListener(j.NS,B)}function B(a,b){var c=JSON.parse(b);
-if(!c)throw"Message not proper JSON";D(c)}function D(c){if("state"===c.type){if(z&&(c.diff.newstate||c.diff.position))z.destroy(),z=null,f.setVideo(C),f.sendEvent(b.JWPLAYER_CAST_AD_CHANGED,{done:!0});C.updateModel(c.diff,c.type);c=c.diff;void 0!==c.item&&f.item!==c.item&&(f.item=c.item,f.sendEvent(b.JWPLAYER_PLAYLIST_ITEM,{index:f.item}))}else if("ad"===c.type){null===z&&(z=new j.adprovider(j.NS,w),z.init(),f.setVideo(z));z.updateModel(c.diff,c.type);var d=z.getAdModel();c.diff.clickthrough&&(d.onClick=
-a.jwClickAd);c.diff.skipoffset&&(d.onSkipAd=a.jwSkipAd);f.sendEvent(b.JWPLAYER_CAST_AD_CHANGED,d);c.diff.complete&&z.resetAdModel()}else"connection"===c.type?!0===c.closed&&q():j.error("received unhandled message",c.type,c)}function H(){var a=g.extend({},f.config);a.cast=g.extend({pageUrl:P},I);for(var b="base autostart controls fallback fullscreen width height mobilecontrols modes playlistlayout playlistposition playlistsize primary stretching sharing related ga skin logo listbar".split(" "),c=b.length;c--;)delete a[b[c]];
-b=a.plugins;delete a.plugins;for(var d in b)if(b.hasOwnProperty(d)){var e=b[d];if(e.client&&(/[\.\/]/.test(e.client)&&m(e,"client"),-1<e.client.indexOf("vast"))){c=a;e=g.extend({},e);e.client="vast";delete e.companiondiv;if(e.schedule){var j=void 0;for(j in e.schedule)e.schedule.hasOwnProperty(j)&&m(e.schedule[j].ad||e.schedule[j],"tag")}m(e,"tag");c.advertising=e}}f.position&&(a.position=f.position);0<f.item&&(a.item=f.item);return a}function y(){if(w&&w.receiver){var a=w.receiver.volume;if(a){var b=
-100*a.level|0;Q(!!a.muted);L(b)}}}function L(a){var c=K.volume!==a;c&&(K.volume=a,C.sendEvent(b.JWPLAYER_MEDIA_VOLUME,{volume:a}));return c}function Q(a){var c=K.mute!==a;c&&(K.mute=a,C.sendEvent(b.JWPLAYER_MEDIA_MUTE,{mute:a}));return c}function O(){}var w=null,M={available:!1,active:!1,deviceName:""},K={volume:null,mute:null},P=h.location.href,I,C=null,z=null,N=null;c=f.volume;n=f.mute;var J=null,R=null;I=g.extend({},p,f.cast);m(I,"loadscreen");m(I,"endscreen");m(I,"logo");if(I.appid&&(!h.cast||
-!h.cast.receiver))j.loader.addEventListener("availability",k),j.loader.addEventListener("session",l),j.loader.initialize();this.startCasting=function(){w||a.jwInstreamState()||h.chrome.cast.requestSession(F,t)};this.stopCasting=q};j.log=function(){if(j.debug){var a=Array.prototype.slice.call(arguments,0);console.log.apply(console,a)}};j.error=function(){var a=Array.prototype.slice.call(arguments,0);console.error.apply(console,a)}}(window,jwplayer),function(h,d){function m(){d&&d.cast&&d.cast.isAvailable&&
-!a.apiConfig?(a.apiConfig=new d.cast.ApiConfig(new d.cast.SessionRequest(l),e,p,d.cast.AutoJoinPolicy.ORIGIN_SCOPED),d.cast.initialize(a.apiConfig,b,g)):15>k++&&setTimeout(m,1E3)}function j(){n&&(n.resetEventListeners(),n=null)}function g(){a.apiConfig=null}function b(){}function e(b){a.loader.sendEvent("session",{session:b});b.sendMessage(a.NS,{whoami:1})}function p(b){a.availability=b;a.loader.sendEvent("availability",{availability:b})}window.chrome=d;var a=h.cast,f=h.utils,c=h.events,n,k=0,l="C7EF2AC5";
-a.loader=f.extend({initialize:function(){null!==a.availability?a.loader.sendEvent("availability",{availability:a.availability}):d&&d.cast?m():n||(n=new f.scriptloader("https://www.gstatic.com/cv/js/sender/v1/cast_sender.js"),n.addEventListener(c.ERROR,j),n.addEventListener(c.COMPLETE,m),n.load())}},new c.eventdispatcher("cast.loader"));a.availability=null}(window.jwplayer,window.chrome||{}),function(h){function d(b){return function(){return b}}function m(){}var j=h.cast,g=h.utils,b=h.events,e=b.state;
-j.provider=function(h,a){function f(b,c){var d={command:b};void 0!==c&&(d.args=c);a.sendMessage(h,d,m,function(a){j.error("message send error",a)})}function c(a){l.oldstate=l.newstate;l.newstate=a;n.sendEvent(b.JWPLAYER_PLAYER_STATE,{oldstate:l.oldstate,newstate:l.newstate})}var n=g.extend(this,new b.eventdispatcher("cast.provider")),k=-1,l={newstate:e.IDLE,oldstate:e.IDLE,buffer:0,position:0,duration:-1,audioMode:!1};n.isCaster=!0;n.init=function(){};n.destroy=function(){clearTimeout(k);a=null};
-n.updateModel=function(a,c){a.newstate&&(l.newstate=a.newstate,l.oldstate=a.oldstate||l.oldstate,n.sendEvent(b.JWPLAYER_PLAYER_STATE,{oldstate:l.oldstate,newstate:l.newstate}));if("ad"!==c){if(void 0!==a.position||void 0!==a.duration)void 0!==a.position&&(l.position=a.position),void 0!==a.duration&&(l.duration=a.duration),n.sendEvent(b.JWPLAYER_MEDIA_TIME,{position:l.position,duration:l.duration});void 0!==a.buffer&&(l.buffer=a.buffer,n.sendEvent(b.JWPLAYER_MEDIA_BUFFER,{bufferPercent:l.buffer}))}};
-n.supportsFullscreen=function(){return!1};n.setup=function(a,b){b.state&&(l.newstate=b.state);void 0!==b.buffer&&(l.buffer=b.buffer);void 0!==a.position&&(l.position=a.position);void 0!==a.duration&&(l.duration=a.duration);c(e.BUFFERING);f("setup",a)};n.playlistItem=function(a){c(e.BUFFERING);f("item",a)};n.load=function(a){c(e.BUFFERING);f("load",a)};n.stop=function(){clearTimeout(k);k=setTimeout(function(){c(e.IDLE);f("stop")},0)};n.play=function(){f("play")};n.pause=function(){c(e.PAUSED);f("pause")};
-n.seek=function(a){c(e.BUFFERING);n.sendEvent(b.JWPLAYER_MEDIA_SEEK,{position:l.position,offset:a});f("seek",a)};n.audioMode=function(){return l.audioMode};n.detachMedia=function(){j.error("detachMedia called while casting");return document.createElement("video")};n.attachMedia=function(){j.error("attachMedia called while casting")};var r;n.setContainer=function(a){r=a};n.getContainer=function(){return r};n.volume=n.mute=n.setControls=n.setCurrentQuality=n.remove=n.resize=n.seekDrag=n.addCaptions=
-n.resetCaptions=n.setVisibility=n.fsCaptions=m;n.setFullScreen=n.getFullScreen=n.checkComplete=d(!1);n.getWidth=n.getHeight=n.getCurrentQuality=d(0);n.getQualityLevels=d(["Auto"])}}(window.jwplayer),function(h){function d(a,b){j.foreach(b,function(b,c){var d=a[b];"function"==typeof d&&d.call(a,c)})}function m(a,b,d){var e=a.style;e.backgroundColor="#000";e.color="#FFF";e.width=j.styleDimension(d.width);e.height=j.styleDimension(d.height);e.display="table";e.opacity=1;d=document.createElement("p");
-e=d.style;e.verticalAlign="middle";e.textAlign="center";e.display="table-cell";e.font="15px/20px Arial, Helvetica, sans-serif";d.innerHTML=b.replace(":",":\x3cbr\x3e");a.innerHTML="";a.appendChild(d)}var j=h.utils,g=h.events,b=!0,e=!1,p=document,a=h.embed=function(f){function c(){if(!B)if("array"===j.typeOf(u.playlist)&&2>u.playlist.length&&(0===u.playlist.length||!u.playlist[0].sources||0===u.playlist[0].sources.length))l();else if(!G)if("string"===j.typeOf(u.playlist))A=new h.playlist.loader,A.addEventListener(g.JWPLAYER_PLAYLIST_LOADED,
-function(a){u.playlist=a.playlist;G=e;c()}),A.addEventListener(g.JWPLAYER_ERROR,function(a){G=e;l(a)}),G=b,A.load(u.playlist);else if(v.getStatus()==j.loaderstatus.COMPLETE){for(var k=0;k<u.modes.length;k++)if(u.modes[k].type&&a[u.modes[k].type]){var p=j.extend({},u),m=new a[u.modes[k].type](y,u.modes[k],p,v,f);if(m.supportsConfig())return m.addEventListener(g.ERROR,n),m.embed(),j.css("object.jwswf, .jwplayer:focus",{outline:"none"}),j.css(".jw-tab-focus:focus",{outline:"solid 2px #0B7EF4"}),d(f,
-p.events),f}var q;u.fallback?(q="No suitable players found and fallback enabled",D=setTimeout(function(){r(q,b)},10),j.log(q),new a.download(y,u,l)):(q="No suitable players found and fallback disabled",r(q,e),j.log(q),y.parentNode.replaceChild(H,y))}}function n(a){x(E+a.message)}function k(a){f.dispatchEvent(g.JWPLAYER_ERROR,{message:"Could not load plugin: "+a.message})}function l(a){a&&a.message?x("Error loading playlist: "+a.message):x(E+"No playable sources found")}function r(a,b){D&&(clearTimeout(D),
-D=null);D=setTimeout(function(){D=null;f.dispatchEvent(g.JWPLAYER_SETUP_ERROR,{message:a,fallback:b})},0)}function x(a){B||(u.fallback?(B=b,m(y,a,u),r(a,b)):r(a,e))}var u=new a.config(f.config),t=u.width,q=u.height,E="Error loading player: ",F=p.getElementById(f.id),v=h.plugins.loadPlugins(f.id,u.plugins),A,G=e,B=e,D=null,H=null;u.fallbackDiv&&(H=u.fallbackDiv,delete u.fallbackDiv);u.id=f.id;u.aspectratio?f.config.aspectratio=u.aspectratio:delete f.config.aspectratio;var y=p.createElement("div");
-y.id=F.id;y.style.width=0<t.toString().indexOf("%")?t:t+"px";y.style.height=0<q.toString().indexOf("%")?q:q+"px";F.parentNode.replaceChild(y,F);this.embed=function(){B||(v.addEventListener(g.COMPLETE,c),v.addEventListener(g.ERROR,k),v.load())};this.destroy=function(){v&&(v.destroy(),v=null);A&&(A.resetEventListeners(),A=null)};this.errorScreen=x;return this};h.embed.errorScreen=m}(jwplayer),function(h){function d(b){if(b.playlist)for(var d=0;d<b.playlist.length;d++)b.playlist[d]=new g(b.playlist[d]);
-else{var h={};j.foreach(g.defaults,function(a){m(b,h,a)});h.sources||(b.levels?(h.sources=b.levels,delete b.levels):(d={},m(b,d,"file"),m(b,d,"type"),h.sources=d.file?[d]:[]));b.playlist=[new g(h)]}}function m(b,d,g){j.exists(b[g])&&(d[g]=b[g],delete b[g])}var j=h.utils,g=h.playlist.item;(h.embed.config=function(b){var e={fallback:!0,height:270,primary:"html5",width:480,base:b.base?b.base:j.getScriptPath("jwplayer.js"),aspectratio:""};b=j.extend(e,h.defaults,b);var e={type:"html5",src:b.base+"jwplayer.html5.js"},
-g={type:"flash",src:b.base+"jwplayer.flash.swf"};b.modes="flash"==b.primary?[g,e]:[e,g];b.listbar&&(b.playlistsize=b.listbar.size,b.playlistposition=b.listbar.position,b.playlistlayout=b.listbar.layout);b.flashplayer&&(g.src=b.flashplayer);b.html5player&&(e.src=b.html5player);d(b);g=b.aspectratio;if("string"!=typeof g||!j.exists(g))e=0;else{var a=g.indexOf(":");-1==a?e=0:(e=parseFloat(g.substr(0,a)),g=parseFloat(g.substr(a+1)),e=0>=e||0>=g?0:100*(g/e)+"%")}-1==b.width.toString().indexOf("%")?delete b.aspectratio:
-e?b.aspectratio=e:delete b.aspectratio;return b}).addConfig=function(b,e){d(e);return j.extend(b,e)}}(jwplayer),function(h){var d=h.utils,m=document;h.embed.download=function(j,g,b){function e(a,b){for(var c=m.querySelectorAll(a),e=0;e<c.length;e++)d.foreach(b,function(a,b){c[e].style[a]=b})}function h(a,b,c){a=m.createElement(a);b&&(a.className="jwdownload"+b);c&&c.appendChild(a);return a}var a=d.extend({},g),f=a.width?a.width:480,c=a.height?a.height:320,n;g=g.logo?g.logo:{prefix:d.repo(),file:"logo.png",
-margin:10};var k,l,r,a=a.playlist,x,u=["mp4","aac","mp3"];if(a&&a.length){x=a[0];n=x.sources;for(a=0;a<n.length;a++){var t=n[a],q=t.type?t.type:d.extensionmap.extType(d.extension(t.file));t.file&&d.foreach(u,function(a){q==u[a]?(k=t.file,l=x.image):d.isYouTube(t.file)&&(r=t.file)})}k?(n=k,b=l,j&&(a=h("a","display",j),h("div","icon",a),h("div","logo",a),n&&a.setAttribute("href",d.getAbsolutePath(n))),a="#"+j.id+" .jwdownload",j.style.width="",j.style.height="",e(a+"display",{width:d.styleDimension(Math.max(320,
-f)),height:d.styleDimension(Math.max(180,c)),background:"black center no-repeat "+(b?"url("+b+")":""),backgroundSize:"contain",position:"relative",border:"none",display:"block"}),e(a+"display div",{position:"absolute",width:"100%",height:"100%"}),e(a+"logo",{top:g.margin+"px",right:g.margin+"px",background:"top right no-repeat url("+g.prefix+g.file+")"}),e(a+"icon",{background:"center no-repeat url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAgNJREFUeNrs28lqwkAYB/CZqNVDDj2r6FN41QeIy8Fe+gj6BL275Q08u9FbT8ZdwVfotSBYEPUkxFOoks4EKiJdaDuTjMn3wWBO0V/+sySR8SNSqVRKIR8qaXHkzlqS9jCfzzWcTCYp9hF5o+59sVjsiRzcegSckFzcjT+ruN80TeSlAjCAAXzdJSGPFXRpAAMYwACGZQkSdhG4WCzehMNhqV6vG6vVSrirKVEw66YoSqDb7cqlUilE8JjHd/y1MQefVzqdDmiaJpfLZWHgXMHn8F6vJ1cqlVAkEsGuAn83J4gAd2RZymQygX6/L1erVQt+9ZPWb+CDwcCC2zXGJaewl/DhcHhK3DVj+KfKZrMWvFarcYNLomAv4aPRSFZVlTlcSPA5fDweW/BoNIqFnKV53JvncjkLns/n/cLdS+92O7RYLLgsKfv9/t8XlDn4eDyiw+HA9Jyz2eyt0+kY2+3WFC5hluej0Ha7zQQq9PPwdDq1Et1sNsx/nFBgCqWJ8oAK1aUptNVqcYWewE4nahfU0YQnk4ntUEfGMIU2m01HoLaCKbTRaDgKtaVLk9tBYaBcE/6Artdr4RZ5TB6/dC+9iIe/WgAMYADDpAUJAxjAAAYwgGFZgoS/AtNNTF7Z2bL0BYPBV3Jw5xFwwWcYxgtBP5OkE8i9G7aWGOOCruvauwADALMLMEbKf4SdAAAAAElFTkSuQmCC)"})):
-r?(g=r,j=h("iframe","",j),j.src="http://www.youtube.com/embed/"+d.youTubeID(g),j.width=f,j.height=c,j.style.border="none"):b()}}}(jwplayer),function(h){var d=h.utils,m=h.events,j={};(h.embed.flash=function(b,e,p,a,f){function c(a,b,c){var d=document.createElement("param");d.setAttribute("name",b);d.setAttribute("value",c);a.appendChild(d)}function n(a,b,c){return function(){try{c&&document.getElementById(f.id+"_wrapper").appendChild(b);var d=document.getElementById(f.id).getPluginConfig("display");
-"function"==typeof a.resize&&a.resize(d.width,d.height);b.style.left=d.x;b.style.top=d.h}catch(e){}}}function k(a){if(!a)return{};var b={},c=[];d.foreach(a,function(a,e){var f=d.getPluginName(a);c.push(a);d.foreach(e,function(a,c){b[f+"."+a]=c})});b.plugins=c.join(",");return b}var l=new h.events.eventdispatcher,r=d.flashVersion();d.extend(this,l);this.embed=function(){p.id=f.id;if(10>r)return l.sendEvent(m.ERROR,{message:"Flash version must be 10.0 or greater"}),!1;var g,h,t=f.config.listbar,q=d.extend({},
-p);if(b.id+"_wrapper"==b.parentNode.id)g=document.getElementById(b.id+"_wrapper");else{g=document.createElement("div");h=document.createElement("div");h.style.display="none";h.id=b.id+"_aspect";g.id=b.id+"_wrapper";g.style.position="relative";g.style.display="block";g.style.width=d.styleDimension(q.width);g.style.height=d.styleDimension(q.height);if(f.config.aspectratio){var E=parseFloat(f.config.aspectratio);h.style.display="block";h.style.marginTop=f.config.aspectratio;g.style.height="auto";g.style.display=
-"inline-block";t&&("bottom"==t.position?h.style.paddingBottom=t.size+"px":"right"==t.position&&(h.style.marginBottom=-1*t.size*(E/100)+"px"))}b.parentNode.replaceChild(g,b);g.appendChild(b);g.appendChild(h)}g=a.setupPlugins(f,q,n);0<g.length?d.extend(q,k(g.plugins)):delete q.plugins;"undefined"!=typeof q["dock.position"]&&"false"==q["dock.position"].toString().toLowerCase()&&(q.dock=q["dock.position"],delete q["dock.position"]);g=q.wmode?q.wmode:q.height&&40>=q.height?"transparent":"opaque";h="height width modes events primary base fallback volume".split(" ");
-for(t=0;t<h.length;t++)delete q[h[t]];h=d.getCookies();d.foreach(h,function(a,b){"undefined"==typeof q[a]&&(q[a]=b)});h=window.location.href.split("/");h.splice(h.length-1,1);h=h.join("/");q.base=h+"/";j[b.id]=q;d.isMSIE()?(h='\x3cobject classid\x3d"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" " width\x3d"100%" height\x3d"100%"id\x3d"'+b.id+'" name\x3d"'+b.id+'" tabindex\x3d0""\x3e',h+='\x3cparam name\x3d"movie" value\x3d"'+e.src+'"\x3e',h+='\x3cparam name\x3d"allowfullscreen" value\x3d"true"\x3e\x3cparam name\x3d"allowscriptaccess" value\x3d"always"\x3e',
-h+='\x3cparam name\x3d"seamlesstabbing" value\x3d"true"\x3e',h+='\x3cparam name\x3d"wmode" value\x3d"'+g+'"\x3e',h+='\x3cparam name\x3d"bgcolor" value\x3d"#000000"\x3e',h+="\x3c/object\x3e",b.outerHTML=h,g=document.getElementById(b.id)):(h=document.createElement("object"),h.setAttribute("type","application/x-shockwave-flash"),h.setAttribute("data",e.src),h.setAttribute("width","100%"),h.setAttribute("height","100%"),h.setAttribute("bgcolor","#000000"),h.setAttribute("id",b.id),h.setAttribute("name",
-b.id),h.className="jwswf",c(h,"allowfullscreen","true"),c(h,"allowscriptaccess","always"),c(h,"seamlesstabbing","true"),c(h,"wmode",g),b.parentNode.replaceChild(h,b),g=h);f.config.aspectratio&&(g.style.position="absolute");f.container=g;f.setPlayer(g,"flash")};this.supportsConfig=function(){if(r)if(p){if("string"==d.typeOf(p.playlist))return!0;try{var a=p.playlist[0].sources;if("undefined"==typeof a)return!0;for(var b=0;b<a.length;b++)if(a[b].file&&g(a[b].file,a[b].type))return!0}catch(c){}}else return!0;
-return!1}}).getVars=function(b){return j[b]};var g=h.embed.flashCanPlay=function(b,e){if(d.isYouTube(b)||d.isRtmp(b,e)||"hls"==e)return!0;var g=d.extensionmap[e?e:d.extension(b)];return!g?!1:!!g.flash}}(jwplayer),function(h){function d(b,d,g){if(null!==navigator.userAgent.match(/BlackBerry/i))return!1;if("youtube"===d||m.isYouTube(b))return!0;var a=m.extension(b);d=d||j.extType(a);if("hls"===d)if(g){g=m.isAndroidNative;if(g(2)||g(3)||g("4.0"))return!1;if(m.isAndroid())return!0}else if(m.isAndroid())return!1;
-if(m.isRtmp(b,d))return!1;b=j[d]||j[a];if(!b||b.flash&&!b.html5)return!1;var f;a:if(b=b.html5){try{f=!!h.vid.canPlayType(b);break a}catch(c){}f=!1}else f=!0;return f}var m=h.utils,j=m.extensionmap,g=h.events;h.embed.html5=function(b,e,j,a,f){function c(a,c,d){return function(){try{var e=document.querySelector("#"+b.id+" .jwmain");d&&e.appendChild(c);"function"==typeof a.resize&&(a.resize(e.clientWidth,e.clientHeight),setTimeout(function(){a.resize(e.clientWidth,e.clientHeight)},400));c.left=e.style.left;
-c.top=e.style.top}catch(f){}}}function n(a){k.sendEvent(a.type,{message:"HTML5 player not found"})}var k=this,l=new g.eventdispatcher;m.extend(k,l);k.embed=function(){if(h.html5){a.setupPlugins(f,j,c);b.innerHTML="";var d=h.utils.extend({},j);delete d.volume;d=new h.html5.player(d);f.container=document.getElementById(f.id);f.setPlayer(d,"html5")}else d=new m.scriptloader(e.src),d.addEventListener(g.ERROR,n),d.addEventListener(g.COMPLETE,k.embed),d.load()};k.supportsConfig=function(){if(h.vid.canPlayType)try{if("string"==
-m.typeOf(j.playlist))return!0;for(var a=j.playlist[0].sources,b=0;b<a.length;b++)if(d(a[b].file,a[b].type,j.androidhls))return!0}catch(c){}return!1}};h.embed.html5CanPlay=d}(jwplayer),function(h){var d=h.embed,m=h.utils,j=/\.(js|swf)$/;h.embed=m.extend(function(g){function b(){t="Adobe SiteCatalyst Error: Could not find Media Module"}var e=m.repo(),p=m.extend({},h.defaults),a=m.extend({},p,g.config),f=g.config,c=a.plugins,n=a.analytics,k=e+"jwpsrv.js",l=e+"sharing.js",r=e+"related.js",x=e+"gapro.js",
-p=h.key?h.key:p.key,u=(new h.utils.key(p)).edition(),t,c=c?c:{};"ads"==u&&a.advertising&&(j.test(a.advertising.client)?c[a.advertising.client]=a.advertising:c[e+a.advertising.client+".js"]=a.advertising);delete f.advertising;f.key=p;a.analytics&&j.test(a.analytics.client)&&(k=a.analytics.client);delete f.analytics;n&&!("ads"===u||"enterprise"===u)&&delete n.enabled;if("free"==u||!n||!1!==n.enabled)c[k]=n?n:{};delete c.sharing;delete c.related;switch(u){case "ads":case "enterprise":if(f.sitecatalyst)try{window.s&&
-window.s.hasOwnProperty("Media")?new h.embed.sitecatalyst(g):b()}catch(q){b()}case "premium":a.related&&(j.test(a.related.client)&&(r=a.related.client),c[r]=a.related),a.ga&&(j.test(a.ga.client)&&(x=a.ga.client),c[x]=a.ga);case "pro":a.sharing&&(j.test(a.sharing.client)&&(l=a.sharing.client),c[l]=a.sharing),a.skin&&(f.skin=a.skin.replace(/^(beelden|bekle|five|glow|modieus|roundster|stormtrooper|vapor)$/i,m.repo()+"skins/$1.xml"))}f.plugins=c;g.config=f;g=new d(g);t&&g.errorScreen(t);return g},h.embed)}(jwplayer),
-function(h){var d=jwplayer.utils;h.sitecatalyst=function(h){function j(b){a.debug&&d.log(b)}function g(a){a=a.split("/");a=a[a.length-1];a=a.split("?");return a[0]}function b(){if(!l){l=!0;var a=p.getPosition();j("stop: "+c+" : "+a);s.Media.stop(c,a)}}function e(){r||(b(),r=!0,j("close: "+c),s.Media.close(c),x=!0,k=0)}var p=h,a=d.extend({},p.config.sitecatalyst),f={onPlay:function(){if(!x){var a=p.getPosition();l=!1;j("play: "+c+" : "+a);s.Media.play(c,a)}},onPause:b,onBuffer:b,onIdle:e,onPlaylistItem:function(b){try{x=
-!0;e();k=0;var f;if(a.mediaName)f=a.mediaName;else{var h=p.getPlaylistItem(b.index);f=h.title?h.title:h.file?g(h.file):h.sources&&h.sources.length?g(h.sources[0].file):""}c=f;n=a.playerName?a.playerName:p.id}catch(j){d.log(j)}},onTime:function(){if(x){var a=p.getDuration();if(-1==a)return;r=l=x=!1;j("open: "+c+" : "+a+" : "+n);s.Media.open(c,a,n);j("play: "+c+" : 0");s.Media.play(c,0)}a=p.getPosition();if(3<=Math.abs(a-k)){var b=k;j("seek: "+b+" to "+a);j("stop: "+c+" : "+b);s.Media.stop(c,b);j("play: "+
-c+" : "+a);s.Media.play(c,a)}k=a},onComplete:e},c,n,k,l=!0,r=!0,x;d.foreach(f,function(a){p[a](f[a])})}}(jwplayer.embed),function(h,d){var m=[],j=h.utils,g=h.events,b=g.state,e=document,p="getBuffer getCaptionsList getControls getCurrentCaptions getCurrentQuality getDuration getFullscreen getHeight getLockState getMute getPlaylistIndex getSafeRegion getPosition getQualityLevels getState getVolume getWidth isBeforeComplete isBeforePlay releaseState".split(" "),a="playlistNext stop forceState playlistPrev seek setCurrentCaptions setControls setCurrentQuality setVolume".split(" "),
-f={onBufferChange:g.JWPLAYER_MEDIA_BUFFER,onBufferFull:g.JWPLAYER_MEDIA_BUFFER_FULL,onError:g.JWPLAYER_ERROR,onSetupError:g.JWPLAYER_SETUP_ERROR,onFullscreen:g.JWPLAYER_FULLSCREEN,onMeta:g.JWPLAYER_MEDIA_META,onMute:g.JWPLAYER_MEDIA_MUTE,onPlaylist:g.JWPLAYER_PLAYLIST_LOADED,onPlaylistItem:g.JWPLAYER_PLAYLIST_ITEM,onPlaylistComplete:g.JWPLAYER_PLAYLIST_COMPLETE,onReady:g.API_READY,onResize:g.JWPLAYER_RESIZE,onComplete:g.JWPLAYER_MEDIA_COMPLETE,onSeek:g.JWPLAYER_MEDIA_SEEK,onTime:g.JWPLAYER_MEDIA_TIME,
-onVolume:g.JWPLAYER_MEDIA_VOLUME,onBeforePlay:g.JWPLAYER_MEDIA_BEFOREPLAY,onBeforeComplete:g.JWPLAYER_MEDIA_BEFORECOMPLETE,onDisplayClick:g.JWPLAYER_DISPLAY_CLICK,onControls:g.JWPLAYER_CONTROLS,onQualityLevels:g.JWPLAYER_MEDIA_LEVELS,onQualityChange:g.JWPLAYER_MEDIA_LEVEL_CHANGED,onCaptionsList:g.JWPLAYER_CAPTIONS_LIST,onCaptionsChange:g.JWPLAYER_CAPTIONS_CHANGED,onAdError:g.JWPLAYER_AD_ERROR,onAdClick:g.JWPLAYER_AD_CLICK,onAdImpression:g.JWPLAYER_AD_IMPRESSION,onAdTime:g.JWPLAYER_AD_TIME,onAdComplete:g.JWPLAYER_AD_COMPLETE,
-onAdCompanions:g.JWPLAYER_AD_COMPANIONS,onAdSkipped:g.JWPLAYER_AD_SKIPPED,onAdPlay:g.JWPLAYER_AD_PLAY,onAdPause:g.JWPLAYER_AD_PAUSE,onAdMeta:g.JWPLAYER_AD_META,onCast:g.JWPLAYER_CAST_SESSION},c={onBuffer:b.BUFFERING,onPause:b.PAUSED,onPlay:b.PLAYING,onIdle:b.IDLE};h.api=function(m){function k(a,b){j.foreach(a,function(a,c){q[a]=function(a){return b(c,a)}})}function l(a,b){var c="jw"+b.charAt(0).toUpperCase()+b.slice(1);q[b]=function(){var b=t.apply(this,[c].concat(Array.prototype.slice.call(arguments,
-0)));return a?q:b}}function r(a){G=[];D&&D.destroy&&D.destroy();h.api.destroyPlayer(a.id)}function x(a,b){try{a.jwAddEventListener(b,'function(dat) { jwplayer("'+q.id+'").dispatchEvent("'+b+'", dat); }')}catch(c){j.log("Could not add internal listener")}}function u(a,b){E[a]||(E[a]=[],v&&A&&x(v,a));E[a].push(b);return q}function t(){if(A){if(v){var a=Array.prototype.slice.call(arguments,0),b=a.shift();if("function"===typeof v[b]){switch(a.length){case 6:return v[b](a[0],a[1],a[2],a[3],a[4],a[5]);
-case 5:return v[b](a[0],a[1],a[2],a[3],a[4]);case 4:return v[b](a[0],a[1],a[2],a[3]);case 3:return v[b](a[0],a[1],a[2]);case 2:return v[b](a[0],a[1]);case 1:return v[b](a[0])}return v[b]()}}return null}G.push(arguments)}var q=this,E={},F={},v,A=!1,G=[],B,D,H={},y={};q.container=m;q.id=m.id;q.setup=function(a){if(h.embed){var b=e.getElementById(q.id);b&&(a.fallbackDiv=b);r(q);b=h(q.id);b.config=a;D=new h.embed(b);D.embed();return b}return q};q.getContainer=function(){return q.container};q.addButton=
-function(a,b,c,d){try{y[d]=c,t("jwDockAddButton",a,b,"jwplayer('"+q.id+"').callback('"+d+"')",d)}catch(e){j.log("Could not add dock button"+e.message)}};q.removeButton=function(a){t("jwDockRemoveButton",a)};q.callback=function(a){if(y[a])y[a]()};q.getMeta=function(){return q.getItemMeta()};q.getPlaylist=function(){var a=t("jwGetPlaylist");"flash"==q.renderingMode&&j.deepReplaceKeyName(a,["__dot__","__spc__","__dsh__","__default__"],["."," ","-","default"]);return a};q.getPlaylistItem=function(a){j.exists(a)||
-(a=q.getPlaylistIndex());return q.getPlaylist()[a]};q.getRenderingMode=function(){return q.renderingMode};q.setFullscreen=function(a){j.exists(a)?t("jwSetFullscreen",a):t("jwSetFullscreen",!t("jwGetFullscreen"));return q};q.setMute=function(a){j.exists(a)?t("jwSetMute",a):t("jwSetMute",!t("jwGetMute"));return q};q.lock=function(){return q};q.unlock=function(){return q};q.load=function(a){t("jwInstreamDestroy");h(q.id).plugins.googima&&t("jwDestroyGoogima");t("jwLoad",a);return q};q.playlistItem=function(a){t("jwPlaylistItem",
-parseInt(a,10));return q};q.resize=function(a,b){if("flash"!==q.renderingMode)t("jwResize",a,b);else{var c=e.getElementById(q.id+"_wrapper"),d=e.getElementById(q.id+"_aspect");d&&(d.style.display="none");c&&(c.style.display="block",c.style.width=j.styleDimension(a),c.style.height=j.styleDimension(b))}return q};q.play=function(a){if(a!==d)return t("jwPlay",a),q;a=q.getState();var c=B&&B.getState();c?c===b.IDLE||c===b.PLAYING||c===b.BUFFERING?t("jwInstreamPause"):t("jwInstreamPlay"):a==b.PLAYING||a==
-b.BUFFERING?t("jwPause"):t("jwPlay");return q};q.pause=function(a){a===d?(a=q.getState(),a==b.PLAYING||a==b.BUFFERING?t("jwPause"):t("jwPlay")):t("jwPause",a);return q};q.createInstream=function(){return new h.api.instream(this,v)};q.setInstream=function(a){return B=a};q.loadInstream=function(a,b){B=q.setInstream(q.createInstream()).init(b);B.loadItem(a);return B};q.destroyPlayer=function(){t("jwPlayerDestroy")};q.playAd=function(a){var b=h(q.id).plugins;b.vast?b.vast.jwPlayAd(a):t("jwPlayAd",a)};
-q.pauseAd=function(){var a=h(q.id).plugins;a.vast?a.vast.jwPauseAd():t("jwPauseAd")};k(c,function(a,b){F[a]||(F[a]=[],u(g.JWPLAYER_PLAYER_STATE,function(b){var c=b.newstate;b=b.oldstate;if(c==a){var d=F[c];if(d)for(var e=0;e<d.length;e++){var f=d[e];"function"==typeof f&&f.call(this,{oldstate:b,newstate:c})}}}));F[a].push(b);return q});k(f,u);j.foreach(p,function(a,b){l(!1,b)});j.foreach(a,function(a,b){l(!0,b)});q.remove=function(){if(!A)throw"Cannot call remove() before player is ready";r(this)};
-q.registerPlugin=function(a,b,c,d){h.plugins.registerPlugin(a,b,c,d)};q.setPlayer=function(a,b){v=a;q.renderingMode=b};q.detachMedia=function(){if("html5"==q.renderingMode)return t("jwDetachMedia")};q.attachMedia=function(a){if("html5"==q.renderingMode)return t("jwAttachMedia",a)};q.removeEventListener=function(a,b){var c=E[a];if(c)for(var d=c.length;d--;)c[d]===b&&c.splice(d,1)};q.dispatchEvent=function(a,b){var c=E[a];if(c)for(var c=c.slice(0),d=j.translateEventResponse(a,b),e=0;e<c.length;e++){var f=
-c[e];if("function"===typeof f)try{a===g.JWPLAYER_PLAYLIST_LOADED&&j.deepReplaceKeyName(d.playlist,["__dot__","__spc__","__dsh__","__default__"],["."," ","-","default"]),f.call(this,d)}catch(h){j.log("There was an error calling back an event handler")}}};q.dispatchInstreamEvent=function(a){B&&B.dispatchEvent(a,arguments)};q.callInternal=t;q.playerReady=function(a){A=!0;v||q.setPlayer(e.getElementById(a.id));q.container=e.getElementById(q.id);j.foreach(E,function(a){x(v,a)});u(g.JWPLAYER_PLAYLIST_ITEM,
-function(){H={}});u(g.JWPLAYER_MEDIA_META,function(a){j.extend(H,a.metadata)});u(g.JWPLAYER_VIEW_TAB_FOCUS,function(a){var b=q.getContainer();!0===a.hasFocus?j.addClass(b,"jw-tab-focus"):j.removeClass(b,"jw-tab-focus")});for(q.dispatchEvent(g.API_READY);0<G.length;)t.apply(this,G.shift())};q.getItemMeta=function(){return H};return q};h.playerReady=function(a){var b=h.api.playerById(a.id);b?b.playerReady(a):h.api.selectPlayer(a.id).playerReady(a)};h.api.selectPlayer=function(a){var b;j.exists(a)||
-(a=0);a.nodeType?b=a:"string"==typeof a&&(b=e.getElementById(a));return b?(a=h.api.playerById(b.id))?a:h.api.addPlayer(new h.api(b)):"number"==typeof a?m[a]:null};h.api.playerById=function(a){for(var b=0;b<m.length;b++)if(m[b].id==a)return m[b];return null};h.api.addPlayer=function(a){for(var b=0;b<m.length;b++)if(m[b]==a)return a;m.push(a);return a};h.api.destroyPlayer=function(a){var b,c,f;j.foreach(m,function(d,e){e.id===a&&(b=d,c=e)});if(b===d||c===d)return null;j.clearCss("#"+c.id);if(f=e.getElementById(c.id+
-("flash"==c.renderingMode?"_wrapper":""))){"html5"===c.renderingMode&&c.destroyPlayer();var g=e.createElement("div");g.id=c.id;f.parentNode.replaceChild(g,f)}m.splice(b,1);return null}}(window.jwplayer),function(h){var d=h.events,m=h.utils,j=d.state;h.api.instream=function(g,b){function e(a,d){c[a]||(c[a]=[],b.jwInstreamAddEventListener(a,'function(dat) { jwplayer("'+g.id+'").dispatchInstreamEvent("'+a+'", dat); }'));c[a].push(d);return this}function h(a,b){n[a]||(n[a]=[],e(d.JWPLAYER_PLAYER_STATE,
-function(b){var c=b.newstate,d=b.oldstate;if(c==a){var e=n[c];if(e)for(var f=0;f<e.length;f++){var g=e[f];"function"==typeof g&&g.call(this,{oldstate:d,newstate:c,type:b.type})}}}));n[a].push(b);return this}var a,f,c={},n={},k=this;k.type="instream";k.init=function(){g.callInternal("jwInitInstream");return k};k.loadItem=function(b,c){a=b;f=c||{};"array"==m.typeOf(b)?g.callInternal("jwLoadArrayInstream",a,f):g.callInternal("jwLoadItemInstream",a,f)};k.removeEvents=function(){c=n={}};k.removeEventListener=
-function(a,b){var d=c[a];if(d)for(var e=d.length;e--;)d[e]===b&&d.splice(e,1)};k.dispatchEvent=function(a,b){var d=c[a];if(d)for(var d=d.slice(0),e=m.translateEventResponse(a,b[1]),f=0;f<d.length;f++){var g=d[f];"function"==typeof g&&g.call(this,e)}};k.onError=function(a){return e(d.JWPLAYER_ERROR,a)};k.onMediaError=function(a){return e(d.JWPLAYER_MEDIA_ERROR,a)};k.onFullscreen=function(a){return e(d.JWPLAYER_FULLSCREEN,a)};k.onMeta=function(a){return e(d.JWPLAYER_MEDIA_META,a)};k.onMute=function(a){return e(d.JWPLAYER_MEDIA_MUTE,
-a)};k.onComplete=function(a){return e(d.JWPLAYER_MEDIA_COMPLETE,a)};k.onPlaylistComplete=function(a){return e(d.JWPLAYER_PLAYLIST_COMPLETE,a)};k.onPlaylistItem=function(a){return e(d.JWPLAYER_PLAYLIST_ITEM,a)};k.onTime=function(a){return e(d.JWPLAYER_MEDIA_TIME,a)};k.onBuffer=function(a){return h(j.BUFFERING,a)};k.onPause=function(a){return h(j.PAUSED,a)};k.onPlay=function(a){return h(j.PLAYING,a)};k.onIdle=function(a){return h(j.IDLE,a)};k.onClick=function(a){return e(d.JWPLAYER_INSTREAM_CLICK,a)};
-k.onInstreamDestroyed=function(a){return e(d.JWPLAYER_INSTREAM_DESTROYED,a)};k.onAdSkipped=function(a){return e(d.JWPLAYER_AD_SKIPPED,a)};k.play=function(a){b.jwInstreamPlay(a)};k.pause=function(a){b.jwInstreamPause(a)};k.hide=function(){g.callInternal("jwInstreamHide")};k.destroy=function(){k.removeEvents();g.callInternal("jwInstreamDestroy")};k.setText=function(a){b.jwInstreamSetText(a?a:"")};k.getState=function(){return b.jwInstreamState()};k.setClick=function(a){b.jwInstreamClick&&b.jwInstreamClick(a)}}}(window.jwplayer),
-function(h){var d=h.api,m=d.selectPlayer;d.selectPlayer=function(d){return(d=m(d))?d:{registerPlugin:function(d,b,e){h.plugins.registerPlugin(d,b,e)}}}}(jwplayer));
\ No newline at end of file
diff --git a/lib/subliminal/services/addic7ed.py b/lib/subliminal/services/addic7ed.py
index bc1ee5c96ce02c6b0b1893cec506f4d757ba744b..b4a28510637961bdd3f324e3ec7f2ac9422b55c1 100644
--- a/lib/subliminal/services/addic7ed.py
+++ b/lib/subliminal/services/addic7ed.py
@@ -39,7 +39,7 @@ class Addic7ed(ServiceBase):
     api_based = False
     #TODO: Complete this
     languages = language_set(['ar', 'ca', 'de', 'el', 'en', 'es', 'eu', 'fr', 'ga', 'gl', 'he', 'hr', 'hu',
-                              'it', 'pl', 'pt', 'ro', 'ru', 'se', 'pb'])
+                              'it', 'nl', 'pl', 'pt', 'ro', 'ru', 'se', 'pb'])
     language_map = {'Portuguese (Brazilian)': Language('pob'), 'Greek': Language('gre'),
                     'Spanish (Latin America)': Language('spa'), 'Galego': Language('glg'),
                     u'CatalĆ ': Language('cat')}
diff --git a/lib/subliminal/videos.py b/lib/subliminal/videos.py
index b249cf69a13f215fa010ed47906b2128a897a3e6..61821b45069ff0181da3b40db1aefd7bb489cc27 100644
--- a/lib/subliminal/videos.py
+++ b/lib/subliminal/videos.py
@@ -136,14 +136,15 @@ class Video(object):
             return []
         basepath = os.path.splitext(self.path)[0]
         results = []
-        video_infos = None
-        try:
-            video_infos = enzyme.parse(self.path)
-            logger.debug(u'Succeeded parsing %s with enzyme: %r' % (self.path, video_infos)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-        except:
-            logger.debug(u'Failed parsing %s with enzyme' % self.path) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
-        if isinstance(video_infos, enzyme.core.AVContainer):
-            results.extend([subtitles.EmbeddedSubtitle.from_enzyme(self.path, s) for s in video_infos.subtitles])
+        if not sickbeard.EMBEDDED_SUBTITLES_ALL:
+            video_infos = None
+            try:
+                video_infos = enzyme.parse(self.path)
+                logger.debug(u'Succeeded parsing %s with enzyme: %r' % (self.path, video_infos)) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+            except:
+                logger.debug(u'Failed parsing %s with enzyme' % self.path) if sys.platform != 'win32' else logger.debug('Log line suppressed on windows')
+            if isinstance(video_infos, enzyme.core.AVContainer):
+                results.extend([subtitles.EmbeddedSubtitle.from_enzyme(self.path, s) for s in video_infos.subtitles])
         # cannot use glob here because it chokes if there are any square
         # brackets inside the filename, so we have to use basic string
         # startswith/endswith comparisons
diff --git a/lib/trakt/exceptions.py b/lib/trakt/exceptions.py
index f82e88e5c711ee5ee5544e9f54e7e2708bc86dd9..a8d7b3fcdf7f43ce00b478649cccc7fe30338e23 100644
--- a/lib/trakt/exceptions.py
+++ b/lib/trakt/exceptions.py
@@ -5,4 +5,4 @@ class traktAuthException(traktException):
     pass
 
 class traktServerBusy(traktException):
-    pass
\ No newline at end of file
+    pass
diff --git a/lib/trakt/trakt.py b/lib/trakt/trakt.py
index d2f615a6cf2bc01a85e820ac5b0aafdc36482997..3d04d99edd5843383bafbc4aaae2d4e9af392b81 100644
--- a/lib/trakt/trakt.py
+++ b/lib/trakt/trakt.py
@@ -40,8 +40,11 @@ class TraktAPI():
                 logger.log(u"Retrying trakt api request: auth/login", logger.WARNING)
                 return self.validateAccount()
             elif code == 401:
+                logger.log(u"Unauthorized. Please check your Trakt settings", logger.WARNING)
                 raise traktAuthException(e)
-            elif code == 503:
+            elif code in (500,501,503,504,520,521,522):
+                #http://docs.trakt.apiary.io/#introduction/status-codes
+                logger.log(u"Trakt may have some issues and it's unavailable. Try again later please", logger.WARNING)
                 raise traktServerBusy(e)
             else:
                 raise traktException(e)
@@ -79,8 +82,11 @@ class TraktAPI():
                 logger.log(u"Retrying trakt api request: %s" % path, logger.WARNING)
                 return self.traktRequest(path, data, method)
             elif code == 401:
+                logger.log(u"Unauthorized. Please check your Trakt settings", logger.WARNING)
                 raise traktAuthException(e)
-            elif code == 503:
+            elif code in (500,501,503,504,520,521,522):
+                #http://docs.trakt.apiary.io/#introduction/status-codes
+                logger.log(u"Trakt may have some issues and it's unavailable. Try again later please", logger.WARNING)
                 raise traktServerBusy(e)
             else:
                 raise traktException(e)
@@ -94,4 +100,4 @@ class TraktAPI():
             else:
                 raise traktException('Unknown Error')
 
-        return resp
\ No newline at end of file
+        return resp
diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py
index 066a97c77a926ee65805f38247ec561297508e75..e539522b18f4ef97dd15cca9f3d2a58c8ce5b788 100755
--- a/sickbeard/__init__.py
+++ b/sickbeard/__init__.py
@@ -38,7 +38,7 @@ from sickbeard import providers, metadata, config, webserveInit
 from sickbeard.providers.generic import GenericProvider
 from providers import ezrss, btn, newznab, womble, thepiratebay, oldpiratebay, torrentleech, kat, iptorrents, \
     omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, hounddawgs, nextgen, speedcd, nyaatorrents, animenzb, torrentbytes, animezb, \
-    freshontv, morethantv, bitsoup, t411, tokyotoshokan, shazbat, rarbg, alpharatio, tntvillage, binsearch
+    freshontv, morethantv, bitsoup, t411, tokyotoshokan, shazbat, rarbg, alpharatio, tntvillage, binsearch, eztv
 from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \
     naming_ep_type
 from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
@@ -66,10 +66,11 @@ CFG = None
 CONFIG_FILE = None
 
 # This is the version of the config we EXPECT to find
-CONFIG_VERSION = 6
+CONFIG_VERSION = 7
 
 # Default encryption version (0 for None)
 ENCRYPTION_VERSION = 0
+ENCRYPTION_SECRET = None
 
 PROG_DIR = '.'
 MY_FULLNAME = None
@@ -100,6 +101,7 @@ properFinderScheduler = None
 autoPostProcesserScheduler = None
 subtitlesFinderScheduler = None
 traktCheckerScheduler = None
+traktRollingScheduler = None
 
 showList = None
 loadingShowList = None
@@ -148,8 +150,6 @@ WEB_HOST = None
 WEB_IPV6 = None
 WEB_COOKIE_SECRET = None
 
-PLAY_VIDEOS = False
-
 DOWNLOAD_URL = None
 
 HANDLE_REVERSE_PROXY = False
@@ -169,6 +169,7 @@ ENABLE_HTTPS = False
 HTTPS_CERT = None
 HTTPS_KEY = None
 
+INDEXER_DEFAULT_LANGUAGE = None
 LAUNCH_BROWSER = False
 CACHE_DIR = None
 ACTUAL_CACHE_DIR = None
@@ -423,6 +424,11 @@ TRAKT_DEFAULT_INDEXER = None
 TRAKT_DISABLE_SSL_VERIFY = False
 TRAKT_TIMEOUT = 60
 TRAKT_BLACKLIST_NAME = ''
+TRAKT_USE_ROLLING_DOWNLOAD = 0
+TRAKT_ROLLING_NUM_EP = 0
+TRAKT_ROLLING_ADD_PAUSED = 1
+TRAKT_ROLLING_FREQUENCY = 15
+TRAKT_ROLLING_DEFAULT_WATCHED_STATUS = 7
 
 USE_PYTIVO = False
 PYTIVO_NOTIFY_ONSNATCH = False
@@ -490,6 +496,7 @@ SUBTITLES_DIR = ''
 SUBTITLES_SERVICES_LIST = []
 SUBTITLES_SERVICES_ENABLED = []
 SUBTITLES_HISTORY = False
+EMBEDDED_SUBTITLES_ALL = False
 SUBTITLES_FINDER_FREQUENCY = 1
 SUBTITLES_MULTI = False
 
@@ -520,17 +527,17 @@ def get_backlog_cycle_time():
 def initialize(consoleLogging=True):
     with INIT_LOCK:
 
-        global BRANCH, GIT_RESET, GIT_REMOTE, GIT_REMOTE_URL, CUR_COMMIT_HASH, CUR_COMMIT_BRANCH, ACTUAL_LOG_DIR, LOG_DIR, LOG_NR, LOG_SIZE, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, WEB_COOKIE_SECRET, API_KEY, API_ROOT, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
+        global BRANCH, GIT_RESET, GIT_REMOTE, GIT_REMOTE_URL, CUR_COMMIT_HASH, CUR_COMMIT_BRANCH, ACTUAL_LOG_DIR, LOG_DIR, LOG_NR, LOG_SIZE, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, ENCRYPTION_SECRET, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, WEB_COOKIE_SECRET, API_KEY, API_ROOT, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \
             HANDLE_REVERSE_PROXY, USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, RANDOMIZE_PROVIDERS, CHECK_PROPERS_INTERVAL, ALLOW_HIGH_PRIORITY, SAB_FORCED, TORRENT_METHOD, \
             SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_CATEGORY_ANIME, SAB_HOST, \
             NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_CATEGORY_ANIME, NZBGET_PRIORITY, NZBGET_HOST, NZBGET_USE_HTTPS, backlogSearchScheduler, \
             TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, TORRENT_LABEL_ANIME, TORRENT_VERIFY_CERT, TORRENT_RPCURL, TORRENT_AUTH_TYPE, \
             USE_KODI, KODI_ALWAYS_ON, KODI_NOTIFY_ONSNATCH, KODI_NOTIFY_ONDOWNLOAD, KODI_NOTIFY_ONSUBTITLEDOWNLOAD, KODI_UPDATE_FULL, KODI_UPDATE_ONLYFIRST, \
             KODI_UPDATE_LIBRARY, KODI_HOST, KODI_USERNAME, KODI_PASSWORD, BACKLOG_FREQUENCY, \
-            USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_DISABLE_SSL_VERIFY, TRAKT_TIMEOUT, TRAKT_BLACKLIST_NAME, \
+            USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, traktRollingScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_DISABLE_SSL_VERIFY, TRAKT_TIMEOUT, TRAKT_BLACKLIST_NAME, TRAKT_USE_ROLLING_DOWNLOAD, TRAKT_ROLLING_NUM_EP, TRAKT_ROLLING_ADD_PAUSED, TRAKT_ROLLING_FREQUENCY, TRAKT_ROLLING_DEFAULT_WATCHED_STATUS, \
             USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_NOTIFY_ONSUBTITLEDOWNLOAD, PLEX_UPDATE_LIBRARY, \
             PLEX_SERVER_HOST, PLEX_SERVER_TOKEN, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, DEFAULT_BACKLOG_FREQUENCY, MIN_BACKLOG_FREQUENCY, BACKLOG_STARTUP, SKIP_REMOVED_FILES, \
-            showUpdateScheduler, __INITIALIZED__, LAUNCH_BROWSER, UPDATE_SHOWS_ON_START, UPDATE_SHOWS_ON_SNATCH, TRASH_REMOVE_SHOW, TRASH_ROTATE_LOGS, SORT_ARTICLE, showList, loadingShowList, \
+            showUpdateScheduler, __INITIALIZED__, INDEXER_DEFAULT_LANGUAGE, LAUNCH_BROWSER, UPDATE_SHOWS_ON_START, UPDATE_SHOWS_ON_SNATCH, TRASH_REMOVE_SHOW, TRASH_ROTATE_LOGS, SORT_ARTICLE, showList, loadingShowList, \
             NEWZNAB_DATA, NZBS, NZBS_UID, NZBS_HASH, INDEXER_DEFAULT, INDEXER_TIMEOUT, USENET_RETENTION, TORRENT_DIR, \
             QUALITY_DEFAULT, FLATTEN_FOLDERS_DEFAULT, SUBTITLES_DEFAULT, STATUS_DEFAULT, DAILYSEARCH_STARTUP, \
             GROWL_NOTIFY_ONSNATCH, GROWL_NOTIFY_ONDOWNLOAD, GROWL_NOTIFY_ONSUBTITLEDOWNLOAD, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD, USE_FREEMOBILE, FREEMOBILE_ID, FREEMOBILE_APIKEY, FREEMOBILE_NOTIFY_ONSNATCH, FREEMOBILE_NOTIFY_ONDOWNLOAD, FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD, \
@@ -557,11 +564,11 @@ def initialize(consoleLogging=True):
             GUI_NAME, HOME_LAYOUT, HISTORY_LAYOUT, DISPLAY_SHOW_SPECIALS, COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, COMING_EPS_MISSED_RANGE, DISPLAY_FILESIZE, FUZZY_DATING, TRIM_ZERO, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, THEME_NAME, \
             POSTER_SORTBY, POSTER_SORTDIR, \
             METADATA_WDTV, METADATA_TIVO, METADATA_MEDE8ER, IGNORE_WORDS, REQUIRE_WORDS, CALENDAR_UNPROTECTED, NO_RESTART, CREATE_MISSING_SHOW_DIRS, \
-            ADD_SHOWS_WO_DIR, USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, SUBTITLES_MULTI, subtitlesFinderScheduler, \
+            ADD_SHOWS_WO_DIR, USE_SUBTITLES, SUBTITLES_LANGUAGES, SUBTITLES_DIR, SUBTITLES_SERVICES_LIST, SUBTITLES_SERVICES_ENABLED, SUBTITLES_HISTORY, SUBTITLES_FINDER_FREQUENCY, SUBTITLES_MULTI, EMBEDDED_SUBTITLES_ALL, subtitlesFinderScheduler, \
             USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, TMDB_API_KEY, DEBUG, PROXY_SETTING, PROXY_INDEXERS, \
             AUTOPOSTPROCESSER_FREQUENCY, SHOWUPDATE_HOUR, DEFAULT_AUTOPOSTPROCESSER_FREQUENCY, MIN_AUTOPOSTPROCESSER_FREQUENCY, \
             ANIME_DEFAULT, NAMING_ANIME, ANIMESUPPORT, USE_ANIDB, ANIDB_USERNAME, ANIDB_PASSWORD, ANIDB_USE_MYLIST, \
-            ANIME_SPLIT_HOME, SCENE_DEFAULT, PLAY_VIDEOS, DOWNLOAD_URL, BACKLOG_DAYS, GIT_ORG, GIT_REPO, GIT_USERNAME, GIT_PASSWORD, \
+            ANIME_SPLIT_HOME, SCENE_DEFAULT, DOWNLOAD_URL, BACKLOG_DAYS, GIT_ORG, GIT_REPO, GIT_USERNAME, GIT_PASSWORD, \
             GIT_AUTOISSUES, DEVELOPER, gh
 
         if __INITIALIZED__:
@@ -591,6 +598,7 @@ def initialize(consoleLogging=True):
 
         # Need to be before any passwords
         ENCRYPTION_VERSION = check_setting_int(CFG, 'General', 'encryption_version', 0)
+        ENCRYPTION_SECRET = check_setting_str(CFG, 'General', 'encryption_secret', helpers.generateCookieSecret(), censor_log=True)
 
         GIT_AUTOISSUES = bool(check_setting_int(CFG, 'General', 'git_autoissues', 0))
 
@@ -713,9 +721,10 @@ def initialize(consoleLogging=True):
         WEB_COOKIE_SECRET = check_setting_str(CFG, 'General', 'web_cookie_secret', helpers.generateCookieSecret(), censor_log=True)
         if not WEB_COOKIE_SECRET:
             WEB_COOKIE_SECRET = helpers.generateCookieSecret()
-        LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
 
-        PLAY_VIDEOS = bool(check_setting_int(CFG, 'General', 'play_videos', 0))
+        INDEXER_DEFAULT_LANGUAGE = check_setting_str(CFG, 'General', 'indexerDefaultLang', 'en')
+
+        LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
 
         DOWNLOAD_URL = check_setting_str(CFG, 'General', 'download_url', "")
 
@@ -998,6 +1007,11 @@ def initialize(consoleLogging=True):
         TRAKT_DISABLE_SSL_VERIFY = bool(check_setting_int(CFG, 'Trakt', 'trakt_disable_ssl_verify', 0))
         TRAKT_TIMEOUT = check_setting_int(CFG, 'Trakt', 'trakt_timeout', 30)
         TRAKT_BLACKLIST_NAME = check_setting_str(CFG, 'Trakt', 'trakt_blacklist_name', '')
+        TRAKT_USE_ROLLING_DOWNLOAD = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_rolling_download', 0))
+        TRAKT_ROLLING_NUM_EP = check_setting_int(CFG, 'Trakt', 'trakt_rolling_num_ep', 0)
+        TRAKT_ROLLING_ADD_PAUSED = check_setting_int(CFG, 'Trakt', 'trakt_rolling_add_paused', 1)
+        TRAKT_ROLLING_FREQUENCY = check_setting_int(CFG, 'Trakt', 'trakt_rolling_frequency', 15)
+        TRAKT_ROLLING_DEFAULT_WATCHED_STATUS = check_setting_int(CFG, 'Trakt', 'trakt_rolling_default_watched_status', 3)
 
         CheckSection(CFG, 'pyTivo')
         USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0))
@@ -1054,6 +1068,7 @@ def initialize(consoleLogging=True):
                                       if x]
         SUBTITLES_DEFAULT = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_default', 0))
         SUBTITLES_HISTORY = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_history', 0))
+        EMBEDDED_SUBTITLES_ALL = bool(check_setting_int(CFG, 'Subtitles', 'embedded_subtitles_all', 0))
         SUBTITLES_FINDER_FREQUENCY = check_setting_int(CFG, 'Subtitles', 'subtitles_finder_frequency', 1)
         SUBTITLES_MULTI = bool(check_setting_int(CFG, 'Subtitles', 'subtitles_multi', 1))
 
@@ -1318,6 +1333,11 @@ def initialize(consoleLogging=True):
                                                     threadName="TRAKTCHECKER",
                                                     silent=not USE_TRAKT)
 
+        traktRollingScheduler = scheduler.Scheduler(traktChecker.TraktRolling(),
+                                                    cycleTime=datetime.timedelta(TRAKT_ROLLING_FREQUENCY),
+                                                    threadName="TRAKTROLLING",
+                                                    silent=not TRAKT_USE_ROLLING_DOWNLOAD)
+
         subtitlesFinderScheduler = scheduler.Scheduler(subtitles.SubtitlesFinder(),
                                                        cycleTime=datetime.timedelta(hours=SUBTITLES_FINDER_FREQUENCY),
                                                        threadName="FINDSUBTITLES",
@@ -1334,7 +1354,7 @@ def start():
     global __INITIALIZED__, backlogSearchScheduler, \
         showUpdateScheduler, versionCheckScheduler, showQueueScheduler, \
         properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \
-        subtitlesFinderScheduler, USE_SUBTITLES, traktCheckerScheduler, \
+        subtitlesFinderScheduler, USE_SUBTITLES, traktCheckerScheduler, traktRollingScheduler,  \
         dailySearchScheduler, events, started
 
     with INIT_LOCK:
@@ -1376,6 +1396,10 @@ def start():
             if USE_TRAKT:
                 traktCheckerScheduler.start()
 
+            # start the trakt checker
+            if TRAKT_USE_ROLLING_DOWNLOAD and USE_TRAKT:
+                traktRollingScheduler.start()
+
             started = True
 
 
@@ -1383,7 +1407,7 @@ def halt():
     global __INITIALIZED__, backlogSearchScheduler, \
         showUpdateScheduler, versionCheckScheduler, showQueueScheduler, \
         properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \
-        subtitlesFinderScheduler, traktCheckerScheduler, \
+        subtitlesFinderScheduler, traktCheckerScheduler, traktRollingScheduler, \
         dailySearchScheduler, events, started
 
     with INIT_LOCK:
@@ -1457,6 +1481,14 @@ def halt():
                 except:
                     pass
 
+            if TRAKT_USE_ROLLING_DOWNLOAD and USE_TRAKT:
+                traktRollingScheduler.stop.set()
+                logger.log(u"Waiting for the TRAKTROLLING thread to exit")
+                try:
+                    traktRollingScheduler.join(10)
+                except:
+                    pass
+
             if DOWNLOAD_PROPERS:
                 properFinderScheduler.stop.set()
                 logger.log(u"Waiting for the PROPERFINDER thread to exit")
@@ -1480,7 +1512,7 @@ def halt():
                     ADBA_CONNECTION.join(10)
                 except:
                     pass
-            
+
             __INITIALIZED__ = False
             started = False
 
@@ -1531,6 +1563,7 @@ def save_config():
     new_config['General']['cur_commit_branch'] = CUR_COMMIT_BRANCH
     new_config['General']['config_version'] = CONFIG_VERSION
     new_config['General']['encryption_version'] = int(ENCRYPTION_VERSION)
+    new_config['General']['encryption_secret'] = ENCRYPTION_SECRET
     new_config['General']['log_dir'] = ACTUAL_LOG_DIR if ACTUAL_LOG_DIR else 'Logs'
     new_config['General']['log_nr'] = int(LOG_NR)
     new_config['General']['log_size'] = int(LOG_SIZE)
@@ -1543,7 +1576,6 @@ def save_config():
     new_config['General']['web_username'] = WEB_USERNAME
     new_config['General']['web_password'] = helpers.encrypt(WEB_PASSWORD, ENCRYPTION_VERSION)
     new_config['General']['web_cookie_secret'] = WEB_COOKIE_SECRET
-    new_config['General']['play_videos'] = int(PLAY_VIDEOS)
     new_config['General']['download_url'] = DOWNLOAD_URL
     new_config['General']['localhost_ip'] = LOCALHOST_IP
     new_config['General']['cpu_preset'] = CPU_PRESET
@@ -1593,6 +1625,7 @@ def save_config():
     new_config['General']['naming_multi_ep'] = int(NAMING_MULTI_EP)
     new_config['General']['naming_anime_multi_ep'] = int(NAMING_ANIME_MULTI_EP)
     new_config['General']['naming_anime'] = int(NAMING_ANIME)
+    new_config['General']['indexerDefaultLang'] = INDEXER_DEFAULT_LANGUAGE
     new_config['General']['launch_browser'] = int(LAUNCH_BROWSER)
     new_config['General']['update_shows_on_start'] = int(UPDATE_SHOWS_ON_START)
     new_config['General']['update_shows_on_snatch'] = int(UPDATE_SHOWS_ON_SNATCH)
@@ -1899,6 +1932,11 @@ def save_config():
     new_config['Trakt']['trakt_disable_ssl_verify'] = int(TRAKT_DISABLE_SSL_VERIFY)
     new_config['Trakt']['trakt_timeout'] = int(TRAKT_TIMEOUT)
     new_config['Trakt']['trakt_blacklist_name'] = TRAKT_BLACKLIST_NAME
+    new_config['Trakt']['trakt_use_rolling_download'] = int(TRAKT_USE_ROLLING_DOWNLOAD)
+    new_config['Trakt']['trakt_rolling_num_ep'] = int(TRAKT_ROLLING_NUM_EP)
+    new_config['Trakt']['trakt_rolling_add_paused'] = int(TRAKT_ROLLING_ADD_PAUSED)
+    new_config['Trakt']['trakt_rolling_frequency'] = int(TRAKT_ROLLING_FREQUENCY)
+    new_config['Trakt']['trakt_rolling_default_watched_status'] = int(TRAKT_ROLLING_DEFAULT_WATCHED_STATUS)
 
     new_config['pyTivo'] = {}
     new_config['pyTivo']['use_pytivo'] = int(USE_PYTIVO)
@@ -1979,6 +2017,7 @@ def save_config():
     new_config['Subtitles']['subtitles_dir'] = SUBTITLES_DIR
     new_config['Subtitles']['subtitles_default'] = int(SUBTITLES_DEFAULT)
     new_config['Subtitles']['subtitles_history'] = int(SUBTITLES_HISTORY)
+    new_config['Subtitles']['embedded_subtitles_all'] = int(EMBEDDED_SUBTITLES_ALL)
     new_config['Subtitles']['subtitles_finder_frequency'] = int(SUBTITLES_FINDER_FREQUENCY)
     new_config['Subtitles']['subtitles_multi'] = int(SUBTITLES_MULTI)
 
diff --git a/sickbeard/clients/generic.py b/sickbeard/clients/generic.py
index 0b1ec57076835c8a61a137dc085801e866a7e351..7940ab17da62cc70cf64a1271facf9acc9b40d5b 100644
--- a/sickbeard/clients/generic.py
+++ b/sickbeard/clients/generic.py
@@ -1,241 +1,241 @@
-import re
-import time
-from hashlib import sha1
-from base64 import b16encode, b32decode
-
-import sickbeard
-from sickbeard import logger
-from sickbeard.exceptions import ex
-from sickbeard.clients import http_error_code
-from lib.bencode import bencode, bdecode
-from lib import requests
-from lib.requests import exceptions
-from lib.bencode.BTL import BTFailure
-
-class GenericClient(object):
-    def __init__(self, name, host=None, username=None, password=None):
-
-        self.name = name
-        self.username = sickbeard.TORRENT_USERNAME if username is None else username
-        self.password = sickbeard.TORRENT_PASSWORD if password is None else password
-        self.host = sickbeard.TORRENT_HOST if host is None else host
-        self.rpcurl = sickbeard.TORRENT_RPCURL
-
-        self.url = None
-        self.response = None
-        self.auth = None
-        self.last_time = time.time()
-        self.session = requests.session()
-        self.session.auth = (self.username, self.password)
-
-    def _request(self, method='get', params={}, data=None, files=None):
-
-        if time.time() > self.last_time + 1800 or not self.auth:
-            self.last_time = time.time()
-            self._get_auth()
-
-        logger.log(
-            self.name + u': Requested a ' + method.upper() + ' connection to url ' + self.url + ' with Params= ' + str(
-                params) + ' Data=' + str(data if data else 'None')[0:99] + (
-            '...' if len(data if data else 'None') > 200 else ''), logger.DEBUG)
-
-        logger.log(
-            self.name + u': Requested a ' + method.upper() + ' connection to url ' + self.url + ' with Params= ' + str(
-                params) + (
-                (' Data=' + str(data)[0:100] + ('...' if len(data) > 100 else '')) if data is not None else ""),
-            logger.DEBUG)
-
-        if not self.auth:
-            logger.log(self.name + u': Authentication Failed', logger.ERROR)
-            return False
-        try:
-            self.response = self.session.__getattribute__(method)(self.url, params=params, data=data, files=files,
-                                                                  timeout=120, verify=False)
-        except requests.exceptions.ConnectionError, e:
-            logger.log(self.name + u': Unable to connect ' + str(e), logger.ERROR)
-            return False
-        except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
-            logger.log(self.name + u': Invalid Host', logger.ERROR)
-            return False
-        except requests.exceptions.HTTPError, e:
-            logger.log(self.name + u': Invalid HTTP Request ' + str(e), logger.ERROR)
-            return False
-        except requests.exceptions.Timeout, e:
-            logger.log(self.name + u': Connection Timeout ' + str(e), logger.ERROR)
-            return False
-        except Exception, e:
-            logger.log(self.name + u': Unknown exception raised when send torrent to ' + self.name + ': ' + str(e),
-                       logger.ERROR)
-            return False
-
-        if self.response.status_code == 401:
-            logger.log(self.name + u': Invalid Username or Password, check your config', logger.ERROR)
-            return False
-
-        if self.response.status_code in http_error_code.keys():
-            logger.log(self.name + u': ' + http_error_code[self.response.status_code], logger.DEBUG)
-            return False
-
-        logger.log(self.name + u': Response to ' + method.upper() + ' request is ' + self.response.text, logger.DEBUG)
-
-        return True
-
-    def _get_auth(self):
-        """
-        This should be overridden and should return the auth_id needed for the client
-        """
-        return None
-
-    def _add_torrent_uri(self, result):
-        """
-        This should be overridden should return the True/False from the client
-        when a torrent is added via url (magnet or .torrent link)
-        """
-        return False
-
-    def _add_torrent_file(self, result):
-        """
-        This should be overridden should return the True/False from the client
-        when a torrent is added via result.content (only .torrent file)
-        """
-        return False
-
-    def _set_torrent_label(self, result):
-        """
-        This should be overridden should return the True/False from the client
-        when a torrent is set with label
-        """
-        return True
-
-    def _set_torrent_ratio(self, result):
-        """
-        This should be overridden should return the True/False from the client
-        when a torrent is set with ratio
-        """
-        return True
-
-    def _set_torrent_seed_time(self, result):
-        """
-        This should be overridden should return the True/False from the client
-        when a torrent is set with a seed time
-        """
-        return True
-
-    def _set_torrent_priority(self, result):
-        """
-        This should be overriden should return the True/False from the client
-        when a torrent is set with result.priority (-1 = low, 0 = normal, 1 = high)
-        """
-        return True
-
-    def _set_torrent_path(self, torrent_path):
-        """
-        This should be overridden should return the True/False from the client
-        when a torrent is set with path
-        """
-        return True
-
-    def _set_torrent_pause(self, result):
-        """
-        This should be overridden should return the True/False from the client
-        when a torrent is set with pause
-        """
-        return True
-
-    def _get_torrent_hash(self, result):
-
-        if result.url.startswith('magnet'):
-            result.hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0]
-            if len(result.hash) == 32:
-                result.hash = b16encode(b32decode(result.hash)).lower()
-        else:
-            if not result.content:
-                logger.log('Torrent without content', logger.ERROR)
-                raise Exception('Torrent without content')
-
-            try:
-                torrent_bdecode = bdecode(result.content)
-            except BTFailure as e:
-                logger.log('Unable to bdecode torrent', logger.ERROR)
-                logger.log('Torrent bencoded data: {0}'.format(str(result.content)), logger.DEBUG)
-                raise
-            try:
-                info = torrent_bdecode["info"]
-            except Exception as e:
-                logger.log('Unable to find info field in torrent', logger.ERROR)
-                raise
-            result.hash = sha1(bencode(info)).hexdigest()
-
-        return result
-
-    def sendTORRENT(self, result):
-
-        r_code = False
-
-        logger.log(u'Calling ' + self.name + ' Client', logger.DEBUG)
-
-        if not self._get_auth():
-            logger.log(self.name + u': Authentication Failed', logger.ERROR)
-            return r_code
-
-        try:
-            # Sets per provider seed ratio
-            result.ratio = result.provider.seedRatio()
-
-            # lazy fix for now, I'm sure we already do this somewhere else too
-            result = self._get_torrent_hash(result)
-
-            if result.url.startswith('magnet'):
-                r_code = self._add_torrent_uri(result)
-            else:
-                r_code = self._add_torrent_file(result)
-
-            if not r_code:
-                logger.log(self.name + u': Unable to send Torrent: Return code undefined', logger.ERROR)
-                return False
-
-            if not self._set_torrent_pause(result):
-                logger.log(self.name + u': Unable to set the pause for Torrent', logger.ERROR)
-
-            if not self._set_torrent_label(result):
-                logger.log(self.name + u': Unable to set the label for Torrent', logger.ERROR)
-
-            if not self._set_torrent_ratio(result):
-                logger.log(self.name + u': Unable to set the ratio for Torrent', logger.ERROR)
-
-            if not self._set_torrent_seed_time(result):
-                logger.log(self.name + u': Unable to set the seed time for Torrent', logger.ERROR)
-
-            if not self._set_torrent_path(result):
-                logger.log(self.name + u': Unable to set the path for Torrent', logger.ERROR)
-
-            if result.priority != 0 and not self._set_torrent_priority(result):
-                logger.log(self.name + u': Unable to set priority for Torrent', logger.ERROR)
-
-        except Exception, e:
-            logger.log(self.name + u': Failed Sending Torrent', logger.ERROR)
-            logger.log(self.name + u': Exception raised when sending torrent: ' + str(result) + u'. Error: ' + str(e), logger.DEBUG)
-            return r_code
-
-        return r_code
-
-    def testAuthentication(self):
-
-        try:
-            self.response = self.session.get(self.url, timeout=120, verify=False)
-        except requests.exceptions.ConnectionError, e:
-            return False, 'Error: ' + self.name + ' Connection Error'
-        except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
-            return False, 'Error: Invalid ' + self.name + ' host'
-
-        if self.response.status_code == 401:
-            return False, 'Error: Invalid ' + self.name + ' Username or Password, check your config!'
-
-        try:
-            self._get_auth()
-            if self.response.status_code == 200 and self.auth:
-                return True, 'Success: Connected and Authenticated'
-            else:
-                return False, 'Error: Unable to get ' + self.name + ' Authentication, check your config!'
-        except Exception:
-            return False, 'Error: Unable to connect to ' + self.name
+import re
+import time
+from hashlib import sha1
+from base64 import b16encode, b32decode
+
+import sickbeard
+from sickbeard import logger
+from sickbeard.exceptions import ex
+from sickbeard.clients import http_error_code
+from lib.bencode import bencode, bdecode
+from lib import requests
+from lib.requests import exceptions
+from lib.bencode.BTL import BTFailure
+
class GenericClient(object):
    """Base class for torrent-client integrations.

    Subclasses override the ``_get_auth`` / ``_add_torrent_*`` /
    ``_set_torrent_*`` hooks; :meth:`sendTORRENT` drives the full
    submission workflow (auth -> add -> pause/label/ratio/... options).
    """

    def __init__(self, name, host=None, username=None, password=None):
        """Store connection settings, falling back to the global sickbeard
        TORRENT_* configuration for any argument left as None."""
        self.name = name
        self.username = sickbeard.TORRENT_USERNAME if username is None else username
        self.password = sickbeard.TORRENT_PASSWORD if password is None else password
        self.host = sickbeard.TORRENT_HOST if host is None else host
        self.rpcurl = sickbeard.TORRENT_RPCURL

        self.url = None
        self.response = None
        self.auth = None
        self.last_time = time.time()
        self.session = requests.session()
        self.session.auth = (self.username, self.password)

    def _request(self, method='get', params=None, data=None, files=None):
        """Issue an HTTP request to the client and return True on success.

        Re-authenticates when the cached auth is missing or older than 30
        minutes. Logs and returns False on any connection/HTTP failure.
        """
        # was a mutable default ({}) shared across calls; use None sentinel
        if params is None:
            params = {}

        # refresh authentication when the session is stale or never set up
        if time.time() > self.last_time + 1800 or not self.auth:
            self.last_time = time.time()
            self._get_auth()

        if not self.auth:
            logger.log(self.name + u': Authentication Failed', logger.ERROR)
            return False

        # single debug log (the original emitted two near-duplicates, the
        # first of which sliced to 99 chars but tested length against 200)
        logger.log(
            self.name + u': Requested a ' + method.upper() + ' connection to url ' + self.url + ' with Params= ' + str(
                params) + (
                (' Data=' + str(data)[0:100] + ('...' if len(data) > 100 else '')) if data is not None else ""),
            logger.DEBUG)

        try:
            # getattr is the idiomatic spelling of __getattribute__ lookup
            self.response = getattr(self.session, method)(self.url, params=params, data=data, files=files,
                                                          timeout=120, verify=False)
        except requests.exceptions.ConnectionError as e:
            logger.log(self.name + u': Unable to connect ' + str(e), logger.ERROR)
            return False
        except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
            logger.log(self.name + u': Invalid Host', logger.ERROR)
            return False
        except requests.exceptions.HTTPError as e:
            logger.log(self.name + u': Invalid HTTP Request ' + str(e), logger.ERROR)
            return False
        except requests.exceptions.Timeout as e:
            logger.log(self.name + u': Connection Timeout ' + str(e), logger.ERROR)
            return False
        except Exception as e:
            logger.log(self.name + u': Unknown exception raised when send torrent to ' + self.name + ': ' + str(e),
                       logger.ERROR)
            return False

        if self.response.status_code == 401:
            logger.log(self.name + u': Invalid Username or Password, check your config', logger.ERROR)
            return False

        # dict membership test does not need .keys()
        if self.response.status_code in http_error_code:
            logger.log(self.name + u': ' + http_error_code[self.response.status_code], logger.DEBUG)
            return False

        logger.log(self.name + u': Response to ' + method.upper() + ' request is ' + self.response.text, logger.DEBUG)

        return True

    def _get_auth(self):
        """
        This should be overridden and should return the auth_id needed for the client
        """
        return None

    def _add_torrent_uri(self, result):
        """
        This should be overridden should return the True/False from the client
        when a torrent is added via url (magnet or .torrent link)
        """
        return False

    def _add_torrent_file(self, result):
        """
        This should be overridden should return the True/False from the client
        when a torrent is added via result.content (only .torrent file)
        """
        return False

    def _set_torrent_label(self, result):
        """
        This should be overridden should return the True/False from the client
        when a torrent is set with label
        """
        return True

    def _set_torrent_ratio(self, result):
        """
        This should be overridden should return the True/False from the client
        when a torrent is set with ratio
        """
        return True

    def _set_torrent_seed_time(self, result):
        """
        This should be overridden should return the True/False from the client
        when a torrent is set with a seed time
        """
        return True

    def _set_torrent_priority(self, result):
        """
        This should be overridden should return the True/False from the client
        when a torrent is set with result.priority (-1 = low, 0 = normal, 1 = high)
        """
        return True

    def _set_torrent_path(self, torrent_path):
        """
        This should be overridden should return the True/False from the client
        when a torrent is set with path
        """
        return True

    def _set_torrent_pause(self, result):
        """
        This should be overridden should return the True/False from the client
        when a torrent is set with pause
        """
        return True

    def _get_torrent_hash(self, result):
        """Populate result.hash and return result.

        Magnet links: extract the btih info-hash (converting a 32-char
        base32 form to 40-char hex). Otherwise: bdecode result.content and
        sha1 the bencoded 'info' dict. Raises on missing/malformed content.
        """
        if result.url.startswith('magnet'):
            result.hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0]
            if len(result.hash) == 32:
                # base32-encoded hash: convert to the canonical hex form
                result.hash = b16encode(b32decode(result.hash)).lower()
        else:
            if not result.content:
                logger.log('Torrent without content', logger.ERROR)
                raise Exception('Torrent without content')

            try:
                torrent_bdecode = bdecode(result.content)
            except BTFailure:
                logger.log('Unable to bdecode torrent', logger.ERROR)
                logger.log('Torrent bencoded data: {0}'.format(str(result.content)), logger.DEBUG)
                raise
            try:
                info = torrent_bdecode["info"]
            except KeyError:
                # narrowed from bare Exception: a missing key is the case
                # this branch is meant to log; anything else still raises
                logger.log('Unable to find info field in torrent', logger.ERROR)
                raise
            result.hash = sha1(bencode(info)).hexdigest()

        return result

    def sendTORRENT(self, result):
        """Authenticate, submit the torrent in *result* to the client, then
        apply pause/label/ratio/seed-time/path/priority options.

        Returns the add-call's result code (False on any failure).
        """
        r_code = False

        logger.log(u'Calling ' + self.name + ' Client', logger.DEBUG)

        if not self._get_auth():
            logger.log(self.name + u': Authentication Failed', logger.ERROR)
            return r_code

        try:
            # Sets per provider seed ratio
            result.ratio = result.provider.seedRatio()

            # lazy fix for now, I'm sure we already do this somewhere else too
            result = self._get_torrent_hash(result)

            if result.url.startswith('magnet'):
                r_code = self._add_torrent_uri(result)
            else:
                r_code = self._add_torrent_file(result)

            if not r_code:
                logger.log(self.name + u': Unable to send Torrent: Return code undefined', logger.ERROR)
                return False

            # option setters are best-effort: log failures but keep going
            if not self._set_torrent_pause(result):
                logger.log(self.name + u': Unable to set the pause for Torrent', logger.ERROR)

            if not self._set_torrent_label(result):
                logger.log(self.name + u': Unable to set the label for Torrent', logger.ERROR)

            if not self._set_torrent_ratio(result):
                logger.log(self.name + u': Unable to set the ratio for Torrent', logger.ERROR)

            if not self._set_torrent_seed_time(result):
                logger.log(self.name + u': Unable to set the seed time for Torrent', logger.ERROR)

            if not self._set_torrent_path(result):
                logger.log(self.name + u': Unable to set the path for Torrent', logger.ERROR)

            if result.priority != 0 and not self._set_torrent_priority(result):
                logger.log(self.name + u': Unable to set priority for Torrent', logger.ERROR)

        except Exception as e:
            logger.log(self.name + u': Failed Sending Torrent', logger.ERROR)
            logger.log(self.name + u': Exception raised when sending torrent: ' + str(result) + u'. Error: ' + str(e), logger.DEBUG)
            return r_code

        return r_code

    def testAuthentication(self):
        """Probe the client URL and auth; return (ok, human-readable message)."""
        try:
            self.response = self.session.get(self.url, timeout=120, verify=False)
        except requests.exceptions.ConnectionError:
            return False, 'Error: ' + self.name + ' Connection Error'
        except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
            return False, 'Error: Invalid ' + self.name + ' host'

        if self.response.status_code == 401:
            return False, 'Error: Invalid ' + self.name + ' Username or Password, check your config!'

        try:
            self._get_auth()
            if self.response.status_code == 200 and self.auth:
                return True, 'Success: Connected and Authenticated'
            else:
                return False, 'Error: Unable to get ' + self.name + ' Authentication, check your config!'
        except Exception:
            return False, 'Error: Unable to connect to ' + self.name
diff --git a/sickbeard/config.py b/sickbeard/config.py
index 060cc74c236215f5e383ad9fc613a5d0c2f6746b..4b86e6f3bb6e6c3fb4b970c69636394f122f3630 100644
--- a/sickbeard/config.py
+++ b/sickbeard/config.py
@@ -461,7 +461,8 @@ class ConfigMigrator():
                                 3: 'Rename omgwtfnzb variables',
                                 4: 'Add newznab catIDs',
                                 5: 'Metadata update',
-                                6: 'Convert from XBMC to new KODI variables'
+                                6: 'Convert from XBMC to new KODI variables',
+                                7: 'Use version 2 for password encryption'
         }
 
     def migrate_config(self):
@@ -491,7 +492,7 @@ class ConfigMigrator():
             else:
                 logger.log(u"Proceeding with upgrade")
 
-            # do the                                                                                                migration, expect a method named _migrate_v<num>
+            # do the migration, expect a method named _migrate_v<num>
             logger.log(u"Migrating config up to version " + str(next_version) + migration_name)
             getattr(self, '_migrate_v' + str(next_version))()
             self.config_version = next_version
@@ -745,3 +746,6 @@ class ConfigMigrator():
         sickbeard.METADATA_KODI = check_setting_str(self.config_obj, 'General', 'metadata_xbmc', '0|0|0|0|0|0|0|0|0|0')
         sickbeard.METADATA_KODI_12PLUS = check_setting_str(self.config_obj, 'General', 'metadata_xbmc_12plus', '0|0|0|0|0|0|0|0|0|0')
 
+    # Migration v6: Use version 2 for password encryption
+    def _migrate_v7(self):
+        sickbeard.ENCRYPTION_VERSION = 2
diff --git a/sickbeard/dailysearcher.py b/sickbeard/dailysearcher.py
index d54e4d2d4ecf1f649cafa68c62cdd5e4335c5123..66657c38615e895df764aef159a5d8cedec6de21 100644
--- a/sickbeard/dailysearcher.py
+++ b/sickbeard/dailysearcher.py
@@ -89,7 +89,29 @@ class DailySearcher():
                 if ep.show.paused:
                     ep.status = common.SKIPPED
                 else:
-                    ep.status = common.WANTED
+                    myDB = db.DBConnection()
+                    sql_selection="SELECT show_name, indexer_id, season, episode, paused FROM (SELECT * FROM tv_shows s,tv_episodes e WHERE s.indexer_id = e.showid) T1 WHERE T1.paused = 0 and T1.episode_id IN (SELECT T2.episode_id FROM tv_episodes T2 WHERE T2.showid = T1.indexer_id and T2.status in (?) ORDER BY T2.season,T2.episode LIMIT 1) and airdate is not null and indexer_id = ? ORDER BY T1.show_name,season,episode"
+                    results = myDB.select(sql_selection, [common.SKIPPED, sqlEp["showid"]])
+                    if not sickbeard.TRAKT_USE_ROLLING_DOWNLOAD:
+                        if ep.season == 0: 
+                            logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED, due to trakt integration")
+                            ep.status = common.SKIPPED
+                        else:  
+                            logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to WANTED")
+                            ep.status = common.WANTED                         
+                    else:
+                        sn_sk = results[0]["season"]
+                        ep_sk = results[0]["episode"]
+                        if (int(sn_sk)*100+int(ep_sk)) < (int(sqlEp["season"])*100+int(sqlEp["episode"])):
+                            logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED, due to trakt integration")
+                            ep.status = common.SKIPPED
+                        else:
+                            if ep.season == 0: 
+                                logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED, due to trakt integration")
+                                ep.status = common.SKIPPED
+                            else:
+                                logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to WANTED")
+                                ep.status = common.WANTED
 
                 sql_l.append(ep.get_sql())
         else:
@@ -103,4 +125,4 @@ class DailySearcher():
         dailysearch_queue_item = sickbeard.search_queue.DailySearchQueueItem()
         sickbeard.searchQueueScheduler.action.add_item(dailysearch_queue_item)
 
-        self.amActive = False
\ No newline at end of file
+        self.amActive = False
diff --git a/sickbeard/helpers.py b/sickbeard/helpers.py
index 193771bf1a4cdc462f8495381a33fae55702380a..5382e42182e523fcc4622d14cc3a9565622ccef8 100644
--- a/sickbeard/helpers.py
+++ b/sickbeard/helpers.py
@@ -1,1459 +1,1524 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty    of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import os
-import ctypes
-import random
-import re
-import socket
-import stat
-import tempfile
-import time
-import traceback
-import urllib
-import hashlib
-import httplib
-import urlparse
-import uuid
-import base64
-import zipfile
-import datetime
-import errno
-import ast
-import operator
-
-import sickbeard
-import subliminal
-import adba
-import requests
-import requests.exceptions
-import xmltodict
-
-import subprocess
-
-from sickbeard.exceptions import MultipleShowObjectsException, ex
-from sickbeard import logger, classes
-from sickbeard.common import USER_AGENT, mediaExtensions, subtitleExtensions
-from sickbeard import db
-from sickbeard import encodingKludge as ek
-from sickbeard import notifiers
-from sickbeard import clients
-
-from cachecontrol import CacheControl, caches
-from itertools import izip, cycle
-
-import shutil
-import lib.shutil_custom
-
-shutil.copyfile = lib.shutil_custom.copyfile_custom
-
-urllib._urlopener = classes.SickBeardURLopener()
-
-
-def indentXML(elem, level=0):
-    '''
-    Does our pretty printing, makes Matt very happy
-    '''
-    i = "\n" + level * "  "
-    if len(elem):
-        if not elem.text or not elem.text.strip():
-            elem.text = i + "  "
-        if not elem.tail or not elem.tail.strip():
-            elem.tail = i
-        for elem in elem:
-            indentXML(elem, level + 1)
-        if not elem.tail or not elem.tail.strip():
-            elem.tail = i
-    else:
-        # Strip out the newlines from text
-        if elem.text:
-            elem.text = elem.text.replace('\n', ' ')
-        if level and (not elem.tail or not elem.tail.strip()):
-            elem.tail = i
-
-
-def remove_extension(name):
-    """
-    Remove download or media extension from name (if any)
-    """
-
-    if name and "." in name:
-        base_name, sep, extension = name.rpartition('.')  # @UnusedVariable
-        if base_name and extension.lower() in ['nzb', 'torrent'] + mediaExtensions:
-            name = base_name
-
-    return name
-
-
-def remove_non_release_groups(name):
-    """
-    Remove non release groups from name
-    """
-
-    if name and "-" in name:
-        name_group = name.rsplit('-', 1)
-        if name_group[-1].upper() in ["RP", "NZBGEEK"]:
-            name = name_group[0]
-
-    return name
-
-
-def replaceExtension(filename, newExt):
-    '''
-    >>> replaceExtension('foo.avi', 'mkv')
-    'foo.mkv'
-    >>> replaceExtension('.vimrc', 'arglebargle')
-    '.vimrc'
-    >>> replaceExtension('a.b.c', 'd')
-    'a.b.d'
-    >>> replaceExtension('', 'a')
-    ''
-    >>> replaceExtension('foo.bar', '')
-    'foo.'
-    '''
-    sepFile = filename.rpartition(".")
-    if sepFile[0] == "":
-        return filename
-    else:
-        return sepFile[0] + "." + newExt
-
-
-def isSyncFile(filename):
-    extension = filename.rpartition(".")[2].lower()
-    #if extension == '!sync' or extension == 'lftp-pget-status' or extension == 'part' or extension == 'bts':
-    syncfiles = sickbeard.SYNC_FILES
-    if extension in syncfiles.split(","):
-        return True
-    else:
-        return False
-
-
-def isMediaFile(filename):
-    # ignore samples
-    if re.search('(^|[\W_])(sample\d*)[\W_]', filename, re.I):
-        return False
-
-    # ignore MAC OS's retarded "resource fork" files
-    if filename.startswith('._'):
-        return False
-
-    sepFile = filename.rpartition(".")
-
-    if re.search('extras?$', sepFile[0], re.I):
-        return False
-
-    if sepFile[2].lower() in mediaExtensions:
-        return True
-    else:
-        return False
-
-
-def isRarFile(filename):
-    archive_regex = '(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
-
-    if re.search(archive_regex, filename):
-        return True
-
-    return False
-
-
-def isBeingWritten(filepath):
-    # Return True if file was modified within 60 seconds. it might still be being written to.
-    ctime = max(ek.ek(os.path.getctime, filepath), ek.ek(os.path.getmtime, filepath))
-    if ctime > time.time() - 60:
-        return True
-
-    return False
-
-
-def sanitizeFileName(name):
-    '''
-    >>> sanitizeFileName('a/b/c')
-    'a-b-c'
-    >>> sanitizeFileName('abc')
-    'abc'
-    >>> sanitizeFileName('a"b')
-    'ab'
-    >>> sanitizeFileName('.a.b..')
-    'a.b'
-    '''
-
-    # remove bad chars from the filename
-    name = re.sub(r'[\\/\*]', '-', name)
-    name = re.sub(r'[:"<>|?]', '', name)
-    name = re.sub(ur'\u2122', '', name) # Trade Mark Sign
-
-    # remove leading/trailing periods and spaces
-    name = name.strip(' .')
-
-    return name
-
-
-def _remove_file_failed(file):
-    try:
-        ek.ek(os.remove, file)
-    except:
-        pass
-
-def findCertainShow(showList, indexerid):
-
-    results = []
-
-    if not isinstance(indexerid, list):
-        indexerid = [indexerid]
-
-    if showList and len(indexerid):
-        results = filter(lambda x: int(x.indexerid) in indexerid, showList)
-
-    if len(results) == 1:
-        return results[0]
-    elif len(results) > 1:
-        raise MultipleShowObjectsException()
-
-def makeDir(path):
-    if not ek.ek(os.path.isdir, path):
-        try:
-            ek.ek(os.makedirs, path)
-            # do the library update for synoindex
-            notifiers.synoindex_notifier.addFolder(path)
-        except OSError:
-            return False
-    return True
-
-
-def searchDBForShow(regShowName, log=False):
-    showNames = [re.sub('[. -]', ' ', regShowName)]
-
-    yearRegex = "([^()]+?)\s*(\()?(\d{4})(?(2)\))$"
-
-    myDB = db.DBConnection()
-    for showName in showNames:
-
-        sqlResults = myDB.select("SELECT * FROM tv_shows WHERE show_name LIKE ?",
-                                 [showName])
-
-        if len(sqlResults) == 1:
-            return int(sqlResults[0]["indexer_id"])
-        else:
-            # if we didn't get exactly one result then try again with the year stripped off if possible
-            match = re.match(yearRegex, showName)
-            if match and match.group(1):
-                if log:
-                    logger.log(u"Unable to match original name but trying to manually strip and specify show year",
-                               logger.DEBUG)
-                sqlResults = myDB.select(
-                    "SELECT * FROM tv_shows WHERE (show_name LIKE ?) AND startyear = ?",
-                    [match.group(1) + '%', match.group(3)])
-
-            if len(sqlResults) == 0:
-                if log:
-                    logger.log(u"Unable to match a record in the DB for " + showName, logger.DEBUG)
-                continue
-            elif len(sqlResults) > 1:
-                if log:
-                    logger.log(u"Multiple results for " + showName + " in the DB, unable to match show name",
-                               logger.DEBUG)
-                continue
-            else:
-                return int(sqlResults[0]["indexer_id"])
-
-
-def searchIndexerForShowID(regShowName, indexer=None, indexer_id=None, ui=None):
-    showNames = [re.sub('[. -]', ' ', regShowName)]
-
-    # Query Indexers for each search term and build the list of results
-    for i in sickbeard.indexerApi().indexers if not indexer else int(indexer or []):
-        # Query Indexers for each search term and build the list of results
-        lINDEXER_API_PARMS = sickbeard.indexerApi(i).api_params.copy()
-        if ui is not None: lINDEXER_API_PARMS['custom_ui'] = ui
-        t = sickbeard.indexerApi(i).indexer(**lINDEXER_API_PARMS)
-
-        for name in showNames:
-            logger.log(u"Trying to find " + name + " on " + sickbeard.indexerApi(i).name, logger.DEBUG)
-
-            try:
-                search = t[indexer_id] if indexer_id else t[name]
-            except:
-                continue
-
-            try:
-                seriesname = search[0]['seriesname']
-            except:
-                seriesname = None
-
-            try:
-                series_id = search[0]['id']
-            except:
-                series_id = None
-
-            if not (seriesname and series_id):
-                continue
-            ShowObj = findCertainShow(sickbeard.showList, int(series_id))
-            #Check if we can find the show in our list (if not, it's not the right show)
-            if (indexer_id is None) and (ShowObj is not None) and (ShowObj.indexerid == int(series_id)):
-                return (seriesname, i, int(series_id))
-            elif (indexer_id is not None) and (int(indexer_id) == int(series_id)):
-                return (seriesname, i, int(indexer_id))
-
-        if indexer:
-            break
-
-    return (None, None, None)
-
-
-def sizeof_fmt(num):
-    '''
-    >>> sizeof_fmt(2)
-    '2.0 bytes'
-    >>> sizeof_fmt(1024)
-    '1.0 KB'
-    >>> sizeof_fmt(2048)
-    '2.0 KB'
-    >>> sizeof_fmt(2**20)
-    '1.0 MB'
-    >>> sizeof_fmt(1234567)
-    '1.2 MB'
-    '''
-    for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
-        if num < 1024.0:
-            return "%3.1f %s" % (num, x)
-        num /= 1024.0
-
-
-def listMediaFiles(path):
-    if not dir or not ek.ek(os.path.isdir, path):
-        return []
-
-    files = []
-    for curFile in ek.ek(os.listdir, path):
-        fullCurFile = ek.ek(os.path.join, path, curFile)
-
-        # if it's a folder do it recursively
-        if ek.ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
-            files += listMediaFiles(fullCurFile)
-
-        elif isMediaFile(curFile):
-            files.append(fullCurFile)
-
-    return files
-
-
-def copyFile(srcFile, destFile):
-    ek.ek(shutil.copyfile, srcFile, destFile)
-    try:
-        ek.ek(shutil.copymode, srcFile, destFile)
-    except OSError:
-        pass
-
-
-def moveFile(srcFile, destFile):
-    try:
-        ek.ek(shutil.move, srcFile, destFile)
-        fixSetGroupID(destFile)
-    except OSError:
-        copyFile(srcFile, destFile)
-        ek.ek(os.unlink, srcFile)
-
-
-def link(src, dst):
-    if os.name == 'nt':
-        import ctypes
-
-        if ctypes.windll.kernel32.CreateHardLinkW(unicode(dst), unicode(src), 0) == 0: raise ctypes.WinError()
-    else:
-        os.link(src, dst)
-
-
-def hardlinkFile(srcFile, destFile):
-    try:
-        ek.ek(link, srcFile, destFile)
-        fixSetGroupID(destFile)
-    except Exception, e:
-        logger.log(u"Failed to create hardlink of " + srcFile + " at " + destFile + ": " + ex(e) + ". Copying instead",
-                   logger.ERROR)
-        copyFile(srcFile, destFile)
-
-
-def symlink(src, dst):
-    if os.name == 'nt':
-        import ctypes
-
-        if ctypes.windll.kernel32.CreateSymbolicLinkW(unicode(dst), unicode(src), 1 if os.path.isdir(src) else 0) in [0,
-                                                                                                                      1280]: raise ctypes.WinError()
-    else:
-        os.symlink(src, dst)
-
-
-def moveAndSymlinkFile(srcFile, destFile):
-    try:
-        ek.ek(shutil.move, srcFile, destFile)
-        fixSetGroupID(destFile)
-        ek.ek(symlink, destFile, srcFile)
-    except:
-        logger.log(u"Failed to create symlink of " + srcFile + " at " + destFile + ". Copying instead", logger.ERROR)
-        copyFile(srcFile, destFile)
-
-
-def make_dirs(path):
-    """
-    Creates any folders that are missing and assigns them the permissions of their
-    parents
-    """
-
-    logger.log(u"Checking if the path " + path + " already exists", logger.DEBUG)
-
-    if not ek.ek(os.path.isdir, path):
-        # Windows, create all missing folders
-        if os.name == 'nt' or os.name == 'ce':
-            try:
-                logger.log(u"Folder " + path + " didn't exist, creating it", logger.DEBUG)
-                ek.ek(os.makedirs, path)
-            except (OSError, IOError), e:
-                logger.log(u"Failed creating " + path + " : " + ex(e), logger.ERROR)
-                return False
-
-        # not Windows, create all missing folders and set permissions
-        else:
-            sofar = ''
-            folder_list = path.split(os.path.sep)
-
-            # look through each subfolder and make sure they all exist
-            for cur_folder in folder_list:
-                sofar += cur_folder + os.path.sep
-
-                # if it exists then just keep walking down the line
-                if ek.ek(os.path.isdir, sofar):
-                    continue
-
-                try:
-                    logger.log(u"Folder " + sofar + " didn't exist, creating it", logger.DEBUG)
-                    ek.ek(os.mkdir, sofar)
-                    # use normpath to remove end separator, otherwise checks permissions against itself
-                    chmodAsParent(ek.ek(os.path.normpath, sofar))
-                    # do the library update for synoindex
-                    notifiers.synoindex_notifier.addFolder(sofar)
-                except (OSError, IOError), e:
-                    logger.log(u"Failed creating " + sofar + " : " + ex(e), logger.ERROR)
-                    return False
-
-    return True
-
-
-def rename_ep_file(cur_path, new_path, old_path_length=0):
-    """
-    Creates all folders needed to move a file to its new location, renames it, then cleans up any folders
-    left that are now empty.
-
-    cur_path: The absolute path to the file you want to move/rename
-    new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION
-    old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION
-    """
-
-    new_dest_dir, new_dest_name = os.path.split(new_path)  # @UnusedVariable
-
-    if old_path_length == 0 or old_path_length > len(cur_path):
-        # approach from the right
-        cur_file_name, cur_file_ext = os.path.splitext(cur_path)  # @UnusedVariable
-    else:
-        # approach from the left
-        cur_file_ext = cur_path[old_path_length:]
-        cur_file_name = cur_path[:old_path_length]
-
-    if cur_file_ext[1:] in subtitleExtensions:
-        # Extract subtitle language from filename
-        sublang = os.path.splitext(cur_file_name)[1][1:]
-
-        # Check if the language extracted from filename is a valid language
-        try:
-            language = subliminal.language.Language(sublang, strict=True)
-            cur_file_ext = '.' + sublang + cur_file_ext
-        except ValueError:
-            pass
-
-    # put the extension on the incoming file
-    new_path += cur_file_ext
-
-    make_dirs(os.path.dirname(new_path))
-
-    # move the file
-    try:
-        logger.log(u"Renaming file from " + cur_path + " to " + new_path)
-        ek.ek(shutil.move, cur_path, new_path)
-    except (OSError, IOError), e:
-        logger.log(u"Failed renaming " + cur_path + " to " + new_path + ": " + ex(e), logger.ERROR)
-        return False
-
-    # clean up any old folders that are empty
-    delete_empty_folders(ek.ek(os.path.dirname, cur_path))
-
-    return True
-
-
-def delete_empty_folders(check_empty_dir, keep_dir=None):
-    """
-    Walks backwards up the path and deletes any empty folders found.
-
-    check_empty_dir: The path to clean (absolute path to a folder)
-    keep_dir: Clean until this path is reached
-    """
-
-    # treat check_empty_dir as empty when it only contains these items
-    ignore_items = []
-
-    logger.log(u"Trying to clean any empty folders under " + check_empty_dir)
-
-    # as long as the folder exists and doesn't contain any files, delete it
-    while ek.ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
-        check_files = ek.ek(os.listdir, check_empty_dir)
-
-        if not check_files or (len(check_files) <= len(ignore_items) and all(
-                [check_file in ignore_items for check_file in check_files])):
-            # directory is empty or contains only ignore_items
-            try:
-                logger.log(u"Deleting empty folder: " + check_empty_dir)
-                # need shutil.rmtree when ignore_items is really implemented
-                ek.ek(os.rmdir, check_empty_dir)
-                # do the library update for synoindex
-                notifiers.synoindex_notifier.deleteFolder(check_empty_dir)
-            except OSError, e:
-                logger.log(u"Unable to delete " + check_empty_dir + ": " + repr(e) + " / " + str(e), logger.WARNING)
-                break
-            check_empty_dir = ek.ek(os.path.dirname, check_empty_dir)
-        else:
-            break
-
-
-def fileBitFilter(mode):
-    for bit in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID]:
-        if mode & bit:
-            mode -= bit
-
-    return mode
-
-
-def chmodAsParent(childPath):
-    if os.name == 'nt' or os.name == 'ce':
-        return
-
-    parentPath = ek.ek(os.path.dirname, childPath)
-
-    if not parentPath:
-        logger.log(u"No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG)
-        return
-
-    parentPathStat = ek.ek(os.stat, parentPath)
-    parentMode = stat.S_IMODE(parentPathStat[stat.ST_MODE])
-
-    childPathStat = ek.ek(os.stat, childPath)
-    childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE])
-
-    if ek.ek(os.path.isfile, childPath):
-        childMode = fileBitFilter(parentMode)
-    else:
-        childMode = parentMode
-
-    if childPath_mode == childMode:
-        return
-
-    childPath_owner = childPathStat.st_uid
-    user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX
-
-    if user_id != 0 and user_id != childPath_owner:
-        logger.log(u"Not running as root or owner of " + childPath + ", not trying to set permissions", logger.DEBUG)
-        return
-
-    try:
-        ek.ek(os.chmod, childPath, childMode)
-        logger.log(u"Setting permissions for %s to %o as parent directory has %o" % (childPath, childMode, parentMode),
-                   logger.DEBUG)
-    except OSError:
-        logger.log(u"Failed to set permission for %s to %o" % (childPath, childMode), logger.ERROR)
-
-
-def fixSetGroupID(childPath):
-    if os.name == 'nt' or os.name == 'ce':
-        return
-
-    parentPath = ek.ek(os.path.dirname, childPath)
-    parentStat = ek.ek(os.stat, parentPath)
-    parentMode = stat.S_IMODE(parentStat[stat.ST_MODE])
-
-    if parentMode & stat.S_ISGID:
-        parentGID = parentStat[stat.ST_GID]
-        childStat = ek.ek(os.stat, childPath)
-        childGID = childStat[stat.ST_GID]
-
-        if childGID == parentGID:
-            return
-
-        childPath_owner = childStat.st_uid
-        user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX
-
-        if user_id != 0 and user_id != childPath_owner:
-            logger.log(u"Not running as root or owner of " + childPath + ", not trying to set the set-group-ID",
-                       logger.DEBUG)
-            return
-
-        try:
-            ek.ek(os.chown, childPath, -1, parentGID)  # @UndefinedVariable - only available on UNIX
-            logger.log(u"Respecting the set-group-ID bit on the parent directory for %s" % (childPath), logger.DEBUG)
-        except OSError:
-            logger.log(
-                u"Failed to respect the set-group-ID bit on the parent directory for %s (setting group ID %i)" % (
-                    childPath, parentGID), logger.ERROR)
-
-
-def is_anime_in_show_list():
-    for show in sickbeard.showList:
-        if show.is_anime:
-            return True
-    return False
-
-
-def update_anime_support():
-    sickbeard.ANIMESUPPORT = is_anime_in_show_list()
-
-
-def get_absolute_number_from_season_and_episode(show, season, episode):
-    absolute_number = None
-
-    if season and episode:
-        myDB = db.DBConnection()
-        sql = "SELECT * FROM tv_episodes WHERE showid = ? and season = ? and episode = ?"
-        sqlResults = myDB.select(sql, [show.indexerid, season, episode])
-
-        if len(sqlResults) == 1:
-            absolute_number = int(sqlResults[0]["absolute_number"])
-            logger.log(
-                "Found absolute_number:" + str(absolute_number) + " by " + str(season) + "x" + str(episode),
-                logger.DEBUG)
-        else:
-            logger.log(
-                "No entries for absolute number in show: " + show.name + " found using " + str(season) + "x" + str(
-                    episode),
-                logger.DEBUG)
-
-    return absolute_number
-
-
-def get_all_episodes_from_absolute_number(show, absolute_numbers, indexer_id=None):
-    episodes = []
-    season = None
-
-    if len(absolute_numbers):
-        if not show and indexer_id:
-            show = findCertainShow(sickbeard.showList, indexer_id)
-
-        for absolute_number in absolute_numbers if show else []:
-            ep = show.getEpisode(None, None, absolute_number=absolute_number)
-            if ep:
-                episodes.append(ep.episode)
-                season = ep.season  # this will always take the last found seson so eps that cross the season border are not handeled well
-
-    return (season, episodes)
-
-
-def sanitizeSceneName(name, ezrss=False, anime=False):
-    """
-    Takes a show name and returns the "scenified" version of it.
-
-    ezrss: If true the scenified version will follow EZRSS's cracksmoker rules as best as possible
-    
-    anime: Some show have a ' in their name(Kuroko's Basketball) and is needed for search.
-
-    Returns: A string containing the scene version of the show name given.
-    """
-
-    if name:
-        # anime: removed ' for Kuroko's Basketball
-        if anime:
-            bad_chars = u",:()!?\u2019"
-        # ezrss leaves : and ! in their show names as far as I can tell
-        elif ezrss:
-            bad_chars = u",()'?\u2019"
-        else:
-            bad_chars = u",:()'!?\u2019"
-
-        # strip out any bad chars
-        for x in bad_chars:
-            name = name.replace(x, "")
-
-        # tidy up stuff that doesn't belong in scene names
-        name = name.replace("- ", ".").replace(" ", ".").replace("&", "and").replace('/', '.')
-        name = re.sub("\.\.*", ".", name)
-
-        if name.endswith('.'):
-            name = name[:-1]
-
-        return name
-    else:
-        return ''
-
-
-_binOps = {
-    ast.Add: operator.add,
-    ast.Sub: operator.sub,
-    ast.Mult: operator.mul,
-    ast.Div: operator.div,
-    ast.Mod: operator.mod
-}
-
-
-def arithmeticEval(s):
-    """
-    A safe eval supporting basic arithmetic operations.
-
-    :param s: expression to evaluate
-    :return: value
-    """
-    node = ast.parse(s, mode='eval')
-
-    def _eval(node):
-        if isinstance(node, ast.Expression):
-            return _eval(node.body)
-        elif isinstance(node, ast.Str):
-            return node.s
-        elif isinstance(node, ast.Num):
-            return node.n
-        elif isinstance(node, ast.BinOp):
-            return _binOps[type(node.op)](_eval(node.left), _eval(node.right))
-        else:
-            raise Exception('Unsupported type {}'.format(node))
-
-    return _eval(node.body)
-
-def create_https_certificates(ssl_cert, ssl_key):
-    """
-    Create self-signed HTTPS certificares and store in paths 'ssl_cert' and 'ssl_key'
-    """
-    try:
-        from OpenSSL import crypto  # @UnresolvedImport
-        from lib.certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, \
-            serial  # @UnresolvedImport
-    except Exception, e:
-        logger.log(u"pyopenssl module missing, please install for https access", logger.WARNING)
-        return False
-
-    # Create the CA Certificate
-    cakey = createKeyPair(TYPE_RSA, 1024)
-    careq = createCertRequest(cakey, CN='Certificate Authority')
-    cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years
-
-    cname = 'SickRage'
-    pkey = createKeyPair(TYPE_RSA, 1024)
-    req = createCertRequest(pkey, CN=cname)
-    cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years
-
-    # Save the key and certificate to disk
-    try:
-        open(ssl_key, 'w').write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
-        open(ssl_cert, 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
-    except:
-        logger.log(u"Error creating SSL key and certificate", logger.ERROR)
-        return False
-
-    return True
-
-def backupVersionedFile(old_file, version):
-    numTries = 0
-
-    new_file = old_file + '.' + 'v' + str(version)
-
-    while not ek.ek(os.path.isfile, new_file):
-        if not ek.ek(os.path.isfile, old_file):
-            logger.log(u"Not creating backup, " + old_file + " doesn't exist", logger.DEBUG)
-            break
-
-        try:
-            logger.log(u"Trying to back up " + old_file + " to " + new_file, logger.DEBUG)
-            shutil.copy(old_file, new_file)
-            logger.log(u"Backup done", logger.DEBUG)
-            break
-        except Exception, e:
-            logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + ex(e), logger.WARNING)
-            numTries += 1
-            time.sleep(1)
-            logger.log(u"Trying again.", logger.DEBUG)
-
-        if numTries >= 10:
-            logger.log(u"Unable to back up " + old_file + " to " + new_file + " please do it manually.", logger.ERROR)
-            return False
-
-    return True
-
-
-def restoreVersionedFile(backup_file, version):
-    numTries = 0
-
-    new_file, backup_version = os.path.splitext(backup_file)
-    restore_file = new_file + '.' + 'v' + str(version)
-
-    if not ek.ek(os.path.isfile, new_file):
-        logger.log(u"Not restoring, " + new_file + " doesn't exist", logger.DEBUG)
-        return False
-
-    try:
-        logger.log(
-            u"Trying to backup " + new_file + " to " + new_file + "." + "r" + str(version) + " before restoring backup",
-            logger.DEBUG)
-        shutil.move(new_file, new_file + '.' + 'r' + str(version))
-    except Exception, e:
-        logger.log(
-            u"Error while trying to backup DB file " + restore_file + " before proceeding with restore: " + ex(e),
-            logger.WARNING)
-        return False
-
-    while not ek.ek(os.path.isfile, new_file):
-        if not ek.ek(os.path.isfile, restore_file):
-            logger.log(u"Not restoring, " + restore_file + " doesn't exist", logger.DEBUG)
-            break
-
-        try:
-            logger.log(u"Trying to restore " + restore_file + " to " + new_file, logger.DEBUG)
-            shutil.copy(restore_file, new_file)
-            logger.log(u"Restore done", logger.DEBUG)
-            break
-        except Exception, e:
-            logger.log(u"Error while trying to restore " + restore_file + ": " + ex(e), logger.WARNING)
-            numTries += 1
-            time.sleep(1)
-            logger.log(u"Trying again.", logger.DEBUG)
-
-        if numTries >= 10:
-            logger.log(u"Unable to restore " + restore_file + " to " + new_file + " please do it manually.",
-                       logger.ERROR)
-            return False
-
-    return True
-
-
-# try to convert to int, if it fails the default will be returned
-def tryInt(s, s_default=0):
-    try:
-        return int(s)
-    except:
-        return s_default
-
-
-# generates a md5 hash of a file
-def md5_for_file(filename, block_size=2 ** 16):
-    try:
-        with open(filename, 'rb') as f:
-            md5 = hashlib.md5()
-            while True:
-                data = f.read(block_size)
-                if not data:
-                    break
-                md5.update(data)
-            f.close()
-            return md5.hexdigest()
-    except Exception:
-        return None
-
-
-def get_lan_ip():
-    try:return [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][0]
-    except:return socket.gethostname()
-
-def check_url(url):
-    """
-    Check if a URL exists without downloading the whole file.
-    We only check the URL header.
-    """
-    # see also http://stackoverflow.com/questions/2924422
-    # http://stackoverflow.com/questions/1140661
-    good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]
-
-    host, path = urlparse.urlparse(url)[1:3]  # elems [1] and [2]
-    try:
-        conn = httplib.HTTPConnection(host)
-        conn.request('HEAD', path)
-        return conn.getresponse().status in good_codes
-    except StandardError:
-        return None
-
-
-def anon_url(*url):
-    """
-    Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended.
-    """
-    return '' if None in url else '%s%s' % (sickbeard.ANON_REDIRECT, ''.join(str(s) for s in url))
-
-
-"""
-Encryption
-==========
-By Pedro Jose Pereira Vieito <pvieito@gmail.com> (@pvieito)
-
-* If encryption_version==0 then return data without encryption
-* The keys should be unique for each device
-
-To add a new encryption_version:
-  1) Code your new encryption_version
-  2) Update the last encryption_version available in webserve.py
-  3) Remember to maintain old encryption versions and key generators for retrocompatibility
-"""
-
-# Key Generators
-unique_key1 = hex(uuid.getnode() ** 2)  # Used in encryption v1
-
-# Encryption Functions
-def encrypt(data, encryption_version=0, decrypt=False):
-    # Version 1: Simple XOR encryption (this is not very secure, but works)
-    if encryption_version == 1:
-        if decrypt:
-            return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(unique_key1)))
-        else:
-            return base64.encodestring(
-                ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(unique_key1)))).strip()
-    # Version 0: Plain text
-    else:
-        return data
-
-
-def decrypt(data, encryption_version=0):
-    return encrypt(data, encryption_version, decrypt=True)
-
-
-def full_sanitizeSceneName(name):
-    return re.sub('[. -]', ' ', sanitizeSceneName(name)).lower().lstrip()
-
-
-def _check_against_names(nameInQuestion, show, season=-1):
-    showNames = []
-    if season in [-1, 1]:
-        showNames = [show.name]
-
-    showNames.extend(sickbeard.scene_exceptions.get_scene_exceptions(show.indexerid, season=season))
-
-    for showName in showNames:
-        nameFromList = full_sanitizeSceneName(showName)
-        if nameFromList == nameInQuestion:
-            return True
-
-    return False
-
-
-def get_show(name, tryIndexers=False, trySceneExceptions=False):
-    if not sickbeard.showList:
-        return
-
-    showObj = None
-    fromCache = False
-
-    if not name:
-        return showObj
-
-    try:
-        # check cache for show
-        cache = sickbeard.name_cache.retrieveNameFromCache(name)
-        if cache:
-            fromCache = True
-            showObj = findCertainShow(sickbeard.showList, int(cache))
-        
-        #try indexers    
-        if not showObj and tryIndexers:
-            showObj = findCertainShow(sickbeard.showList,
-                                      searchIndexerForShowID(full_sanitizeSceneName(name), ui=classes.ShowListUI)[2])
-        
-        #try scene exceptions
-        if not showObj and trySceneExceptions:
-            ShowID = sickbeard.scene_exceptions.get_scene_exception_by_name(name)[0]
-            if ShowID:
-                showObj = findCertainShow(sickbeard.showList, int(ShowID))
-                
-        # add show to cache
-        if showObj and not fromCache:
-            sickbeard.name_cache.addNameToCache(name, showObj.indexerid)
-    except Exception as e:
-        logger.log(u"Error when attempting to find show: " + name + " in SickRage: " + str(e), logger.DEBUG)
-
-    return showObj
-
-
-def is_hidden_folder(folder):
-    """
-    Returns True if folder is hidden.
-    On Linux based systems hidden folders start with . (dot)
-    folder: Full path of folder to check
-    """
-    def is_hidden(filepath):
-        name = os.path.basename(os.path.abspath(filepath))
-        return name.startswith('.') or has_hidden_attribute(filepath)
-
-    def has_hidden_attribute(filepath):
-        try:
-            attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath))
-            assert attrs != -1
-            result = bool(attrs & 2)
-        except (AttributeError, AssertionError):
-            result = False
-        return result
-    
-    if ek.ek(os.path.isdir, folder):
-        if is_hidden(folder):
-            return True
-
-    return False
-
-
-def real_path(path):
-    """
-    Returns: the canonicalized absolute pathname. The resulting path will have no symbolic link, '/./' or '/../' components.
-    """
-    return ek.ek(os.path.normpath, ek.ek(os.path.normcase, ek.ek(os.path.realpath, path)))
-
-
-def validateShow(show, season=None, episode=None):
-    indexer_lang = show.lang
-
-    try:
-        lINDEXER_API_PARMS = sickbeard.indexerApi(show.indexer).api_params.copy()
-
-        if indexer_lang and not indexer_lang == 'en':
-            lINDEXER_API_PARMS['language'] = indexer_lang
-
-        t = sickbeard.indexerApi(show.indexer).indexer(**lINDEXER_API_PARMS)
-        if season is None and episode is None:
-            return t
-
-        return t[show.indexerid][season][episode]
-    except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
-        pass
-
-
-def set_up_anidb_connection():
-    if not sickbeard.USE_ANIDB:
-        logger.log(u"Usage of anidb disabled. Skiping", logger.DEBUG)
-        return False
-
-    if not sickbeard.ANIDB_USERNAME and not sickbeard.ANIDB_PASSWORD:
-        logger.log(u"anidb username and/or password are not set. Aborting anidb lookup.", logger.DEBUG)
-        return False
-
-    if not sickbeard.ADBA_CONNECTION:
-        anidb_logger = lambda x: logger.log("ANIDB: " + str(x), logger.DEBUG)
-        try:
-            sickbeard.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=anidb_logger)
-        except Exception as e:
-            logger.log(u"anidb exception msg: " + str(e))
-            return False
-
-    try:
-        if not sickbeard.ADBA_CONNECTION.authed():
-            sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME, sickbeard.ANIDB_PASSWORD)
-        else:
-            return True
-    except Exception as e:
-        logger.log(u"anidb exception msg: " + str(e))
-        return False
-
-    return sickbeard.ADBA_CONNECTION.authed()
-
-
-def makeZip(fileList, archive):
-    """
-    'fileList' is a list of file names - full path each name
-    'archive' is the file name for the archive with a full path
-    """
-    try:
-        a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED)
-        for f in fileList:
-            a.write(f)
-        a.close()
-        return True
-    except Exception as e:
-        logger.log(u"Zip creation error: " + str(e), logger.ERROR)
-        return False
-
-
-def extractZip(archive, targetDir):
-    """
-    'fileList' is a list of file names - full path each name
-    'archive' is the file name for the archive with a full path
-    """
-    try:
-        if not os.path.exists(targetDir):
-            os.mkdir(targetDir)
-
-        zip_file = zipfile.ZipFile(archive, 'r')
-        for member in zip_file.namelist():
-            filename = os.path.basename(member)
-            # skip directories
-            if not filename:
-                continue
-
-            # copy file (taken from zipfile's extract)
-            source = zip_file.open(member)
-            target = file(os.path.join(targetDir, filename), "wb")
-            shutil.copyfileobj(source, target)
-            source.close()
-            target.close()
-        zip_file.close()
-        return True
-    except Exception as e:
-        logger.log(u"Zip extraction error: " + str(e), logger.ERROR)
-        return False
-
-
-def backupConfigZip(fileList, archive, arcname = None):
-    try:
-        a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED)
-        for f in fileList:
-            a.write(f, os.path.relpath(f, arcname))
-        a.close()
-        return True
-    except Exception as e:
-        logger.log(u"Zip creation error: " + str(e), logger.ERROR)
-        return False
-
-
-def restoreConfigZip(archive, targetDir):
-    import ntpath
-    try:
-        if not os.path.exists(targetDir):
-            os.mkdir(targetDir)
-        else:
-            def path_leaf(path):
-                head, tail = ntpath.split(path)
-                return tail or ntpath.basename(head)
-            bakFilename = '{0}-{1}'.format(path_leaf(targetDir), datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d_%H%M%S'))
-            shutil.move(targetDir, os.path.join(ntpath.dirname(targetDir), bakFilename))
-
-        zip_file = zipfile.ZipFile(archive, 'r')
-        for member in zip_file.namelist():
-            zip_file.extract(member, targetDir)
-        zip_file.close()
-        return True
-    except Exception as e:
-        logger.log(u"Zip extraction error: " + str(e), logger.ERROR)
-        shutil.rmtree(targetDir)
-        return False
-
-
-def mapIndexersToShow(showObj):
-    mapped = {}
-
-    # init mapped indexers object
-    for indexer in sickbeard.indexerApi().indexers:
-        mapped[indexer] = showObj.indexerid if int(indexer) == int(showObj.indexer) else 0
-
-    myDB = db.DBConnection()
-    sqlResults = myDB.select(
-        "SELECT * FROM indexer_mapping WHERE indexer_id = ? AND indexer = ?",
-        [showObj.indexerid, showObj.indexer])
-
-    # for each mapped entry
-    for curResult in sqlResults:
-        nlist = [i for i in curResult if i is not None]
-        # Check if its mapped with both tvdb and tvrage.
-        if len(nlist) >= 4:
-            logger.log(u"Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
-            mapped[int(curResult['mindexer'])] = int(curResult['mindexer_id'])
-            return mapped
-    else:
-        sql_l = []
-        for indexer in sickbeard.indexerApi().indexers:
-            if indexer == showObj.indexer:
-                mapped[indexer] = showObj.indexerid
-                continue
-
-            lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
-            lINDEXER_API_PARMS['custom_ui'] = classes.ShowListUI
-            t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
-
-            try:
-                mapped_show = t[showObj.name]
-            except Exception:
-                logger.log(u"Unable to map " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
-                    indexer).name + " for show: " + showObj.name + ", skipping it", logger.DEBUG)
-                continue
-
-            if mapped_show and len(mapped_show) == 1:
-                logger.log(u"Mapping " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
-                    indexer).name + " for show: " + showObj.name, logger.DEBUG)
-
-                mapped[indexer] = int(mapped_show[0]['id'])
-
-                logger.log(u"Adding indexer mapping to DB for show: " + showObj.name, logger.DEBUG)
-
-                sql_l.append([
-                    "INSERT OR IGNORE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer) VALUES (?,?,?,?)",
-                    [showObj.indexerid, showObj.indexer, int(mapped_show[0]['id']), indexer]])
-
-        if len(sql_l) > 0:
-            myDB = db.DBConnection()
-            myDB.mass_action(sql_l)
-
-    return mapped
-
-
-def touchFile(fname, atime=None):
-    if None != atime:
-        try:
-            with file(fname, 'a'):
-                os.utime(fname, (atime, atime))
-                return True
-        except Exception as e:
-            if e.errno == errno.ENOSYS:
-                logger.log(u"File air date stamping not available on your OS", logger.DEBUG)
-            elif e.errno == errno.EACCES:
-                logger.log(u"File air date stamping failed(Permission denied). Check permissions for file: {0}".format(fname), logger.ERROR)
-            else:
-                logger.log(u"File air date stamping failed. The error is: {0} and the message is: {1}.".format(e.errno, e.strerror), logger.ERROR)
-            pass
-
-    return False
-
-
-def _getTempDir():
-    import getpass
-
-    """Returns the [system temp dir]/tvdb_api-u501 (or
-    tvdb_api-myuser)
-    """
-    if hasattr(os, 'getuid'):
-        uid = "u%d" % (os.getuid())
-    else:
-        # For Windows
-        try:
-            uid = getpass.getuser()
-        except ImportError:
-            return os.path.join(tempfile.gettempdir(), "sickrage")
-
-    return os.path.join(tempfile.gettempdir(), "sickrage-%s" % (uid))
-
-def getURL(url, post_data=None, params=None, headers={}, timeout=30, session=None, json=False, proxyGlypeProxySSLwarning=None):
-    """
-    Returns a byte-string retrieved from the url provider.
-    """
-
-    # request session
-    cache_dir = sickbeard.CACHE_DIR or _getTempDir()
-    session = CacheControl(sess=session, cache=caches.FileCache(os.path.join(cache_dir, 'sessions')))
-
-    # request session headers
-    session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
-    session.headers.update(headers)
-
-    # request session ssl verify
-    session.verify = False
-
-    # request session paramaters
-    session.params = params
-
-    try:
-        # request session proxies
-        if sickbeard.PROXY_SETTING:
-            logger.log("Using proxy for url: " + url, logger.DEBUG)
-            session.proxies = {
-                "http": sickbeard.PROXY_SETTING,
-                "https": sickbeard.PROXY_SETTING,
-            }
-
-        # decide if we get or post data to server
-        if post_data:
-            resp = session.post(url, data=post_data, timeout=timeout)
-        else:
-            resp = session.get(url, timeout=timeout)
-
-        if not resp.ok:
-            logger.log(u"Requested url " + url + " returned status code is " + str(
-                resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
-            return
-
-        if proxyGlypeProxySSLwarning is not None:
-            if re.search('The site you are attempting to browse is on a secure connection', resp.text):
-                resp = session.get(proxyGlypeProxySSLwarning)
-
-                if not resp.ok:
-                    logger.log(u"GlypeProxySSLwarning: Requested url " + url + " returned status code is " + str(
-                        resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
-                    return
-
-    except requests.exceptions.HTTPError, e:
-        logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
-        return
-    except requests.exceptions.ConnectionError, e:
-        logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
-        return
-    except requests.exceptions.Timeout, e:
-        logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
-        return
-    except Exception:
-        logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
-        return
-
-    return resp.content if not json else resp.json()
-
-def download_file(url, filename, session=None):
-    # create session
-    cache_dir = sickbeard.CACHE_DIR or _getTempDir()
-    session = CacheControl(sess=session, cache=caches.FileCache(os.path.join(cache_dir, 'sessions')))
-
-    # request session headers
-    session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
-
-    # request session ssl verify
-    session.verify = False
-
-    # request session streaming
-    session.stream = True
-
-    # request session proxies
-    if sickbeard.PROXY_SETTING:
-        logger.log("Using proxy for url: " + url, logger.DEBUG)
-        session.proxies = {
-            "http": sickbeard.PROXY_SETTING,
-            "https": sickbeard.PROXY_SETTING,
-        }
-
-    try:
-        resp = session.get(url)
-        if not resp.ok:
-            logger.log(u"Requested url " + url + " returned status code is " + str(
-                resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
-            return False
-
-        with open(filename, 'wb') as fp:
-            for chunk in resp.iter_content(chunk_size=1024):
-                if chunk:
-                    fp.write(chunk)
-                    fp.flush()
-
-        chmodAsParent(filename)
-    except requests.exceptions.HTTPError, e:
-        _remove_file_failed(filename)
-        logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
-        return False
-    except requests.exceptions.ConnectionError, e:
-        _remove_file_failed(filename)
-        logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
-        return False
-    except requests.exceptions.Timeout, e:
-        _remove_file_failed(filename)
-        logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
-        return False
-    except EnvironmentError, e:
-        _remove_file_failed(filename)
-        logger.log(u"Unable to save the file: " + ex(e), logger.ERROR)
-        return False
-    except Exception:
-        _remove_file_failed(filename)
-        logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
-        return False
-
-    return True
-
-
-def clearCache(force=False):
-    update_datetime = datetime.datetime.now()
-
-    # clean out cache directory, remove everything > 12 hours old
-    if sickbeard.CACHE_DIR:
-        logger.log(u"Trying to clean cache folder " + sickbeard.CACHE_DIR)
-
-        # Does our cache_dir exists
-        if not ek.ek(os.path.isdir, sickbeard.CACHE_DIR):
-            logger.log(u"Can't clean " + sickbeard.CACHE_DIR + " if it doesn't exist", logger.WARNING)
-        else:
-            max_age = datetime.timedelta(hours=12)
-
-            # Get all our cache files
-            exclude = ['rss', 'images']
-            for cache_root, cache_dirs, cache_files in os.walk(sickbeard.CACHE_DIR, topdown=True):
-                cache_dirs[:] = [d for d in cache_dirs if d not in exclude]
-
-                for file in cache_files:
-                    cache_file = ek.ek(os.path.join, cache_root, file)
-
-                    if ek.ek(os.path.isfile, cache_file):
-                        cache_file_modified = datetime.datetime.fromtimestamp(
-                            ek.ek(os.path.getmtime, cache_file))
-
-                        if force or (update_datetime - cache_file_modified > max_age):
-                            try:
-                                ek.ek(os.remove, cache_file)
-                            except OSError, e:
-                                logger.log(u"Unable to clean " + cache_root + ": " + repr(e) + " / " + str(e),
-                                           logger.WARNING)
-                                break
-
-def get_size(start_path='.'):
-
-    total_size = 0
-    for dirpath, dirnames, filenames in ek.ek(os.walk, start_path):
-        for f in filenames:
-            fp = ek.ek(os.path.join, dirpath, f)
-            total_size += ek.ek(os.path.getsize, fp)
-    return total_size
-
-def generateApiKey():
-    """ Return a new randomized API_KEY
-    """
-
-    try:
-        from hashlib import md5
-    except ImportError:
-        from md5 import md5
-
-    # Create some values to seed md5
-    t = str(time.time())
-    r = str(random.random())
-
-    # Create the md5 instance and give it the current time
-    m = md5(t)
-
-    # Update the md5 instance with the random variable
-    m.update(r)
-
-    # Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
-    logger.log(u"New API generated")
-    return m.hexdigest()
-
-def pretty_filesize(file_bytes):
-    file_bytes = float(file_bytes)
-    if file_bytes >= 1099511627776:
-        terabytes = file_bytes / 1099511627776
-        size = '%.2f TB' % terabytes
-    elif file_bytes >= 1073741824:
-        gigabytes = file_bytes / 1073741824
-        size = '%.2f GB' % gigabytes
-    elif file_bytes >= 1048576:
-        megabytes = file_bytes / 1048576
-        size = '%.2f MB' % megabytes
-    elif file_bytes >= 1024:
-        kilobytes = file_bytes / 1024
-        size = '%.2f KB' % kilobytes
-    else:
-        size = '%.2f b' % file_bytes
-
-    return size
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()
-
-def remove_article(text=''):
-    return re.sub(r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', text)
-
-def generateCookieSecret():
-
-    return base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes)
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import os
+import ctypes
+import random
+import re
+import socket
+import stat
+import tempfile
+import time
+import traceback
+import urllib
+import hashlib
+import httplib
+import urlparse
+import uuid
+import base64
+import zipfile
+import datetime
+import errno
+import ast
+import operator
+
+import sickbeard
+import subliminal
+import adba
+import requests
+import requests.exceptions
+import xmltodict
+
+import subprocess
+
+from sickbeard.exceptions import MultipleShowObjectsException, ex
+from sickbeard import logger, classes
+from sickbeard.common import USER_AGENT, mediaExtensions, subtitleExtensions
+from sickbeard import db
+from sickbeard import encodingKludge as ek
+from sickbeard import notifiers
+from sickbeard import clients
+
+from cachecontrol import CacheControl, caches
+from itertools import izip, cycle
+
+import shutil
+import lib.shutil_custom
+
# Replace shutil's copyfile globally with the project's custom implementation
# (see lib.shutil_custom) so every shutil.copyfile call in the process uses it.
shutil.copyfile = lib.shutil_custom.copyfile_custom

# Route all urllib downloads through SickBeard's opener (custom User-Agent etc.)
urllib._urlopener = classes.SickBeardURLopener()
+
+
def indentXML(elem, level=0):
    '''
    Does our pretty printing (in-place), makes Matt very happy.

    Recursively inserts newline+indent whitespace into an ElementTree
    element's text/tail so the serialized XML is human readable.
    '''
    i = "\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # NOTE: the loop variable deliberately shadows `elem`; after the loop
        # `elem` is the LAST child, whose tail is set to close the parent level
        for elem in elem:
            indentXML(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        # Strip out the newlines from text
        if elem.text:
            elem.text = elem.text.replace('\n', ' ')
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
+
+
def remove_extension(name):
    """
    Remove a trailing download (.nzb/.torrent) or media extension from name, if present.
    """

    if not name or "." not in name:
        return name

    stem, _, ext = name.rpartition('.')
    if stem and ext.lower() in ['nzb', 'torrent'] + mediaExtensions:
        return stem

    return name
+
+
def remove_non_release_groups(name):
    """
    Drop a known bogus "release group" suffix (-RP, -NZBGEEK) from name.
    """

    if name and "-" in name:
        head, _, suffix = name.rpartition('-')
        if suffix.upper() in ("RP", "NZBGEEK"):
            return head

    return name
+
+
def replaceExtension(filename, newExt):
    '''
    Swap the extension of filename for newExt; names without a real
    extension (e.g. dotfiles) are returned unchanged.

    >>> replaceExtension('foo.avi', 'mkv')
    'foo.mkv'
    >>> replaceExtension('.vimrc', 'arglebargle')
    '.vimrc'
    >>> replaceExtension('a.b.c', 'd')
    'a.b.d'
    >>> replaceExtension('', 'a')
    ''
    >>> replaceExtension('foo.bar', '')
    'foo.'
    '''
    base, _, _ = filename.rpartition(".")
    if not base:
        return filename
    return base + "." + newExt
+
+
def isSyncFile(filename):
    """True when filename's extension is one of the configured sync-file extensions (sickbeard.SYNC_FILES)."""
    extension = filename.rpartition(".")[2].lower()
    return extension in sickbeard.SYNC_FILES.split(",")
+
+
def isMediaFile(filename):
    """True for filenames that look like real media; skips samples, MacOS resource forks and extras."""
    # samples are not real media
    if re.search(r'(^|[\W_])(sample\d*)[\W_]', filename, re.I):
        return False

    # MacOS "resource fork" companion files start with ._
    if filename.startswith('._'):
        return False

    stem, _, extension = filename.rpartition(".")

    # skip "extra"/"extras" featurettes
    if re.search(r'extras?$', stem, re.I):
        return False

    return extension.lower() in mediaExtensions
+
+
def isRarFile(filename):
    """True for .rar archives, matching only the first volume of multi-part sets."""
    # base may not contain ".partN.rar"; extension must be "rar" or "part0...01.rar"
    archive_regex = r'(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
    return bool(re.search(archive_regex, filename))
+
+
def isBeingWritten(filepath):
    """True if filepath was created/modified within the last 60 seconds (may still be being written to)."""
    newest_change = max(ek.ek(os.path.getctime, filepath), ek.ek(os.path.getmtime, filepath))
    return newest_change > time.time() - 60
+
+
def sanitizeFileName(name):
    '''
    Make a string safe to use as a filename on common filesystems.

    >>> sanitizeFileName('a/b/c')
    'a-b-c'
    >>> sanitizeFileName('abc')
    'abc'
    >>> sanitizeFileName('a"b')
    'ab'
    >>> sanitizeFileName('.a.b..')
    'a.b'
    '''

    # path separators and wildcards become dashes
    name = re.sub(r'[\\/\*]', '-', name)
    # characters illegal on Windows filesystems are dropped
    name = re.sub(r'[:"<>|?]', '', name)
    # BUG FIX: the old pattern used a ur'' literal, which is invalid syntax on
    # Python 3; a plain u'' literal behaves identically on Python 2 and 3
    name = re.sub(u'\u2122', '', name)  # Trade Mark Sign

    # remove leading/trailing periods and spaces
    name = name.strip(' .')

    return name
+
+
def _remove_file_failed(file):
    """Best-effort delete of a failed-download file; a missing/locked file is ignored."""
    try:
        ek.ek(os.remove, file)
    except OSError:
        # narrowed from a bare except: only filesystem errors should be
        # swallowed here, not SystemExit/KeyboardInterrupt
        pass
+
def findCertainShow(showList, indexerid):
    """Return the single show in showList whose indexerid is in indexerid (an int or list of ints).

    Returns None when nothing matches; raises MultipleShowObjectsException
    when more than one show matches.
    """
    # normalize to a list so a single id and a list of ids are handled alike
    if not isinstance(indexerid, list):
        indexerid = [indexerid]

    results = []
    if showList and len(indexerid):
        # BUG FIX: the old code used filter() and then len() on the result,
        # which only works on Python 2 where filter returns a list; a list
        # comprehension behaves identically on both Python 2 and 3
        results = [x for x in showList if int(x.indexerid) in indexerid]

    if len(results) == 1:
        return results[0]
    elif len(results) > 1:
        raise MultipleShowObjectsException()
+
def makeDir(path):
    """Create path (and any missing parents); returns False on failure, True otherwise."""
    if ek.ek(os.path.isdir, path):
        return True
    try:
        ek.ek(os.makedirs, path)
        # keep synoindex's library in sync with the new folder
        notifiers.synoindex_notifier.addFolder(path)
    except OSError:
        return False
    return True
+
+
def searchDBForShow(regShowName, log=False):
    """Look up a show by name in the local tv_shows table.

    Returns the indexer_id (int) when exactly one row matches, otherwise None.
    When the plain name is ambiguous or unknown, retries with a trailing
    year ("Name (YYYY)" or "Name YYYY") stripped off and matched against
    the startyear column instead.
    """
    showNames = [re.sub('[. -]', ' ', regShowName)]

    # name, optionally followed by a 4-digit year with or without parentheses
    yearRegex = "([^()]+?)\s*(\()?(\d{4})(?(2)\))$"

    myDB = db.DBConnection()
    for showName in showNames:

        sqlResults = myDB.select("SELECT * FROM tv_shows WHERE show_name LIKE ?",
                                 [showName])

        if len(sqlResults) == 1:
            return int(sqlResults[0]["indexer_id"])
        else:
            # if we didn't get exactly one result then try again with the year stripped off if possible
            match = re.match(yearRegex, showName)
            if match and match.group(1):
                if log:
                    logger.log(u"Unable to match original name but trying to manually strip and specify show year",
                               logger.DEBUG)
                sqlResults = myDB.select(
                    "SELECT * FROM tv_shows WHERE (show_name LIKE ?) AND startyear = ?",
                    [match.group(1) + '%', match.group(3)])

            if len(sqlResults) == 0:
                if log:
                    logger.log(u"Unable to match a record in the DB for " + showName, logger.DEBUG)
                continue
            elif len(sqlResults) > 1:
                if log:
                    logger.log(u"Multiple results for " + showName + " in the DB, unable to match show name",
                               logger.DEBUG)
                continue
            else:
                return int(sqlResults[0]["indexer_id"])
+
+
def searchIndexerForShowID(regShowName, indexer=None, indexer_id=None, ui=None):
    """Search the indexers for a show by name (or by indexer_id).

    Returns a (seriesname, indexer, series_id) tuple, or (None, None, None)
    when nothing matched. When `indexer` is given only that indexer is queried.
    """
    showNames = [re.sub('[. -]', ' ', regShowName)]

    # BUG FIX: the old code did `for i in ... else int(indexer or [])`, which
    # tries to iterate over a plain int and raises TypeError whenever an
    # explicit indexer was passed. Wrap it in a list instead.
    indexers = sickbeard.indexerApi().indexers if not indexer else [int(indexer)]

    # Query Indexers for each search term and build the list of results
    for i in indexers:
        lINDEXER_API_PARMS = sickbeard.indexerApi(i).api_params.copy()
        if ui is not None:
            lINDEXER_API_PARMS['custom_ui'] = ui
        t = sickbeard.indexerApi(i).indexer(**lINDEXER_API_PARMS)

        for name in showNames:
            logger.log(u"Trying to find " + name + " on " + sickbeard.indexerApi(i).name, logger.DEBUG)

            # indexer lookups can fail for all sorts of reasons (network,
            # unknown show); treat any failure as "no result"
            try:
                search = t[indexer_id] if indexer_id else t[name]
            except Exception:
                continue

            try:
                seriesname = search[0]['seriesname']
            except Exception:
                seriesname = None

            try:
                series_id = search[0]['id']
            except Exception:
                series_id = None

            if not (seriesname and series_id):
                continue

            ShowObj = findCertainShow(sickbeard.showList, int(series_id))
            # Check if we can find the show in our list (if not, it's not the right show)
            if (indexer_id is None) and (ShowObj is not None) and (ShowObj.indexerid == int(series_id)):
                return (seriesname, i, int(series_id))
            elif (indexer_id is not None) and (int(indexer_id) == int(series_id)):
                return (seriesname, i, int(indexer_id))

        if indexer:
            break

    return (None, None, None)
+
+
def sizeof_fmt(num):
    '''
    Format a byte count as a human-readable string.

    >>> sizeof_fmt(2)
    '2.0 bytes'
    >>> sizeof_fmt(1024)
    '1.0 KB'
    >>> sizeof_fmt(2048)
    '2.0 KB'
    >>> sizeof_fmt(2**20)
    '1.0 MB'
    >>> sizeof_fmt(1234567)
    '1.2 MB'
    '''
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, unit)
        num /= 1024.0
    # BUG FIX: sizes >= 1024 TB used to fall off the end of the loop and
    # silently return None; report them in petabytes instead
    return "%3.1f %s" % (num, 'PB')
+
+
def listMediaFiles(path):
    """Recursively collect full paths of media files under path.

    Hidden folders and folders named 'Extras' are skipped. Returns [] for a
    missing/invalid path.
    """
    # BUG FIX: the old guard was `if not dir or ...` — it tested the builtin
    # `dir` (always truthy) instead of the `path` argument, so an empty path
    # was never caught here
    if not path or not ek.ek(os.path.isdir, path):
        return []

    files = []
    for curFile in ek.ek(os.listdir, path):
        fullCurFile = ek.ek(os.path.join, path, curFile)

        # if it's a folder do it recursively
        if ek.ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
            files += listMediaFiles(fullCurFile)

        elif isMediaFile(curFile):
            files.append(fullCurFile)

    return files
+
+
def copyFile(srcFile, destFile):
    """Copy file contents; copying the permission bits is best-effort."""
    ek.ek(shutil.copyfile, srcFile, destFile)
    try:
        ek.ek(shutil.copymode, srcFile, destFile)
    except OSError:
        # mode copy is nice-to-have; the content copy already succeeded
        pass
+
+
def moveFile(srcFile, destFile):
    """Move a file, preserving the destination directory's set-group-ID convention.

    Falls back to copy+delete when the rename fails (e.g. across filesystems).
    """
    try:
        ek.ek(shutil.move, srcFile, destFile)
        fixSetGroupID(destFile)
    except OSError:
        copyFile(srcFile, destFile)
        ek.ek(os.unlink, srcFile)
+
+
def link(src, dst):
    """Create a hard link at dst pointing to src (Windows API on NT, os.link elsewhere)."""
    if os.name == 'nt':
        import ctypes

        # CreateHardLinkW returns 0 on failure
        if ctypes.windll.kernel32.CreateHardLinkW(unicode(dst), unicode(src), 0) == 0: raise ctypes.WinError()
    else:
        os.link(src, dst)
+
+
def hardlinkFile(srcFile, destFile):
    """Hard-link srcFile to destFile; falls back to a plain copy on any failure."""
    try:
        ek.ek(link, srcFile, destFile)
        fixSetGroupID(destFile)
    except Exception, e:
        logger.log(u"Failed to create hardlink of " + srcFile + " at " + destFile + ": " + ex(e) + ". Copying instead",
                   logger.ERROR)
        copyFile(srcFile, destFile)
+
+
def symlink(src, dst):
    """Create a symlink at dst pointing to src (Windows API on NT, os.symlink elsewhere)."""
    if os.name == 'nt':
        import ctypes

        # CreateSymbolicLinkW: flag 1 = directory link; 0/1280 indicate failure
        if ctypes.windll.kernel32.CreateSymbolicLinkW(unicode(dst), unicode(src), 1 if os.path.isdir(src) else 0) in [0,
                                                                                                                      1280]: raise ctypes.WinError()
    else:
        os.symlink(src, dst)
+
+
def moveAndSymlinkFile(srcFile, destFile):
    """Move srcFile to destFile and leave a symlink at the old location.

    Falls back to a plain copy (no symlink) when any step fails.
    """
    try:
        ek.ek(shutil.move, srcFile, destFile)
        fixSetGroupID(destFile)
        ek.ek(symlink, destFile, srcFile)
    except Exception:
        # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        logger.log(u"Failed to create symlink of " + srcFile + " at " + destFile + ". Copying instead", logger.ERROR)
        copyFile(srcFile, destFile)
+
+
def make_dirs(path):
    """
    Creates any folders that are missing and assigns them the permissions of their
    parents.

    Returns True when the path exists (or was fully created), False on failure.
    """

    logger.log(u"Checking if the path " + path + " already exists", logger.DEBUG)

    if not ek.ek(os.path.isdir, path):
        # Windows, create all missing folders
        if os.name == 'nt' or os.name == 'ce':
            try:
                logger.log(u"Folder " + path + " didn't exist, creating it", logger.DEBUG)
                ek.ek(os.makedirs, path)
            except (OSError, IOError), e:
                logger.log(u"Failed creating " + path + " : " + ex(e), logger.ERROR)
                return False

        # not Windows, create all missing folders and set permissions
        else:
            sofar = ''
            folder_list = path.split(os.path.sep)

            # look through each subfolder and make sure they all exist
            for cur_folder in folder_list:
                sofar += cur_folder + os.path.sep

                # if it exists then just keep walking down the line
                if ek.ek(os.path.isdir, sofar):
                    continue

                try:
                    logger.log(u"Folder " + sofar + " didn't exist, creating it", logger.DEBUG)
                    ek.ek(os.mkdir, sofar)
                    # use normpath to remove end separator, otherwise checks permissions against itself
                    chmodAsParent(ek.ek(os.path.normpath, sofar))
                    # do the library update for synoindex
                    notifiers.synoindex_notifier.addFolder(sofar)
                except (OSError, IOError), e:
                    logger.log(u"Failed creating " + sofar + " : " + ex(e), logger.ERROR)
                    return False

    return True
+
+
def rename_ep_file(cur_path, new_path, old_path_length=0):
    """
    Creates all folders needed to move a file to its new location, renames it, then cleans up any folders
    left that are now empty.

    cur_path: The absolute path to the file you want to move/rename
    new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION
    old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION

    Returns True on success, False when the rename failed.
    """

    new_dest_dir, new_dest_name = os.path.split(new_path)  # @UnusedVariable

    if old_path_length == 0 or old_path_length > len(cur_path):
        # approach from the right
        cur_file_name, cur_file_ext = os.path.splitext(cur_path)  # @UnusedVariable
    else:
        # approach from the left
        cur_file_ext = cur_path[old_path_length:]
        cur_file_name = cur_path[:old_path_length]

    if cur_file_ext[1:] in subtitleExtensions:
        # Extract subtitle language from filename (e.g. "show.en.srt" -> "en")
        sublang = os.path.splitext(cur_file_name)[1][1:]

        # Check if the language extracted from filename is a valid language
        try:
            language = subliminal.language.Language(sublang, strict=True)
            # keep the language code as part of the extension
            cur_file_ext = '.' + sublang + cur_file_ext
        except ValueError:
            pass

    # put the extension on the incoming file
    new_path += cur_file_ext

    make_dirs(os.path.dirname(new_path))

    # move the file
    try:
        logger.log(u"Renaming file from " + cur_path + " to " + new_path)
        ek.ek(shutil.move, cur_path, new_path)
    except (OSError, IOError), e:
        logger.log(u"Failed renaming " + cur_path + " to " + new_path + ": " + ex(e), logger.ERROR)
        return False

    # clean up any old folders that are empty
    delete_empty_folders(ek.ek(os.path.dirname, cur_path))

    return True
+
+
def delete_empty_folders(check_empty_dir, keep_dir=None):
    """
    Walks backwards up the path and deletes any empty folders found.

    check_empty_dir: The path to clean (absolute path to a folder)
    keep_dir: Clean until this path is reached
    """

    # treat check_empty_dir as empty when it only contains these items
    ignore_items = []

    logger.log(u"Trying to clean any empty folders under " + check_empty_dir)

    # as long as the folder exists and doesn't contain any files, delete it
    while ek.ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
        check_files = ek.ek(os.listdir, check_empty_dir)

        if not check_files or (len(check_files) <= len(ignore_items) and all(
                [check_file in ignore_items for check_file in check_files])):
            # directory is empty or contains only ignore_items
            try:
                logger.log(u"Deleting empty folder: " + check_empty_dir)
                # need shutil.rmtree when ignore_items is really implemented
                ek.ek(os.rmdir, check_empty_dir)
                # do the library update for synoindex
                notifiers.synoindex_notifier.deleteFolder(check_empty_dir)
            except OSError, e:
                logger.log(u"Unable to delete " + check_empty_dir + ": " + repr(e) + " / " + str(e), logger.WARNING)
                break
            # move up one level and repeat
            check_empty_dir = ek.ek(os.path.dirname, check_empty_dir)
        else:
            break
+
+
def fileBitFilter(mode):
    """Return mode with execute and set-uid/set-gid bits cleared (regular files shouldn't carry them)."""
    unwanted = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_ISUID | stat.S_ISGID
    return mode & ~unwanted
+
+
def chmodAsParent(childPath):
    """Give childPath the same permission bits as its parent directory.

    Files get the parent mode with execute/setuid/setgid bits stripped
    (via fileBitFilter); directories get the parent mode verbatim. No-op on
    Windows, when the mode already matches, or when we are neither root nor
    the file's owner.
    """
    if os.name == 'nt' or os.name == 'ce':
        return

    parentPath = ek.ek(os.path.dirname, childPath)

    if not parentPath:
        logger.log(u"No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG)
        return

    parentPathStat = ek.ek(os.stat, parentPath)
    parentMode = stat.S_IMODE(parentPathStat[stat.ST_MODE])

    childPathStat = ek.ek(os.stat, childPath)
    childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE])

    if ek.ek(os.path.isfile, childPath):
        # regular files shouldn't inherit execute/suid/sgid bits
        childMode = fileBitFilter(parentMode)
    else:
        childMode = parentMode

    if childPath_mode == childMode:
        return

    childPath_owner = childPathStat.st_uid
    user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX

    if user_id != 0 and user_id != childPath_owner:
        logger.log(u"Not running as root or owner of " + childPath + ", not trying to set permissions", logger.DEBUG)
        return

    try:
        ek.ek(os.chmod, childPath, childMode)
        logger.log(u"Setting permissions for %s to %o as parent directory has %o" % (childPath, childMode, parentMode),
                   logger.DEBUG)
    except OSError:
        logger.log(u"Failed to set permission for %s to %o" % (childPath, childMode), logger.ERROR)
+
+
def fixSetGroupID(childPath):
    """Honor the parent directory's set-group-ID bit by giving childPath the parent's group.

    No-op on Windows, when the bit is unset, when the group already matches,
    or when we are neither root nor the file's owner.
    """
    if os.name == 'nt' or os.name == 'ce':
        return

    parentPath = ek.ek(os.path.dirname, childPath)
    parentStat = ek.ek(os.stat, parentPath)
    parentMode = stat.S_IMODE(parentStat[stat.ST_MODE])

    if parentMode & stat.S_ISGID:
        parentGID = parentStat[stat.ST_GID]
        childStat = ek.ek(os.stat, childPath)
        childGID = childStat[stat.ST_GID]

        if childGID == parentGID:
            return

        childPath_owner = childStat.st_uid
        user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX

        if user_id != 0 and user_id != childPath_owner:
            logger.log(u"Not running as root or owner of " + childPath + ", not trying to set the set-group-ID",
                       logger.DEBUG)
            return

        try:
            # -1 keeps the owner unchanged, only the group is set
            ek.ek(os.chown, childPath, -1, parentGID)  # @UndefinedVariable - only available on UNIX
            logger.log(u"Respecting the set-group-ID bit on the parent directory for %s" % (childPath), logger.DEBUG)
        except OSError:
            logger.log(
                u"Failed to respect the set-group-ID bit on the parent directory for %s (setting group ID %i)" % (
                    childPath, parentGID), logger.ERROR)
+
+
def is_anime_in_show_list():
    """True when at least one show in the global show list is flagged as anime."""
    return any(show.is_anime for show in sickbeard.showList)
+
+
def update_anime_support():
    # Refresh the global anime-support flag from the current show list
    sickbeard.ANIMESUPPORT = is_anime_in_show_list()
+
+
def get_absolute_number_from_season_and_episode(show, season, episode):
    """Map (season, episode) to the episode's absolute number via the tv_episodes table.

    Returns None when season/episode is falsy or no single DB row matches.
    """
    absolute_number = None

    if season and episode:
        myDB = db.DBConnection()
        sql = "SELECT * FROM tv_episodes WHERE showid = ? and season = ? and episode = ?"
        sqlResults = myDB.select(sql, [show.indexerid, season, episode])

        if len(sqlResults) == 1:
            absolute_number = int(sqlResults[0]["absolute_number"])
            logger.log(
                "Found absolute_number:" + str(absolute_number) + " by " + str(season) + "x" + str(episode),
                logger.DEBUG)
        else:
            logger.log(
                "No entries for absolute number in show: " + show.name + " found using " + str(season) + "x" + str(
                    episode),
                logger.DEBUG)

    return absolute_number
+
+
def get_all_episodes_from_absolute_number(show, absolute_numbers, indexer_id=None):
    """Resolve absolute episode numbers to a (season, [episode numbers]) pair for a show.

    show may be None when indexer_id is given. Returns (None, []) when nothing
    can be resolved.
    """
    episodes = []
    season = None

    if len(absolute_numbers):
        if not show and indexer_id:
            show = findCertainShow(sickbeard.showList, indexer_id)

        for absolute_number in absolute_numbers if show else []:
            ep = show.getEpisode(None, None, absolute_number=absolute_number)
            if ep:
                episodes.append(ep.episode)
                season = ep.season  # this will always take the last found season, so episodes that cross a season border are not handled well

    return (season, episodes)
+
+
def sanitizeSceneName(name, ezrss=False, anime=False):
    """
    Takes a show name and returns the "scenified" version of it.

    ezrss: If true the scenified version will follow EZRSS's cracksmoker rules as best as possible

    anime: Some show have a ' in their name(Kuroko's Basketball) and is needed for search.

    Returns: A string containing the scene version of the show name given.
    """

    if not name:
        return ''

    # anime: removed ' for Kuroko's Basketball
    if anime:
        bad_chars = u",:()!?\u2019"
    # ezrss leaves : and ! in their show names as far as I can tell
    elif ezrss:
        bad_chars = u",()'?\u2019"
    else:
        bad_chars = u",:()'!?\u2019"

    # strip out any bad chars
    for bad_char in bad_chars:
        name = name.replace(bad_char, "")

    # tidy up stuff that doesn't belong in scene names
    name = name.replace("- ", ".").replace(" ", ".").replace("&", "and").replace('/', '.')
    # collapse runs of dots introduced above
    name = re.sub(r"\.\.*", ".", name)

    if name.endswith('.'):
        name = name[:-1]

    return name
+
+
# Whitelisted AST binary operators for arithmeticEval.
# NOTE(review): operator.div exists only on Python 2; a Python 3 port would
# need operator.truediv (or floordiv, depending on desired semantics).
_binOps = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.div,
    ast.Mod: operator.mod
}
+
+
def arithmeticEval(s):
    """
    A safe eval supporting basic arithmetic operations (+, -, *, /, %).

    :param s: expression to evaluate
    :return: value
    :raises Exception: when the expression contains anything but string/number
        literals and the whitelisted binary operators
    """
    # Self-contained operator table instead of the module-level _binOps dict:
    # operator.div only exists on Python 2, so fall back to truediv elsewhere
    bin_ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: getattr(operator, 'div', operator.truediv),
        ast.Mod: operator.mod,
    }

    node = ast.parse(s, mode='eval')

    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        elif isinstance(node, ast.Str):
            return node.s
        elif isinstance(node, ast.Num):
            return node.n
        elif isinstance(node, ast.BinOp):
            return bin_ops[type(node.op)](_eval(node.left), _eval(node.right))
        else:
            raise Exception('Unsupported type {}'.format(node))

    return _eval(node.body)
+
def create_https_certificates(ssl_cert, ssl_key):
    """
    Create self-signed HTTPS certificates and store in paths 'ssl_cert' and 'ssl_key'.

    Returns True on success, False when pyOpenSSL is missing or writing fails.
    """
    try:
        from OpenSSL import crypto  # @UnresolvedImport
        from lib.certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, \
            serial  # @UnresolvedImport
    except Exception, e:
        logger.log(u"pyopenssl module missing, please install for https access", logger.WARNING)
        return False

    # Create the CA Certificate
    cakey = createKeyPair(TYPE_RSA, 1024)
    careq = createCertRequest(cakey, CN='Certificate Authority')
    cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years

    # Create the server certificate, signed by the CA above
    cname = 'SickRage'
    pkey = createKeyPair(TYPE_RSA, 1024)
    req = createCertRequest(pkey, CN=cname)
    cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years

    # Save the key and certificate to disk
    try:
        open(ssl_key, 'w').write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
        open(ssl_cert, 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    except:
        logger.log(u"Error creating SSL key and certificate", logger.ERROR)
        return False

    return True
+
def backupVersionedFile(old_file, version):
    """Copy old_file to old_file.v<version>, retrying up to 10 times.

    Returns True when the backup exists (or there was nothing to back up),
    False after 10 failed attempts.
    """
    numTries = 0

    new_file = old_file + '.' + 'v' + str(version)

    while not ek.ek(os.path.isfile, new_file):
        if not ek.ek(os.path.isfile, old_file):
            logger.log(u"Not creating backup, " + old_file + " doesn't exist", logger.DEBUG)
            break

        try:
            logger.log(u"Trying to back up " + old_file + " to " + new_file, logger.DEBUG)
            shutil.copy(old_file, new_file)
            logger.log(u"Backup done", logger.DEBUG)
            break
        except Exception, e:
            logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + ex(e), logger.WARNING)
            numTries += 1
            time.sleep(1)
            logger.log(u"Trying again.", logger.DEBUG)

        if numTries >= 10:
            logger.log(u"Unable to back up " + old_file + " to " + new_file + " please do it manually.", logger.ERROR)
            return False

    return True
+
+
def restoreVersionedFile(backup_file, version):
    """Restore backup_file (base name + version extension) over its base name.

    The current file is first moved aside to base.r<version> as a safety copy,
    then base.v<version> is copied into place (up to 10 attempts).
    Returns True on success, False otherwise.
    """
    numTries = 0

    new_file, backup_version = os.path.splitext(backup_file)
    restore_file = new_file + '.' + 'v' + str(version)

    if not ek.ek(os.path.isfile, new_file):
        logger.log(u"Not restoring, " + new_file + " doesn't exist", logger.DEBUG)
        return False

    try:
        # keep a safety copy of the current file before overwriting it
        logger.log(
            u"Trying to backup " + new_file + " to " + new_file + "." + "r" + str(version) + " before restoring backup",
            logger.DEBUG)
        shutil.move(new_file, new_file + '.' + 'r' + str(version))
    except Exception, e:
        logger.log(
            u"Error while trying to backup DB file " + restore_file + " before proceeding with restore: " + ex(e),
            logger.WARNING)
        return False

    while not ek.ek(os.path.isfile, new_file):
        if not ek.ek(os.path.isfile, restore_file):
            logger.log(u"Not restoring, " + restore_file + " doesn't exist", logger.DEBUG)
            break

        try:
            logger.log(u"Trying to restore " + restore_file + " to " + new_file, logger.DEBUG)
            shutil.copy(restore_file, new_file)
            logger.log(u"Restore done", logger.DEBUG)
            break
        except Exception, e:
            logger.log(u"Error while trying to restore " + restore_file + ": " + ex(e), logger.WARNING)
            numTries += 1
            time.sleep(1)
            logger.log(u"Trying again.", logger.DEBUG)

        if numTries >= 10:
            logger.log(u"Unable to restore " + restore_file + " to " + new_file + " please do it manually.",
                       logger.ERROR)
            return False

    return True
+
+
# try to convert to int, if it fails the default will be returned
def tryInt(s, s_default=0):
    """Convert s to int, returning s_default when conversion is impossible."""
    try:
        return int(s)
    except (ValueError, TypeError):
        # narrowed from a bare except so unrelated errors are not swallowed
        return s_default
+
+
# generates a md5 hash of a file
def md5_for_file(filename, block_size=2 ** 16):
    """Return the hex MD5 digest of filename's contents, or None on any failure.

    block_size: chunk size in bytes, so large files are hashed without
    loading them fully into memory.
    """
    try:
        with open(filename, 'rb') as f:
            md5_hash = hashlib.md5()
            while True:
                data = f.read(block_size)
                if not data:
                    break
                md5_hash.update(data)
        # the with-statement closes the file; the old explicit f.close() was redundant
        return md5_hash.hexdigest()
    except Exception:
        # by design: any failure (missing file, permissions, ...) yields None
        return None
+
+
def get_lan_ip():
    """Best-effort LAN IP address of this machine; falls back to the hostname."""
    try:
        addresses = socket.gethostbyname_ex(socket.gethostname())[2]
        non_loopback = [ip for ip in addresses if not ip.startswith("127.")]
        # IndexError here (only loopback addresses) also triggers the fallback
        return non_loopback[0]
    except Exception:
        # narrowed from a bare except; unreadable one-line try/except unpacked
        return socket.gethostname()
+
def check_url(url):
    """
    Check if a URL exists without downloading the whole file.
    We only check the URL header.

    Returns True/False for reachable hosts, None when the connection fails.
    NOTE(review): uses Python-2-only names (httplib, urlparse, StandardError).
    """
    # see also http://stackoverflow.com/questions/2924422
    # http://stackoverflow.com/questions/1140661
    good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]

    host, path = urlparse.urlparse(url)[1:3]  # elems [1] and [2]
    try:
        conn = httplib.HTTPConnection(host)
        conn.request('HEAD', path)
        return conn.getresponse().status in good_codes
    except StandardError:
        return None
+
+
def anon_url(*url):
    """
    Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended.
    """
    if None in url:
        return ''
    return '%s%s' % (sickbeard.ANON_REDIRECT, ''.join(str(part) for part in url))
+
+
+"""
+Encryption
+==========
+By Pedro Jose Pereira Vieito <pvieito@gmail.com> (@pvieito)
+
+* If encryption_version==0 then return data without encryption
+* The keys should be unique for each device
+
+To add a new encryption_version:
+  1) Code your new encryption_version
+  2) Update the last encryption_version available in webserve.py
+  3) Remember to maintain old encryption versions and key generators for retrocompatibility
+"""
+
+# Key Generators
+unique_key1 = hex(uuid.getnode() ** 2)  # Used in encryption v1
+
+# Encryption Functions
def encrypt(data, encryption_version=0, decrypt=False):
    """XOR-obfuscate (or de-obfuscate) data, wrapped in base64.

    Version 1 keys on this machine's MAC address (unique_key1), version 2 on
    sickbeard.ENCRYPTION_SECRET, version 0 returns data untouched.
    decrypt=True reverses the operation (XOR with the same key is symmetric).
    """
    # Version 1: Simple XOR encryption (this is not very secure, but works)
    if encryption_version == 1:
        if decrypt:
            return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(unique_key1)))
        else:
            return base64.encodestring(
                ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(unique_key1)))).strip()
    # Version 2: Simple XOR encryption (this is not very secure, but works)
    elif encryption_version == 2:
        if decrypt:
            return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(sickbeard.ENCRYPTION_SECRET)))
        else:
            return base64.encodestring(
                ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(sickbeard.ENCRYPTION_SECRET)))).strip()
    # Version 0: Plain text
    else:
        return data
+
+
def decrypt(data, encryption_version=0):
    # XOR encryption is symmetric: decrypting is just encrypting with decrypt=True
    return encrypt(data, encryption_version, decrypt=True)
+
+
def full_sanitizeSceneName(name):
    # Scene-sanitize, then flatten separators to spaces and lowercase for comparison
    return re.sub('[. -]', ' ', sanitizeSceneName(name)).lower().lstrip()
+
+
def _check_against_names(nameInQuestion, show, season=-1):
    """True when nameInQuestion matches the show's name or one of its scene exceptions (fully sanitized)."""
    candidates = []
    if season in (-1, 1):
        candidates = [show.name]

    candidates.extend(sickbeard.scene_exceptions.get_scene_exceptions(show.indexerid, season=season))

    return any(full_sanitizeSceneName(candidate) == nameInQuestion for candidate in candidates)
+
+
def get_show(name, tryIndexers=False, trySceneExceptions=False):
    """Resolve a show name to a show object from the loaded show list.

    Tries the name cache first, then (optionally) the indexers and the
    scene-exception list. A fresh successful lookup is written back to the
    name cache. Returns None when the show can't be found.
    """
    if not sickbeard.showList:
        return

    showObj = None
    fromCache = False

    if not name:
        return showObj

    try:
        # check cache for show
        cache = sickbeard.name_cache.retrieveNameFromCache(name)
        if cache:
            fromCache = True
            showObj = findCertainShow(sickbeard.showList, int(cache))

        # try indexers
        if not showObj and tryIndexers:
            showObj = findCertainShow(sickbeard.showList,
                                      searchIndexerForShowID(full_sanitizeSceneName(name), ui=classes.ShowListUI)[2])

        # try scene exceptions
        if not showObj and trySceneExceptions:
            ShowID = sickbeard.scene_exceptions.get_scene_exception_by_name(name)[0]
            if ShowID:
                showObj = findCertainShow(sickbeard.showList, int(ShowID))

        # add show to cache (only when it wasn't already there)
        if showObj and not fromCache:
            sickbeard.name_cache.addNameToCache(name, showObj.indexerid)
    except Exception as e:
        logger.log(u"Error when attempting to find show: " + name + " in SickRage: " + str(e), logger.DEBUG)

    return showObj
+
+
def is_hidden_folder(folder):
    """
    Returns True if folder is hidden.
    On Linux based systems hidden folders start with . (dot)
    folder: Full path of folder to check
    """
    def has_hidden_attribute(filepath):
        # Windows-only: query the FILE_ATTRIBUTE_HIDDEN bit (0x2) via the
        # Win32 API; on other platforms ctypes.windll does not exist and
        # the AttributeError is swallowed.
        try:
            attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath))
            assert attrs != -1
            return bool(attrs & 2)
        except (AttributeError, AssertionError):
            return False

    def is_hidden(filepath):
        leaf = os.path.basename(os.path.abspath(filepath))
        return leaf.startswith('.') or has_hidden_attribute(filepath)

    # Only directories can be "hidden folders"; everything else is False.
    return bool(ek.ek(os.path.isdir, folder) and is_hidden(folder))
+
+
def real_path(path):
    """
    Returns: the canonicalized absolute pathname. The resulting path will have no symbolic link, '/./' or '/../' components.
    """
    resolved = ek.ek(os.path.realpath, path)
    cased = ek.ek(os.path.normcase, resolved)
    return ek.ek(os.path.normpath, cased)
+
+
def validateShow(show, season=None, episode=None):
    """Look up *show* (and optionally one episode) on its indexer.

    Returns the indexer API object when both season and episode are None,
    otherwise the episode record.  Returns None when the season/episode
    does not exist on the indexer.
    """
    indexer_lang = show.lang

    try:
        params = sickbeard.indexerApi(show.indexer).api_params.copy()

        # Only override the language when it differs from the default.
        if indexer_lang and indexer_lang != sickbeard.INDEXER_DEFAULT_LANGUAGE:
            params['language'] = indexer_lang

        t = sickbeard.indexerApi(show.indexer).indexer(**params)
        if season is None and episode is None:
            return t

        return t[show.indexerid][season][episode]
    except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
        pass
+
+
def set_up_anidb_connection():
    """Ensure there is an authenticated global AniDB connection.

    Returns True when the connection is usable; False when AniDB usage is
    disabled, credentials are missing, or connecting/authenticating fails.
    """
    if not sickbeard.USE_ANIDB:
        logger.log(u"Usage of anidb disabled. Skiping", logger.DEBUG)
        return False

    if not sickbeard.ANIDB_USERNAME and not sickbeard.ANIDB_PASSWORD:
        logger.log(u"anidb username and/or password are not set. Aborting anidb lookup.", logger.DEBUG)
        return False

    if not sickbeard.ADBA_CONNECTION:
        def anidb_logger(msg):
            return logger.log("ANIDB: " + str(msg), logger.DEBUG)

        try:
            sickbeard.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=anidb_logger)
        except Exception as e:
            logger.log(u"anidb exception msg: " + str(e))
            return False

    try:
        if sickbeard.ADBA_CONNECTION.authed():
            return True
        sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME, sickbeard.ANIDB_PASSWORD)
    except Exception as e:
        logger.log(u"anidb exception msg: " + str(e))
        return False

    return sickbeard.ADBA_CONNECTION.authed()
+
+
def makeZip(fileList, archive):
    """
    Create a deflate-compressed zip archive.

    'fileList' is a list of file names - full path each name
    'archive' is the file name for the archive with a full path

    Returns True on success, False on any error (logged, not raised).
    """
    try:
        a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED)
        try:
            for f in fileList:
                a.write(f)
        finally:
            # Always release the archive handle, even when a write fails;
            # the original leaked it on exception.
            a.close()
        return True
    except Exception as e:
        logger.log(u"Zip creation error: " + str(e), logger.ERROR)
        return False
+
+
def extractZip(archive, targetDir):
    """
    Flat-extract every file member of a zip archive into targetDir.

    'archive' is the file name for the archive with a full path
    'targetDir' is created when missing; any directory structure inside
    the archive is discarded (only the basenames are kept).

    Returns True on success, False on any error (logged, not raised).
    """
    try:
        if not os.path.exists(targetDir):
            os.mkdir(targetDir)

        # Context managers replace the Python-2-only file() builtin and
        # guarantee the handles are closed even when extraction fails.
        with zipfile.ZipFile(archive, 'r') as zip_file:
            for member in zip_file.namelist():
                filename = os.path.basename(member)
                # skip directories
                if not filename:
                    continue

                # copy file (taken from zipfile's extract)
                with zip_file.open(member) as source:
                    with open(os.path.join(targetDir, filename), "wb") as target:
                        shutil.copyfileobj(source, target)
        return True
    except Exception as e:
        logger.log(u"Zip extraction error: " + str(e), logger.ERROR)
        return False
+
+
def backupConfigZip(fileList, archive, arcname = None):
    """
    Zip up config files for a backup.

    Each file is stored under its path relative to *arcname* (the backup
    root); when arcname is None, os.path.relpath falls back to the
    current working directory.

    Returns True on success, False on any error (logged, not raised).
    """
    try:
        # 'with' ensures the archive handle is closed even when a write
        # fails; the original leaked it on exception.
        with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as a:
            for f in fileList:
                a.write(f, os.path.relpath(f, arcname))
        return True
    except Exception as e:
        logger.log(u"Zip creation error: " + str(e), logger.ERROR)
        return False
+
+
def restoreConfigZip(archive, targetDir):
    """
    Extract a config backup archive into targetDir.

    When targetDir already exists it is first moved aside to a sibling
    folder named '<leaf>-<timestamp>' so the restore starts clean.  On
    failure the (partially written) targetDir is removed.

    Returns True on success, False on any error (logged, not raised).
    """
    import ntpath
    try:
        if not os.path.exists(targetDir):
            os.mkdir(targetDir)
        else:
            def path_leaf(path):
                # Last non-empty path component, tolerant of a trailing slash.
                head, tail = ntpath.split(path)
                return tail or ntpath.basename(head)
            bakFilename = '{0}-{1}'.format(path_leaf(targetDir), datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d_%H%M%S'))
            shutil.move(targetDir, os.path.join(ntpath.dirname(targetDir), bakFilename))

        # 'with' ensures the archive handle is closed even when extraction
        # fails; the original leaked it on exception.
        with zipfile.ZipFile(archive, 'r') as zip_file:
            for member in zip_file.namelist():
                zip_file.extract(member, targetDir)
        return True
    except Exception as e:
        logger.log(u"Zip extraction error: " + str(e), logger.ERROR)
        shutil.rmtree(targetDir)
        return False
+
+
def mapIndexersToShow(showObj):
    """Return a dict {indexer_id: show_id_on_that_indexer} for showObj.

    A cached mapping from the indexer_mapping table is returned when one
    exists; otherwise every other indexer is searched by show name, any
    unambiguous (single-result) match is recorded, and the new mappings
    are persisted.  Indexers with no mapping are left at 0.
    """
    mapped = {}

    # init mapped indexers object
    for indexer in sickbeard.indexerApi().indexers:
        mapped[indexer] = showObj.indexerid if int(indexer) == int(showObj.indexer) else 0

    myDB = db.DBConnection()
    sqlResults = myDB.select(
        "SELECT * FROM indexer_mapping WHERE indexer_id = ? AND indexer = ?",
        [showObj.indexerid, showObj.indexer])

    # for each mapped entry
    for curResult in sqlResults:
        nlist = [i for i in curResult if i is not None]
        # Check if its mapped with both tvdb and tvrage.
        if len(nlist) >= 4:
            logger.log(u"Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
            mapped[int(curResult['mindexer'])] = int(curResult['mindexer_id'])
            return mapped
    else:
        # NOTE: for/else — this branch runs whenever the loop above finishes
        # without hitting the early `return` (including when sqlResults is
        # empty), i.e. whenever no usable cached mapping was found.
        sql_l = []
        for indexer in sickbeard.indexerApi().indexers:
            if indexer == showObj.indexer:
                mapped[indexer] = showObj.indexerid
                continue

            lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
            lINDEXER_API_PARMS['custom_ui'] = classes.ShowListUI
            t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)

            try:
                mapped_show = t[showObj.name]
            except Exception:
                logger.log(u"Unable to map " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
                    indexer).name + " for show: " + showObj.name + ", skipping it", logger.DEBUG)
                continue

            # Only accept an unambiguous single search result as a mapping.
            if mapped_show and len(mapped_show) == 1:
                logger.log(u"Mapping " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
                    indexer).name + " for show: " + showObj.name, logger.DEBUG)

                mapped[indexer] = int(mapped_show[0]['id'])

                logger.log(u"Adding indexer mapping to DB for show: " + showObj.name, logger.DEBUG)

                sql_l.append([
                    "INSERT OR IGNORE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer) VALUES (?,?,?,?)",
                    [showObj.indexerid, showObj.indexer, int(mapped_show[0]['id']), indexer]])

        if len(sql_l) > 0:
            myDB = db.DBConnection()
            myDB.mass_action(sql_l)

    return mapped
+
+
def touchFile(fname, atime=None):
    """Stamp both the access and modification time of *fname* to *atime*.

    Returns True on success; False when atime is None or stamping fails
    (failures are logged, not raised).
    """
    if atime is not None:
        try:
            # open() replaces the Python-2-only file() builtin; opening in
            # append mode ensures the file exists before stamping it.
            with open(fname, 'a'):
                os.utime(fname, (atime, atime))
                return True
        except Exception as e:
            # Not every Exception carries errno/strerror, so read them
            # defensively instead of risking an AttributeError here.
            err = getattr(e, 'errno', None)
            if err == errno.ENOSYS:
                logger.log(u"File air date stamping not available on your OS", logger.DEBUG)
            elif err == errno.EACCES:
                logger.log(u"File air date stamping failed(Permission denied). Check permissions for file: {0}".format(fname), logger.ERROR)
            else:
                logger.log(u"File air date stamping failed. The error is: {0} and the message is: {1}.".format(err, getattr(e, 'strerror', str(e))), logger.ERROR)

    return False
+
+
+def _getTempDir():
+    import getpass
+
+    """Returns the [system temp dir]/tvdb_api-u501 (or
+    tvdb_api-myuser)
+    """
+    if hasattr(os, 'getuid'):
+        uid = "u%d" % (os.getuid())
+    else:
+        # For Windows
+        try:
+            uid = getpass.getuser()
+        except ImportError:
+            return os.path.join(tempfile.gettempdir(), "sickrage")
+
+    return os.path.join(tempfile.gettempdir(), "sickrage-%s" % (uid))
+
def getURL(url, post_data=None, params=None, headers=None, timeout=30, session=None, json=False, proxyGlypeProxySSLwarning=None):
    """
    Returns a byte-string retrieved from the url provider.

    post_data: when set the request is a POST, otherwise a GET.
    json: when True return resp.json() instead of the raw content.
    proxyGlypeProxySSLwarning: follow-up URL fetched when a Glype proxy
    SSL interstitial page is detected in the response.
    Returns None on any request failure (failures are logged).
    """

    # request session
    cache_dir = sickbeard.CACHE_DIR or _getTempDir()
    session = CacheControl(sess=session, cache=caches.FileCache(os.path.join(cache_dir, 'sessions')))

    # request session headers
    session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
    # headers defaults to None instead of a shared mutable {} default.
    session.headers.update(headers or {})

    # request session ssl verify
    session.verify = False

    # request session parameters
    session.params = params

    try:
        # request session proxies
        if sickbeard.PROXY_SETTING:
            logger.log("Using proxy for url: " + url, logger.DEBUG)
            session.proxies = {
                "http": sickbeard.PROXY_SETTING,
                "https": sickbeard.PROXY_SETTING,
            }

        # decide if we get or post data to server
        if post_data:
            resp = session.post(url, data=post_data, timeout=timeout)
        else:
            resp = session.get(url, timeout=timeout)

        if not resp.ok:
            logger.log(u"Requested url " + url + " returned status code is " + str(
                resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
            return

        if proxyGlypeProxySSLwarning is not None:
            if re.search('The site you are attempting to browse is on a secure connection', resp.text):
                resp = session.get(proxyGlypeProxySSLwarning)

                if not resp.ok:
                    logger.log(u"GlypeProxySSLwarning: Requested url " + url + " returned status code is " + str(
                        resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
                    return

    except requests.exceptions.HTTPError as e:
        logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
        return
    except requests.exceptions.ConnectionError as e:
        logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
        return
    except requests.exceptions.Timeout as e:
        logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
        return
    except Exception:
        logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
        return

    return resp.content if not json else resp.json()
+
def download_file(url, filename, session=None):
    """Stream *url* to *filename* on disk.

    Returns True on success; on any failure the partial file is removed
    and False is returned (errors are logged, not raised).
    """
    # create session
    cache_dir = sickbeard.CACHE_DIR or _getTempDir()
    session = CacheControl(sess=session, cache=caches.FileCache(os.path.join(cache_dir, 'sessions')))

    # request session headers
    session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})

    # request session ssl verify
    session.verify = False

    # request session streaming
    session.stream = True

    # request session proxies
    if sickbeard.PROXY_SETTING:
        logger.log("Using proxy for url: " + url, logger.DEBUG)
        session.proxies = {
            "http": sickbeard.PROXY_SETTING,
            "https": sickbeard.PROXY_SETTING,
        }

    try:
        resp = session.get(url)
        if not resp.ok:
            logger.log(u"Requested url " + url + " returned status code is " + str(
                resp.status_code) + ': ' + clients.http_error_code[resp.status_code], logger.DEBUG)
            return False

        with open(filename, 'wb') as fp:
            for chunk in resp.iter_content(chunk_size=1024):
                if chunk:
                    fp.write(chunk)
                    fp.flush()

        chmodAsParent(filename)
    except requests.exceptions.HTTPError as e:
        _remove_file_failed(filename)
        logger.log(u"HTTP error " + str(e.errno) + " while loading URL " + url, logger.WARNING)
        return False
    except requests.exceptions.ConnectionError as e:
        _remove_file_failed(filename)
        logger.log(u"Connection error " + str(e.message) + " while loading URL " + url, logger.WARNING)
        return False
    except requests.exceptions.Timeout as e:
        _remove_file_failed(filename)
        logger.log(u"Connection timed out " + str(e.message) + " while loading URL " + url, logger.WARNING)
        return False
    except EnvironmentError as e:
        _remove_file_failed(filename)
        logger.log(u"Unable to save the file: " + ex(e), logger.ERROR)
        return False
    except Exception:
        _remove_file_failed(filename)
        logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
        return False

    return True
+
+
def clearCache(force=False):
    """Delete cache files older than 12 hours (all files when *force* is
    True).  The 'rss' and 'images' subfolders are never touched.
    """
    update_datetime = datetime.datetime.now()

    # clean out cache directory, remove everything > 12 hours old
    if sickbeard.CACHE_DIR:
        logger.log(u"Trying to clean cache folder " + sickbeard.CACHE_DIR)

        # Does our cache_dir exists
        if not ek.ek(os.path.isdir, sickbeard.CACHE_DIR):
            logger.log(u"Can't clean " + sickbeard.CACHE_DIR + " if it doesn't exist", logger.WARNING)
        else:
            max_age = datetime.timedelta(hours=12)

            # Get all our cache files
            exclude = ['rss', 'images']
            for cache_root, cache_dirs, cache_files in os.walk(sickbeard.CACHE_DIR, topdown=True):
                # Prune excluded folders in-place so os.walk skips them.
                cache_dirs[:] = [d for d in cache_dirs if d not in exclude]

                # renamed loop variable from 'file' to avoid shadowing the builtin
                for cur_file in cache_files:
                    cache_file = ek.ek(os.path.join, cache_root, cur_file)

                    if ek.ek(os.path.isfile, cache_file):
                        cache_file_modified = datetime.datetime.fromtimestamp(
                            ek.ek(os.path.getmtime, cache_file))

                        if force or (update_datetime - cache_file_modified > max_age):
                            try:
                                ek.ek(os.remove, cache_file)
                            except OSError as e:
                                # Give up on this directory after the first failure.
                                logger.log(u"Unable to clean " + cache_root + ": " + repr(e) + " / " + str(e),
                                           logger.WARNING)
                                break
+
def get_size(start_path='.'):
    """Return the total size in bytes of all files under *start_path*."""
    return sum(
        ek.ek(os.path.getsize, ek.ek(os.path.join, dirpath, fname))
        for dirpath, dirnames, filenames in ek.ek(os.walk, start_path)
        for fname in filenames)
+
def generateApiKey():
    """ Return a new randomized API_KEY
    """

    try:
        from hashlib import md5
    except ImportError:
        from md5 import md5

    # Create some values to seed md5
    t = str(time.time())
    r = str(random.random())

    # Create the md5 instance and give it the current time.
    # encode() keeps this working on Python 3, where md5() requires bytes;
    # on Python 2 encoding an ASCII str yields the same bytes, so the
    # digest is unchanged.
    m = md5(t.encode('utf-8'))

    # Update the md5 instance with the random variable
    m.update(r.encode('utf-8'))

    # Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
    logger.log(u"New API generated")
    return m.hexdigest()
+
def pretty_filesize(file_bytes):
    """Format a byte count as a human readable string, e.g. '1.25 GB'."""
    file_bytes = float(file_bytes)
    # (threshold, unit label), largest unit first; anything below 1 KB
    # falls through to plain bytes.
    for threshold, unit in ((1099511627776, 'TB'), (1073741824, 'GB'),
                            (1048576, 'MB'), (1024, 'KB')):
        if file_bytes >= threshold:
            return '%.2f %s' % (file_bytes / threshold, unit)
    return '%.2f b' % file_bytes
+
# Run this module's doctests when executed directly.
# NOTE(review): this __main__ hook sits mid-file, before several more
# definitions below — presumably a leftover; confirm it is still wanted.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
+
def remove_article(text=''):
    """Strip a leading English article ('A', 'An', 'The') from *text*.

    The negative lookahead deliberately leaves titles such as 'A to Z'
    untouched.  Matching is case-insensitive.
    """
    pattern = re.compile(r'^(?:(?:A(?!\s+to)n?)|The)\s(\w)', re.IGNORECASE)
    return pattern.sub(r'\1', text)
+
def generateCookieSecret():
    """Return a random base64-encoded secret (32 random bytes) suitable
    for signing cookies."""
    random_bytes = uuid.uuid4().bytes + uuid.uuid4().bytes
    return base64.b64encode(random_bytes)
+
def verify_freespace(src, dest, oldfile=None):
    """ Checks if the target system has enough free space to copy or move a file,
    Returns true if there is, False if there isn't.
    Also returns True if the OS doesn't support this option

    src: full path of the file being moved/copied (its size is the space needed)
    dest: a path on the destination filesystem
    oldfile: optional object (or list of objects) with a .location file path;
             the sizes of existing ones count as reclaimable space
    """
    if not isinstance(oldfile, list):
        oldfile = [oldfile]

    logger.log("Trying to determine free space on destination drive", logger.DEBUG)

    if hasattr(os, 'statvfs'):  # POSIX
        def disk_usage(path):
            st = os.statvfs(path)
            free = st.f_bavail * st.f_frsize
            return free

    elif os.name == 'nt':       # Windows
        import sys

        def disk_usage(path):
            _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
                               ctypes.c_ulonglong()
            # Python 3 short-circuits before the py2-only `unicode` name.
            if sys.version_info >= (3,) or isinstance(path, unicode):
                fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
            else:
                fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
            ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
            if ret == 0:
                logger.log("Unable to determine free space, something went wrong", logger.WARNING)
                raise ctypes.WinError()
            return free.value
    else:
        logger.log("Unable to determine free space on your OS")
        return True

    if not ek.ek(os.path.isfile, src):
        logger.log("A path to a file is required for the source. " + src + " is not a file.", logger.WARNING)
        return False

    try:
        diskfree = disk_usage(dest)
    except Exception:
        # was a bare 'except:'; keep the best-effort behaviour but stop
        # swallowing SystemExit/KeyboardInterrupt
        logger.log("Unable to determine free space, so I will assume there is enough.", logger.WARNING)
        return True

    neededspace = ek.ek(os.path.getsize, src)

    if oldfile:
        # renamed from 'file' (builtin shadow); the None guard prevents an
        # AttributeError when the default oldfile=None arrives here as [None]
        for cur_file in oldfile:
            if cur_file and os.path.isfile(cur_file.location):
                diskfree += ek.ek(os.path.getsize, cur_file.location)

    if diskfree > neededspace:
        return True
    else:
        logger.log("Not enough free space: Needed: " + str(neededspace) + " bytes (" + pretty_filesize(neededspace) + "), found: " + str(diskfree) + " bytes (" + pretty_filesize(diskfree) + ")", logger.WARNING)
        return False
\ No newline at end of file
diff --git a/sickbeard/indexers/indexer_api.py b/sickbeard/indexers/indexer_api.py
index cc2550e961960e1a6271a05a74d44c8811a72ae4..130dcdcf71db71604f5438db34b3807030be233f 100644
--- a/sickbeard/indexers/indexer_api.py
+++ b/sickbeard/indexers/indexer_api.py
@@ -36,7 +36,12 @@ class indexerApi(object):
     def config(self):
         if self.indexerID:
             return indexerConfig[self.indexerID]
-        return initConfig
+        _ = initConfig
+        if sickbeard.INDEXER_DEFAULT_LANGUAGE in _:
+            del _[_['valid_languages'].index(sickbeard.INDEXER_DEFAULT_LANGUAGE)]
+        _['valid_languages'].sort()
+        _['valid_languages'].insert(0, sickbeard.INDEXER_DEFAULT_LANGUAGE)
+        return _
 
     @property
     def name(self):
diff --git a/sickbeard/metadata/generic.py b/sickbeard/metadata/generic.py
index 6070c10950fb2d93547d24300e188f88bbaf4c1f..bd14875d003131092c3a2e624490ddc5851aa351 100644
--- a/sickbeard/metadata/generic.py
+++ b/sickbeard/metadata/generic.py
@@ -757,7 +757,7 @@ class GenericMetadata():
 
             lINDEXER_API_PARMS['banners'] = True
 
-            if indexer_lang and not indexer_lang == 'en':
+            if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 lINDEXER_API_PARMS['language'] = indexer_lang
 
             if show_obj.dvdorder != 0:
@@ -827,7 +827,7 @@ class GenericMetadata():
 
             lINDEXER_API_PARMS['banners'] = True
 
-            if indexer_lang and not indexer_lang == 'en':
+            if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 lINDEXER_API_PARMS['language'] = indexer_lang
 
             if show_obj.dvdorder != 0:
@@ -859,7 +859,7 @@ class GenericMetadata():
 
         # find the correct season in the TVDB and TVRAGE object and just copy the dict into our result dict
         for seasonArtID in seasonsArtObj.keys():
-            if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en':
+            if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']
 
         return result
@@ -884,7 +884,7 @@ class GenericMetadata():
 
             lINDEXER_API_PARMS['banners'] = True
 
-            if indexer_lang and not indexer_lang == 'en':
+            if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 lINDEXER_API_PARMS['language'] = indexer_lang
 
             t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
@@ -914,7 +914,7 @@ class GenericMetadata():
 
         # find the correct season in the TVDB and TVRAGE object and just copy the dict into our result dict
         for seasonArtID in seasonsArtObj.keys():
-            if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en':
+            if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']
 
         return result
diff --git a/sickbeard/metadata/kodi_12plus.py b/sickbeard/metadata/kodi_12plus.py
index 9a916b6d357a98ca1d09cceb007dd009b1f92663..1dba0d1730678ff7d099e39ee961a235ac39b91d 100644
--- a/sickbeard/metadata/kodi_12plus.py
+++ b/sickbeard/metadata/kodi_12plus.py
@@ -101,7 +101,7 @@ class KODI_12PlusMetadata(generic.GenericMetadata):
 
         lINDEXER_API_PARMS['actors'] = True
 
-        if indexer_lang and not indexer_lang == 'en':
+        if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
             lINDEXER_API_PARMS['language'] = indexer_lang
 
         if show_obj.dvdorder != 0:
@@ -222,7 +222,7 @@ class KODI_12PlusMetadata(generic.GenericMetadata):
 
         lINDEXER_API_PARMS['actors'] = True
 
-        if indexer_lang and not indexer_lang == 'en':
+        if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
             lINDEXER_API_PARMS['language'] = indexer_lang
 
         if ep_obj.show.dvdorder != 0:
diff --git a/sickbeard/metadata/mede8er.py b/sickbeard/metadata/mede8er.py
index b4a9d7f645a6efea07da282fcd7f225d406df168..0fe31761a6395f2d933b4807ef5de940d3940778 100644
--- a/sickbeard/metadata/mede8er.py
+++ b/sickbeard/metadata/mede8er.py
@@ -1,449 +1,449 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import datetime
-import os.path
-
-import sickbeard
-
-import mediabrowser
-
-from sickbeard import logger, exceptions, helpers
-from sickbeard.exceptions import ex
-from sickbeard import encodingKludge as ek
-
-try:
-    import xml.etree.cElementTree as etree
-except ImportError:
-    import elementtree.ElementTree as etree
-
-
-class Mede8erMetadata(mediabrowser.MediaBrowserMetadata):
-    """
-    Metadata generation class for Mede8er based on the MediaBrowser.
-
-    The following file structure is used:
-
-    show_root/series.xml                    (show metadata)
-    show_root/folder.jpg                    (poster)
-    show_root/fanart.jpg                    (fanart)
-    show_root/Season ##/folder.jpg          (season thumb)
-    show_root/Season ##/filename.ext        (*)
-    show_root/Season ##/filename.xml        (episode metadata)
-    show_root/Season ##/filename.jpg        (episode thumb)
-    """
-
-    def __init__(self,
-                 show_metadata=False,
-                 episode_metadata=False,
-                 fanart=False,
-                 poster=False,
-                 banner=False,
-                 episode_thumbnails=False,
-                 season_posters=False,
-                 season_banners=False,
-                 season_all_poster=False,
-                 season_all_banner=False):
-
-        mediabrowser.MediaBrowserMetadata.__init__(self,
-                                         show_metadata,
-                                         episode_metadata,
-                                         fanart,
-                                         poster,
-                                         banner,
-                                         episode_thumbnails,
-                                         season_posters,
-                                         season_banners,
-                                         season_all_poster,
-                                         season_all_banner)
-
-        self.name = "Mede8er"
-
-        self.fanart_name = "fanart.jpg"
-
-        # web-ui metadata template
-        # self.eg_show_metadata = "series.xml"
-        self.eg_episode_metadata = "Season##\\<i>filename</i>.xml"
-        self.eg_fanart = "fanart.jpg"
-        # self.eg_poster = "folder.jpg"
-        # self.eg_banner = "banner.jpg"
-        self.eg_episode_thumbnails = "Season##\\<i>filename</i>.jpg"
-        # self.eg_season_posters = "Season##\\folder.jpg"
-        # self.eg_season_banners = "Season##\\banner.jpg"
-        # self.eg_season_all_poster = "<i>not supported</i>"
-        # self.eg_season_all_banner = "<i>not supported</i>"
-
-    def get_episode_file_path(self, ep_obj):
-        return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension)
-
-    def get_episode_thumb_path(self, ep_obj):
-        return helpers.replaceExtension(ep_obj.location, 'jpg')
-
-    def _show_data(self, show_obj):
-        """
-        Creates an elementTree XML structure for a MediaBrowser-style series.xml
-        returns the resulting data object.
-
-        show_obj: a TVShow instance to create the NFO for
-        """
-
-        indexer_lang = show_obj.lang
-        lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()
-
-        lINDEXER_API_PARMS['actors'] = True
-
-        if indexer_lang and not indexer_lang == 'en':
-            lINDEXER_API_PARMS['language'] = indexer_lang
-
-        if show_obj.dvdorder != 0:
-            lINDEXER_API_PARMS['dvdorder'] = True
-
-        t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
-
-        rootNode = etree.Element("details")
-        tv_node = etree.SubElement(rootNode, "movie")
-        tv_node.attrib["isExtra"] = "false"
-        tv_node.attrib["isSet"] = "false"
-        tv_node.attrib["isTV"] = "true"
-
-        try:
-            myShow = t[int(show_obj.indexerid)]
-        except sickbeard.indexer_shownotfound:
-            logger.log(u"Unable to find show with id " + str(show_obj.indexerid) + " on tvdb, skipping it", logger.ERROR)
-            raise
-
-        except sickbeard.indexer_error:
-            logger.log(u"TVDB is down, can't use its data to make the NFO", logger.ERROR)
-            raise
-
-        # check for title and id
-        try:
-            if getattr(myShow, 'seriesname', None) == None or getattr(myShow, 'seriesname', "") == "" or getattr(myShow, 'id', None) == None or getattr(myShow, 'id', "") == "":
-                logger.log(u"Incomplete info for show with id " + str(show_obj.indexerid) + " on tvdb, skipping it", logger.ERROR)
-                return False
-        except sickbeard.indexer_attributenotfound:
-            logger.log(u"Incomplete info for show with id " + str(show_obj.indexerid) + " on tvdb, skipping it", logger.ERROR)
-            return False
-
-        SeriesName = etree.SubElement(tv_node, "title")
-        SeriesName.text = myShow['seriesname']
-        
-        Genres = etree.SubElement(tv_node, "genres")
-        if getattr(myShow, "genre", None) != None:
-            for genre in myShow['genre'].split('|'):
-                if genre and genre.strip():
-                    cur_genre = etree.SubElement(Genres, "Genre")
-                    cur_genre.text = genre.strip()
-
-        FirstAired = etree.SubElement(tv_node, "premiered")
-        if getattr(myShow, 'firstaired', None) != None:
-            FirstAired.text = myShow['firstaired']
-
-        year = etree.SubElement(tv_node, "year")
-        if getattr(myShow, "firstaired", None) != None:
-            try:
-                year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year)
-                if year_text:
-                    year.text = year_text
-            except:
-                pass
-        plot = etree.SubElement(tv_node, "plot")
-        if getattr(myShow, 'overview', None) is not None:
-            plot.text = myShow["overview"]
-
-        if getattr(myShow, 'rating', None) != None:
-            try:
-                rating = int((float(myShow['rating']) * 10))
-            except ValueError:
-                rating = 0
-            Rating = etree.SubElement(tv_node, "rating")
-            rating_text = str(rating)
-            if rating_text != None:
-                Rating.text = rating_text
-
-        Status = etree.SubElement(tv_node, "status")
-        if getattr(myShow, 'status', None) != None:
-            Status.text = myShow['status']
-
-        mpaa = etree.SubElement(tv_node, "mpaa")
-        if getattr(myShow, "contentrating", None) != None:
-            mpaa.text = myShow["contentrating"]
-
-        IMDB_ID = etree.SubElement(tv_node, "id")
-        if getattr(myShow, 'imdb_id', None) != None:
-            IMDB_ID.attrib["moviedb"] = "imdb"
-            IMDB_ID.text = myShow['imdb_id']
-
-        indexerid = etree.SubElement(tv_node, "indexerid")
-        if getattr(myShow, 'id', None) != None:
-            indexerid.text = myShow['id']
-
-        Runtime = etree.SubElement(tv_node, "runtime")
-        if getattr(myShow, 'runtime', None) != None:
-            Runtime.text = myShow['runtime']
-
-        cast = etree.SubElement(tv_node, "cast")
-
-        if getattr(myShow, '_actors', None) is not None:
-            for actor in myShow['_actors']:
-                cur_actor_name_text = getattr(actor, 'name', None)
-                if cur_actor_name_text != None and cur_actor_name_text.strip():
-                    cur_actor = etree.SubElement(cast, "actor")
-                    cur_actor.text = cur_actor_name_text.strip()
-
-        helpers.indentXML(rootNode)
-
-        data = etree.ElementTree(rootNode)
-
-        return data
-
-    def _ep_data(self, ep_obj):
-        """
-        Creates an elementTree XML structure for a MediaBrowser style episode.xml
-        and returns the resulting data object.
-
-        show_obj: a TVShow instance to create the NFO for
-        """
-
-        eps_to_write = [ep_obj] + ep_obj.relatedEps
-
-        indexer_lang = ep_obj.show.lang
-
-        try:
-            # There's gotta be a better way of doing this but we don't wanna
-            # change the language value elsewhere
-            lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy()
-
-            if indexer_lang and not indexer_lang == 'en':
-                lINDEXER_API_PARMS['language'] = indexer_lang
-
-            if ep_obj.show.dvdorder != 0:
-                lINDEXER_API_PARMS['dvdorder'] = True
-
-            t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
-            myShow = t[ep_obj.show.indexerid]
-        except sickbeard.indexer_shownotfound, e:
-            raise exceptions.ShowNotFoundException(e.message)
-        except sickbeard.indexer_error, e:
-            logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR)
-            return False
-
-        rootNode = etree.Element("details")
-        movie = etree.SubElement(rootNode, "movie")
-
-        movie.attrib["isExtra"] = "false"
-        movie.attrib["isSet"] = "false"
-        movie.attrib["isTV"] = "true"
-
-        # write an MediaBrowser XML containing info for all matching episodes
-        for curEpToWrite in eps_to_write:
-
-            try:
-                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
-            except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
-                logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
-                return None
-
-            if curEpToWrite == ep_obj:
-                # root (or single) episode
-
-                # default to today's date for specials if firstaired is not set
-                if getattr(myEp, 'firstaired', None) == None and ep_obj.season == 0:
-                    myEp['firstaired'] = str(datetime.date.fromordinal(1))
-
-                if getattr(myEp, 'episodename', None) == None or getattr(myEp, 'firstaired', None) == None:
-                    return None
-
-                episode = movie
-
-                EpisodeName = etree.SubElement(episode, "title")
-                if curEpToWrite.name != None:
-                    EpisodeName.text = curEpToWrite.name
-                else:
-                    EpisodeName.text = ""
-
-                SeasonNumber = etree.SubElement(episode, "season")
-                SeasonNumber.text = str(curEpToWrite.season)
-
-                EpisodeNumber = etree.SubElement(episode, "episode")
-                EpisodeNumber.text = str(ep_obj.episode)
-
-                year = etree.SubElement(episode, "year")
-                if getattr(myShow, "firstaired", None) != None:
-                    try:
-                        year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year)
-                        if year_text:
-                            year.text = year_text
-                    except:
-                        pass
-
-                plot = etree.SubElement(episode, "plot")
-                if getattr(myShow, "overview", None) != None:
-                    plot.text = myShow["overview"]
-
-                Overview = etree.SubElement(episode, "episodeplot")
-                if curEpToWrite.description != None:
-                    Overview.text = curEpToWrite.description
-                else:
-                    Overview.text = ""
-
-                mpaa = etree.SubElement(episode, "mpaa")
-                if getattr(myShow, 'contentrating', None) is not None:
-                    mpaa.text = myShow["contentrating"]
-
-                if not ep_obj.relatedEps:
-                    if getattr(myEp, "rating", None) != None:
-                        try:
-                            rating = int((float(myEp['rating']) * 10))
-                        except ValueError:
-                            rating = 0
-                        Rating = etree.SubElement(episode, "rating")
-                        rating_text = str(rating)
-                        if rating_text != None:
-                            Rating.text = rating_text
-
-                director = etree.SubElement(episode, "director")
-                director_text = getattr(myEp, 'director', None)
-                if director_text != None:
-                    director.text = director_text
-
-                credits = etree.SubElement(episode, "credits")
-                credits_text = getattr(myEp, 'writer', None)
-                if credits_text != None:
-                    credits.text = credits_text
-
-                cast = etree.SubElement(episode, "cast")
-
-                if getattr(myShow, '_actors', None) is not None:
-                    for actor in myShow['_actors']:
-                        cur_actor_name_text = actor['name']
-
-                        if cur_actor_name_text != None and cur_actor_name_text.strip():
-                            cur_actor = etree.SubElement(cast, "actor")
-                            cur_actor.text = cur_actor_name_text.strip()
-
-            else:
-                # append data from (if any) related episodes
-
-                if curEpToWrite.name:
-                    if not EpisodeName.text:
-                        EpisodeName.text = curEpToWrite.name
-                    else:
-                        EpisodeName.text = EpisodeName.text + ", " + curEpToWrite.name
-
-                if curEpToWrite.description:
-                    if not Overview.text:
-                        Overview.text = curEpToWrite.description
-                    else:
-                        Overview.text = Overview.text + "\r" + curEpToWrite.description
-
-        helpers.indentXML(rootNode)
-        data = etree.ElementTree(rootNode)
-
-        return data
-
-    def write_show_file(self, show_obj):
-        """
-        Generates and writes show_obj's metadata under the given path to the
-        filename given by get_show_file_path()
-
-        show_obj: TVShow object for which to create the metadata
-
-        path: An absolute or relative path where we should put the file. Note that
-                the file name will be the default show_file_name.
-
-        Note that this method expects that _show_data will return an ElementTree
-        object. If your _show_data returns data in another format you'll need to
-        override this method.
-        """
-
-        data = self._show_data(show_obj)
-
-        if not data:
-            return False
-
-        nfo_file_path = self.get_show_file_path(show_obj)
-        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
-
-        try:
-            if not ek.ek(os.path.isdir, nfo_file_dir):
-                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
-                ek.ek(os.makedirs, nfo_file_dir)
-                helpers.chmodAsParent(nfo_file_dir)
-
-            logger.log(u"Writing show nfo file to " + nfo_file_path, logger.DEBUG)
-
-            nfo_file = ek.ek(open, nfo_file_path, 'w')
-
-            data.write(nfo_file, encoding="utf-8", xml_declaration=True)
-            nfo_file.close()
-            helpers.chmodAsParent(nfo_file_path)
-        except IOError, e:
-            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
-                       logger.ERROR)
-            return False
-
-        return True
-    
-    def write_ep_file(self, ep_obj):
-        """
-        Generates and writes ep_obj's metadata under the given path with the
-        given filename root. Uses the episode's name with the extension in
-        _ep_nfo_extension.
-
-        ep_obj: TVEpisode object for which to create the metadata
-
-        file_name_path: The file name to use for this metadata. Note that the extension
-                will be automatically added based on _ep_nfo_extension. This should
-                include an absolute path.
-
-        Note that this method expects that _ep_data will return an ElementTree
-        object. If your _ep_data returns data in another format you'll need to
-        override this method.
-        """
-
-        data = self._ep_data(ep_obj)
-
-        if not data:
-            return False
-
-        nfo_file_path = self.get_episode_file_path(ep_obj)
-        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
-
-        try:
-            if not ek.ek(os.path.isdir, nfo_file_dir):
-                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
-                ek.ek(os.makedirs, nfo_file_dir)
-                helpers.chmodAsParent(nfo_file_dir)
-
-            logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG)
-
-            nfo_file = ek.ek(open, nfo_file_path, 'w')
-
-            data.write(nfo_file, encoding="utf-8", xml_declaration = True)
-            nfo_file.close()
-            helpers.chmodAsParent(nfo_file_path)
-        except IOError, e:
-            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
-                       logger.ERROR)
-            return False
-
-        return True
-
-# present a standard "interface" from the module
-metadata_class = Mede8erMetadata
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+import os.path
+
+import sickbeard
+
+import mediabrowser
+
+from sickbeard import logger, exceptions, helpers
+from sickbeard.exceptions import ex
+from sickbeard import encodingKludge as ek
+
+try:
+    import xml.etree.cElementTree as etree
+except ImportError:
+    import elementtree.ElementTree as etree
+
+
class Mede8erMetadata(mediabrowser.MediaBrowserMetadata):
    """
    Metadata generation class for Mede8er, based on the MediaBrowser one.

    The following file structure is used:

    show_root/series.xml                    (show metadata)
    show_root/folder.jpg                    (poster)
    show_root/fanart.jpg                    (fanart)
    show_root/Season ##/folder.jpg          (season thumb)
    show_root/Season ##/filename.ext        (*)
    show_root/Season ##/filename.xml        (episode metadata)
    show_root/Season ##/filename.jpg        (episode thumb)
    """

    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):

        mediabrowser.MediaBrowserMetadata.__init__(self,
                                                   show_metadata,
                                                   episode_metadata,
                                                   fanart,
                                                   poster,
                                                   banner,
                                                   episode_thumbnails,
                                                   season_posters,
                                                   season_banners,
                                                   season_all_poster,
                                                   season_all_banner)

        self.name = "Mede8er"

        self.fanart_name = "fanart.jpg"

        # web-ui metadata template
        # self.eg_show_metadata = "series.xml"
        self.eg_episode_metadata = "Season##\\<i>filename</i>.xml"
        self.eg_fanart = "fanart.jpg"
        # self.eg_poster = "folder.jpg"
        # self.eg_banner = "banner.jpg"
        self.eg_episode_thumbnails = "Season##\\<i>filename</i>.jpg"
        # self.eg_season_posters = "Season##\\folder.jpg"
        # self.eg_season_banners = "Season##\\banner.jpg"
        # self.eg_season_all_poster = "<i>not supported</i>"
        # self.eg_season_all_banner = "<i>not supported</i>"

    def get_episode_file_path(self, ep_obj):
        """Episode metadata lives next to the video file, with an .xml extension."""
        return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension)

    def get_episode_thumb_path(self, ep_obj):
        """Episode thumbnails live next to the video file, with a .jpg extension."""
        return helpers.replaceExtension(ep_obj.location, 'jpg')

    def _show_data(self, show_obj):
        """
        Creates an elementTree XML structure for a Mede8er-style series.xml
        and returns the resulting data object.

        show_obj: a TVShow instance to create the NFO for

        Returns an ElementTree on success, False when the indexer data is
        incomplete; re-raises indexer lookup errors.
        """

        indexer_lang = show_obj.lang
        lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()

        # we need the actor list for the <cast> element below
        lINDEXER_API_PARMS['actors'] = True

        if indexer_lang and indexer_lang != sickbeard.INDEXER_DEFAULT_LANGUAGE:
            lINDEXER_API_PARMS['language'] = indexer_lang

        if show_obj.dvdorder != 0:
            lINDEXER_API_PARMS['dvdorder'] = True

        t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)

        # Mede8er re-uses the MediaBrowser "movie" node with TV flags set
        rootNode = etree.Element("details")
        tv_node = etree.SubElement(rootNode, "movie")
        tv_node.attrib["isExtra"] = "false"
        tv_node.attrib["isSet"] = "false"
        tv_node.attrib["isTV"] = "true"

        try:
            myShow = t[int(show_obj.indexerid)]
        except sickbeard.indexer_shownotfound:
            logger.log(u"Unable to find show with id " + str(show_obj.indexerid) + " on tvdb, skipping it", logger.ERROR)
            raise
        except sickbeard.indexer_error:
            logger.log(u"TVDB is down, can't use its data to make the NFO", logger.ERROR)
            raise

        # a title and an id are the bare minimum for a useful NFO
        try:
            if getattr(myShow, 'seriesname', None) in (None, "") or getattr(myShow, 'id', None) in (None, ""):
                logger.log(u"Incomplete info for show with id " + str(show_obj.indexerid) + " on tvdb, skipping it", logger.ERROR)
                return False
        except sickbeard.indexer_attributenotfound:
            logger.log(u"Incomplete info for show with id " + str(show_obj.indexerid) + " on tvdb, skipping it", logger.ERROR)
            return False

        SeriesName = etree.SubElement(tv_node, "title")
        SeriesName.text = myShow['seriesname']

        # indexer genres come as a single pipe-delimited string
        Genres = etree.SubElement(tv_node, "genres")
        if getattr(myShow, "genre", None) is not None:
            for genre in myShow['genre'].split('|'):
                if genre and genre.strip():
                    cur_genre = etree.SubElement(Genres, "Genre")
                    cur_genre.text = genre.strip()

        FirstAired = etree.SubElement(tv_node, "premiered")
        if getattr(myShow, 'firstaired', None) is not None:
            FirstAired.text = myShow['firstaired']

        year = etree.SubElement(tv_node, "year")
        if getattr(myShow, "firstaired", None) is not None:
            try:
                year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year)
                if year_text:
                    year.text = year_text
            except (ValueError, TypeError):
                # unparseable air date -- leave <year> empty
                pass

        plot = etree.SubElement(tv_node, "plot")
        if getattr(myShow, 'overview', None) is not None:
            plot.text = myShow["overview"]

        if getattr(myShow, 'rating', None) is not None:
            # indexer rating is 0-10 float; Mede8er expects 0-100 int
            try:
                rating = int(float(myShow['rating']) * 10)
            except ValueError:
                rating = 0
            Rating = etree.SubElement(tv_node, "rating")
            Rating.text = str(rating)

        Status = etree.SubElement(tv_node, "status")
        if getattr(myShow, 'status', None) is not None:
            Status.text = myShow['status']

        mpaa = etree.SubElement(tv_node, "mpaa")
        if getattr(myShow, "contentrating", None) is not None:
            mpaa.text = myShow["contentrating"]

        IMDB_ID = etree.SubElement(tv_node, "id")
        if getattr(myShow, 'imdb_id', None) is not None:
            IMDB_ID.attrib["moviedb"] = "imdb"
            IMDB_ID.text = myShow['imdb_id']

        indexerid = etree.SubElement(tv_node, "indexerid")
        if getattr(myShow, 'id', None) is not None:
            indexerid.text = myShow['id']

        Runtime = etree.SubElement(tv_node, "runtime")
        if getattr(myShow, 'runtime', None) is not None:
            Runtime.text = myShow['runtime']

        cast = etree.SubElement(tv_node, "cast")
        if getattr(myShow, '_actors', None) is not None:
            for actor in myShow['_actors']:
                # actors are dict-like records (see _ep_data); the previous
                # getattr(actor, 'name', None) lookup always returned None
                # and silently dropped every actor from the show NFO
                cur_actor_name_text = actor['name']
                if cur_actor_name_text is not None and cur_actor_name_text.strip():
                    cur_actor = etree.SubElement(cast, "actor")
                    cur_actor.text = cur_actor_name_text.strip()

        helpers.indentXML(rootNode)

        data = etree.ElementTree(rootNode)

        return data

    def _ep_data(self, ep_obj):
        """
        Creates an elementTree XML structure for a Mede8er-style episode.xml
        and returns the resulting data object.

        ep_obj: a TVEpisode instance (plus its relatedEps) to create the NFO for

        Returns an ElementTree on success, None when episode info is missing
        on the indexer, False when the indexer cannot be reached.
        """

        eps_to_write = [ep_obj] + ep_obj.relatedEps

        indexer_lang = ep_obj.show.lang

        try:
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy()

            if indexer_lang and indexer_lang != sickbeard.INDEXER_DEFAULT_LANGUAGE:
                lINDEXER_API_PARMS['language'] = indexer_lang

            if ep_obj.show.dvdorder != 0:
                lINDEXER_API_PARMS['dvdorder'] = True

            t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
            myShow = t[ep_obj.show.indexerid]
        except sickbeard.indexer_shownotfound as e:
            raise exceptions.ShowNotFoundException(e.message)
        except sickbeard.indexer_error as e:
            logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR)
            return False

        rootNode = etree.Element("details")
        movie = etree.SubElement(rootNode, "movie")

        movie.attrib["isExtra"] = "false"
        movie.attrib["isSet"] = "false"
        movie.attrib["isTV"] = "true"

        # write a single XML containing info for all matching episodes
        for curEpToWrite in eps_to_write:

            try:
                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
            except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
                logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
                return None

            if curEpToWrite == ep_obj:
                # root (or single) episode

                # default to today's date for specials if firstaired is not set
                if getattr(myEp, 'firstaired', None) is None and ep_obj.season == 0:
                    myEp['firstaired'] = str(datetime.date.fromordinal(1))

                # a name and an air date are mandatory for the root episode
                if getattr(myEp, 'episodename', None) is None or getattr(myEp, 'firstaired', None) is None:
                    return None

                episode = movie

                EpisodeName = etree.SubElement(episode, "title")
                if curEpToWrite.name is not None:
                    EpisodeName.text = curEpToWrite.name
                else:
                    EpisodeName.text = ""

                SeasonNumber = etree.SubElement(episode, "season")
                SeasonNumber.text = str(curEpToWrite.season)

                EpisodeNumber = etree.SubElement(episode, "episode")
                EpisodeNumber.text = str(ep_obj.episode)

                year = etree.SubElement(episode, "year")
                if getattr(myShow, "firstaired", None) is not None:
                    try:
                        year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year)
                        if year_text:
                            year.text = year_text
                    except (ValueError, TypeError):
                        # unparseable air date -- leave <year> empty
                        pass

                # show-level overview; the episode's own text goes in <episodeplot>
                plot = etree.SubElement(episode, "plot")
                if getattr(myShow, "overview", None) is not None:
                    plot.text = myShow["overview"]

                Overview = etree.SubElement(episode, "episodeplot")
                if curEpToWrite.description is not None:
                    Overview.text = curEpToWrite.description
                else:
                    Overview.text = ""

                mpaa = etree.SubElement(episode, "mpaa")
                if getattr(myShow, 'contentrating', None) is not None:
                    mpaa.text = myShow["contentrating"]

                # only rate single episodes; a joint rating for multi-episode
                # files would be misleading
                if not ep_obj.relatedEps:
                    if getattr(myEp, "rating", None) is not None:
                        # indexer rating is 0-10 float; Mede8er expects 0-100 int
                        try:
                            rating = int(float(myEp['rating']) * 10)
                        except ValueError:
                            rating = 0
                        Rating = etree.SubElement(episode, "rating")
                        Rating.text = str(rating)

                director = etree.SubElement(episode, "director")
                director_text = getattr(myEp, 'director', None)
                if director_text is not None:
                    director.text = director_text

                # note: 'credits' is the XML tag name; local renamed to avoid
                # shadowing the builtin
                credits_node = etree.SubElement(episode, "credits")
                credits_text = getattr(myEp, 'writer', None)
                if credits_text is not None:
                    credits_node.text = credits_text

                cast = etree.SubElement(episode, "cast")
                if getattr(myShow, '_actors', None) is not None:
                    for actor in myShow['_actors']:
                        cur_actor_name_text = actor['name']
                        if cur_actor_name_text is not None and cur_actor_name_text.strip():
                            cur_actor = etree.SubElement(cast, "actor")
                            cur_actor.text = cur_actor_name_text.strip()

            else:
                # append data from (if any) related episodes

                if curEpToWrite.name:
                    if not EpisodeName.text:
                        EpisodeName.text = curEpToWrite.name
                    else:
                        EpisodeName.text = EpisodeName.text + ", " + curEpToWrite.name

                if curEpToWrite.description:
                    if not Overview.text:
                        Overview.text = curEpToWrite.description
                    else:
                        Overview.text = Overview.text + "\r" + curEpToWrite.description

        helpers.indentXML(rootNode)
        data = etree.ElementTree(rootNode)

        return data

    def _write_nfo_file(self, data, nfo_file_path, what):
        """
        Writes the ElementTree *data* to *nfo_file_path*, creating the
        metadata directory (with parent-matching permissions) if needed.

        what: short label ("show"/"episode") used only in log messages

        Returns True on success, False if the file could not be written.
        """
        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)

        try:
            if not ek.ek(os.path.isdir, nfo_file_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
                ek.ek(os.makedirs, nfo_file_dir)
                helpers.chmodAsParent(nfo_file_dir)

            logger.log(u"Writing " + what + " nfo file to " + nfo_file_path, logger.DEBUG)

            nfo_file = ek.ek(open, nfo_file_path, 'w')
            try:
                data.write(nfo_file, encoding="utf-8", xml_declaration=True)
            finally:
                # close even if serialization fails so the handle isn't leaked
                nfo_file.close()
            helpers.chmodAsParent(nfo_file_path)
        except IOError as e:
            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
                       logger.ERROR)
            return False

        return True

    def write_show_file(self, show_obj):
        """
        Generates and writes show_obj's metadata under the given path to the
        filename given by get_show_file_path()

        show_obj: TVShow object for which to create the metadata

        Note that this method expects that _show_data will return an ElementTree
        object. If your _show_data returns data in another format you'll need to
        override this method.
        """

        data = self._show_data(show_obj)

        if not data:
            return False

        return self._write_nfo_file(data, self.get_show_file_path(show_obj), "show")

    def write_ep_file(self, ep_obj):
        """
        Generates and writes ep_obj's metadata under the given path with the
        given filename root. Uses the episode's name with the extension in
        _ep_nfo_extension.

        ep_obj: TVEpisode object for which to create the metadata

        Note that this method expects that _ep_data will return an ElementTree
        object. If your _ep_data returns data in another format you'll need to
        override this method.
        """

        data = self._ep_data(ep_obj)

        if not data:
            return False

        return self._write_nfo_file(data, self.get_episode_file_path(ep_obj), "episode")
+
# present a standard "interface" from the module: the metadata framework
# discovers each provider generically through this module-level name
metadata_class = Mede8erMetadata
diff --git a/sickbeard/metadata/mediabrowser.py b/sickbeard/metadata/mediabrowser.py
index b35c076fcb9a026f2a5d8b01addb53ae528f143c..f54c0c4513a1693cdfe97091227e2cdfdf3d7d86 100644
--- a/sickbeard/metadata/mediabrowser.py
+++ b/sickbeard/metadata/mediabrowser.py
@@ -232,7 +232,7 @@ class MediaBrowserMetadata(generic.GenericMetadata):
 
         lINDEXER_API_PARMS['actors'] = True
 
-        if indexer_lang and not indexer_lang == 'en':
+        if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
             lINDEXER_API_PARMS['language'] = indexer_lang
 
         if show_obj.dvdorder != 0:
@@ -402,7 +402,7 @@ class MediaBrowserMetadata(generic.GenericMetadata):
 
             lINDEXER_API_PARMS['actors'] = True
 
-            if indexer_lang and not indexer_lang == 'en':
+            if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 lINDEXER_API_PARMS['language'] = indexer_lang
 
             if ep_obj.show.dvdorder != 0:
@@ -504,7 +504,7 @@ class MediaBrowserMetadata(generic.GenericMetadata):
                 try:
                     Language.text = myEp['language']
                 except:
-                    Language.text = 'en'  # tvrage api doesn't provide language so we must assume a value here
+                    Language.text = sickbeard.INDEXER_DEFAULT_LANGUAGE  # tvrage api doesn't provide language so we must assume a value here
 
                 thumb = etree.SubElement(episode, "filename")
                 # TODO: See what this is needed for.. if its still needed
diff --git a/sickbeard/metadata/tivo.py b/sickbeard/metadata/tivo.py
index 6e7c87e603eaede7cd7aee370eabcd15f5d0ac42..7744c585154e068e2852a7aff542806176a6642a 100644
--- a/sickbeard/metadata/tivo.py
+++ b/sickbeard/metadata/tivo.py
@@ -173,7 +173,7 @@ class TIVOMetadata(generic.GenericMetadata):
 
             lINDEXER_API_PARMS['actors'] = True
 
-            if indexer_lang and not indexer_lang == 'en':
+            if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 lINDEXER_API_PARMS['language'] = indexer_lang
 
             if ep_obj.show.dvdorder != 0:
diff --git a/sickbeard/metadata/wdtv.py b/sickbeard/metadata/wdtv.py
index fe4ed63b606d80474231125b2542d76b4a4ab988..5fba6dea5f1aef1e66d2467aa0917bc72b6efd02 100644
--- a/sickbeard/metadata/wdtv.py
+++ b/sickbeard/metadata/wdtv.py
@@ -184,7 +184,7 @@ class WDTVMetadata(generic.GenericMetadata):
 
             lINDEXER_API_PARMS['actors'] = True
 
-            if indexer_lang and not indexer_lang == 'en':
+            if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                 lINDEXER_API_PARMS['language'] = indexer_lang
 
             if ep_obj.show.dvdorder != 0:
diff --git a/sickbeard/name_parser/regexes.py b/sickbeard/name_parser/regexes.py
index b1a9d6eba4212883d85be187a75d46c426d365dc..a944e3d59f9dc7505ed9086a35c30d399bf9b77b 100644
--- a/sickbeard/name_parser/regexes.py
+++ b/sickbeard/name_parser/regexes.py
@@ -395,6 +395,13 @@ anime_regexes = [
      '''
     ),
 
+    ('anime_WarB3asT',
+     # 003. Show Name - Ep Name.ext
+     # 003-004. Show Name - Ep Name.ext
+     '''
+     ^(?P<ep_ab_num>\d{3,4})(-(?P<extra_ab_ep_num>\d{3,4}))?\.\s+(?P<series_name>.+?)\s-\s.*
+     '''),
+
     ('anime_bare',
      # One Piece - 102
      # [ACX]_Wolf's_Spirit_001.mkv
diff --git a/sickbeard/notifiers/trakt.py b/sickbeard/notifiers/trakt.py
index abc845daf4f0d285c26554a88d2eced2d151ce86..23908d0755a08ceb228a83e82ece271a59d22239 100644
--- a/sickbeard/notifiers/trakt.py
+++ b/sickbeard/notifiers/trakt.py
@@ -219,7 +219,7 @@ class TraktNotifier:
 
         return post_data
 
-    def test_notify(self, username, password, disable_ssl):
+    def test_notify(self, username, password, disable_ssl, blacklist_name=None):
         """
         Sends a test notification to trakt with the given authentication info and returns a boolean
         representing success.
@@ -227,13 +227,23 @@ class TraktNotifier:
         api: The api string to use
         username: The username to use
         password: The password to use
+        blacklist_name: slug of trakt list used to hide not interested show
 
         Returns: True if the request succeeded, False otherwise
         """
         try:
             trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, username, password, disable_ssl, sickbeard.TRAKT_TIMEOUT)
             trakt_api.validateAccount()
-            return "Test notice sent successfully to Trakt"
+            if blacklist_name and blacklist_name is not None:
+                trakt_lists = trakt_api.traktRequest("users/" + username + "/lists")
+                found = False
+                for trakt_list in trakt_lists:
+                    if (trakt_list['ids']['slug'] == blacklist_name):
+                        return "Test notice sent successfully to Trakt"
+                if not found:
+                    return "Trakt blacklist doesn't exists"
+            else:
+                return "Test notice sent successfully to Trakt"
         except (traktException, traktAuthException, traktServerBusy) as e:
             logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
             return "Test notice failed to Trakt: %s" % ex(e)
diff --git a/sickbeard/postProcessor.py b/sickbeard/postProcessor.py
index 8bbe2922e7752304b6f93f7bdff6b7e8ed246acc..c75d4825d1bed49df0ebece78dc2f35b349bfd8f 100644
--- a/sickbeard/postProcessor.py
+++ b/sickbeard/postProcessor.py
@@ -1,1058 +1,1090 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import glob
-import fnmatch
-import os
-import re
-import subprocess
-import stat
-
-import sickbeard
-
-from sickbeard import db
-from sickbeard import common
-from sickbeard import exceptions
-from sickbeard import helpers
-from sickbeard import history
-from sickbeard import logger
-from sickbeard import notifiers
-from sickbeard import show_name_helpers
-from sickbeard import failed_history
-from sickbeard import name_cache
-
-from sickbeard import encodingKludge as ek
-from sickbeard.exceptions import ex
-
-from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-
-from lib import adba
-
-
-class PostProcessor(object):
-    """
-    A class which will process a media file according to the post processing settings in the config.
-    """
-
-    EXISTS_LARGER = 1
-    EXISTS_SAME = 2
-    EXISTS_SMALLER = 3
-    DOESNT_EXIST = 4
-
-    IGNORED_FILESTRINGS = ["/.AppleDouble/", ".DS_Store"]
-
-    def __init__(self, file_path, nzb_name=None, process_method=None, is_priority=None):
-        """
-        Creates a new post processor with the given file path and optionally an NZB name.
-
-        file_path: The path to the file to be processed
-        nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
-        """
-        # absolute path to the folder that is being processed
-        self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path))
-
-        # full path to file
-        self.file_path = file_path
-
-        # file name only
-        self.file_name = ek.ek(os.path.basename, file_path)
-
-        # the name of the folder only
-        self.folder_name = ek.ek(os.path.basename, self.folder_path)
-
-        # name of the NZB that resulted in this folder
-        self.nzb_name = nzb_name
-
-        self.process_method = process_method if process_method else sickbeard.PROCESS_METHOD
-
-        self.in_history = False
-
-        self.release_group = None
-
-        self.release_name = None
-
-        self.is_proper = False
-
-        self.is_priority = is_priority
-
-        self.log = ''
-        
-        self.version = None
-
-    def _log(self, message, level=logger.INFO):
-        """
-        A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.
-
-        message: The string to log (unicode)
-        level: The log level to use (optional)
-        """
-        logger.log(message, level)
-        self.log += message + '\n'
-
-    def _checkForExistingFile(self, existing_file):
-        """
-        Checks if a file exists already and if it does whether it's bigger or smaller than
-        the file we are post processing
-
-        existing_file: The file to compare to
-
-        Returns:
-            DOESNT_EXIST if the file doesn't exist
-            EXISTS_LARGER if the file exists and is larger than the file we are post processing
-            EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
-            EXISTS_SAME if the file exists and is the same size as the file we are post processing
-        """
-
-        if not existing_file:
-            self._log(u"There is no existing file so there's no worries about replacing it", logger.DEBUG)
-            return PostProcessor.DOESNT_EXIST
-
-        # if the new file exists, return the appropriate code depending on the size
-        if ek.ek(os.path.isfile, existing_file):
-
-            # see if it's bigger than our old file
-            if ek.ek(os.path.getsize, existing_file) > ek.ek(os.path.getsize, self.file_path):
-                self._log(u"File " + existing_file + " is larger than " + self.file_path, logger.DEBUG)
-                return PostProcessor.EXISTS_LARGER
-
-            elif ek.ek(os.path.getsize, existing_file) == ek.ek(os.path.getsize, self.file_path):
-                self._log(u"File " + existing_file + " is the same size as " + self.file_path, logger.DEBUG)
-                return PostProcessor.EXISTS_SAME
-
-            else:
-                self._log(u"File " + existing_file + " is smaller than " + self.file_path, logger.DEBUG)
-                return PostProcessor.EXISTS_SMALLER
-
-        else:
-            self._log(u"File " + existing_file + " doesn't exist so there's no worries about replacing it",
-                      logger.DEBUG)
-            return PostProcessor.DOESNT_EXIST
-
-    def list_associated_files(self, file_path, base_name_only=False, subtitles_only=False, subfolders=False):
-        """
-        For a given file path searches for files with the same name but different extension and returns their absolute paths
-
-        file_path: The file to check for associated files
-
-        base_name_only: False add extra '.' (conservative search) to file_path minus extension
-
-        Returns: A list containing all files which are associated to the given file
-        """
-        def recursive_glob(treeroot, pattern):
-            results = []
-            for base, dirs, files in os.walk(treeroot):
-                goodfiles = fnmatch.filter(files, pattern)
-                results.extend(os.path.join(base, f) for f in goodfiles)
-            return results
-
-        if not file_path:
-            return []
-
-        file_path_list = []
-
-        if subfolders:
-            base_name = ek.ek(os.path.basename, file_path).rpartition('.')[0]
-        else:
-            base_name = file_path.rpartition('.')[0]
-
-        if not base_name_only:
-            base_name = base_name + '.'
-
-        # don't strip it all and use cwd by accident
-        if not base_name:
-            return []
-
-        # don't confuse glob with chars we didn't mean to use
-        base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)
-        
-        if subfolders:
-            filelist = ek.ek(recursive_glob, ek.ek(os.path.dirname, file_path),  base_name + '*')
-        else:
-            filelist = ek.ek(glob.glob, base_name + '*')
-        for associated_file_path in filelist:
-            # only add associated to list
-            if associated_file_path == file_path:
-                continue
-            # only list it if the only non-shared part is the extension or if it is a subtitle
-            if subtitles_only and not associated_file_path[len(associated_file_path) - 3:] in common.subtitleExtensions:
-                continue
-
-            # Exclude .rar files from associated list
-            if re.search('(^.+\.(rar|r\d+)$)', associated_file_path):
-                continue
-
-            if ek.ek(os.path.isfile, associated_file_path):
-                file_path_list.append(associated_file_path)
-
-        return file_path_list
-
-    def _delete(self, file_path, associated_files=False):
-        """
-        Deletes the file and optionally all associated files.
-
-        file_path: The file to delete
-        associated_files: True to delete all files which differ only by extension, False to leave them
-        """
-
-        if not file_path:
-            return
-
-        # figure out which files we want to delete
-        file_list = [file_path]
-        if associated_files:
-            file_list = file_list + self.list_associated_files(file_path, base_name_only=True, subfolders=True)
-
-        if not file_list:
-            self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG)
-            return
-
-        # delete the file and any other files which we want to delete
-        for cur_file in file_list:
-            if ek.ek(os.path.isfile, cur_file):
-                self._log(u"Deleting file " + cur_file, logger.DEBUG)
-                # check first the read-only attribute
-                file_attribute = ek.ek(os.stat, cur_file)[0]
-                if (not file_attribute & stat.S_IWRITE):
-                    # File is read-only, so make it writeable
-                    self._log('Read only mode on file ' + cur_file + ' Will try to make it writeable', logger.DEBUG)
-                    try:
-                        ek.ek(os.chmod, cur_file, stat.S_IWRITE)
-                    except:
-                        self._log(u'Cannot change permissions of ' + cur_file, logger.WARNING)
-
-                ek.ek(os.remove, cur_file)
-
-                # do the library update for synoindex
-                notifiers.synoindex_notifier.deleteFile(cur_file)
-
-    def _combined_file_operation(self, file_path, new_path, new_base_name, associated_files=False, action=None,
-                                 subtitles=False):
-        """
-        Performs a generic operation (move or copy) on a file. Can rename the file as well as change its location,
-        and optionally move associated files too.
-
-        file_path: The full path of the media file to act on
-        new_path: Destination path where we want to move/copy the file to
-        new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
-        associated_files: Boolean, whether we should copy similarly-named files too
-        action: function that takes an old path and new path and does an operation with them (move/copy)
-        """
-
-        if not action:
-            self._log(u"Must provide an action for the combined file operation", logger.ERROR)
-            return
-
-        file_list = [file_path]
-        if associated_files:
-            file_list = file_list + self.list_associated_files(file_path)
-        elif subtitles:
-            file_list = file_list + self.list_associated_files(file_path, subtitles_only=True)
-
-        if not file_list:
-            self._log(u"There were no files associated with " + file_path + ", not moving anything", logger.DEBUG)
-            return
-
-        # create base name with file_path (media_file without .extension)
-        old_base_name = file_path.rpartition('.')[0]
-        old_base_name_length = len(old_base_name)
-
-        # deal with all files
-        for cur_file_path in file_list:
-
-            cur_file_name = ek.ek(os.path.basename, cur_file_path)
-
-            # get the extension without .
-            cur_extension = cur_file_path[old_base_name_length + 1:]
-
-            # check if file have subtitles language
-            if os.path.splitext(cur_extension)[1][1:] in common.subtitleExtensions:
-                cur_lang = os.path.splitext(cur_extension)[0]
-                if cur_lang in sickbeard.SUBTITLES_LANGUAGES:
-                    cur_extension = cur_lang + os.path.splitext(cur_extension)[1]
-
-            # replace .nfo with .nfo-orig to avoid conflicts
-            if cur_extension == 'nfo' and sickbeard.NFO_RENAME == True:
-                cur_extension = 'nfo-orig'
-
-            # If new base name then convert name
-            if new_base_name:
-                new_file_name = new_base_name + '.' + cur_extension
-            # if we're not renaming we still want to change extensions sometimes
-            else:
-                new_file_name = helpers.replaceExtension(cur_file_name, cur_extension)
-
-            if sickbeard.SUBTITLES_DIR and cur_extension in common.subtitleExtensions:
-                subs_new_path = ek.ek(os.path.join, new_path, sickbeard.SUBTITLES_DIR)
-                dir_exists = helpers.makeDir(subs_new_path)
-                if not dir_exists:
-                    logger.log(u"Unable to create subtitles folder " + subs_new_path, logger.ERROR)
-                else:
-                    helpers.chmodAsParent(subs_new_path)
-                new_file_path = ek.ek(os.path.join, subs_new_path, new_file_name)
-            else:
-                new_file_path = ek.ek(os.path.join, new_path, new_file_name)
-
-            action(cur_file_path, new_file_path)
-
-    def _move(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
-        """
-        file_path: The full path of the media file to move
-        new_path: Destination path where we want to move the file to
-        new_base_name: The base filename (no extension) to use during the move. Use None to keep the same name.
-        associated_files: Boolean, whether we should move similarly-named files too
-        """
-
-        def _int_move(cur_file_path, new_file_path):
-
-            self._log(u"Moving file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
-            try:
-                helpers.moveFile(cur_file_path, new_file_path)
-                helpers.chmodAsParent(new_file_path)
-            except (IOError, OSError), e:
-                self._log("Unable to move file " + cur_file_path + " to " + new_file_path + ": " + str(e), logger.ERROR)
-                raise
-
-        self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move,
-                                      subtitles=subtitles)
-
-    def _copy(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
-        """
-        file_path: The full path of the media file to copy
-        new_path: Destination path where we want to copy the file to
-        new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
-        associated_files: Boolean, whether we should copy similarly-named files too
-        """
-
-        def _int_copy(cur_file_path, new_file_path):
-
-            self._log(u"Copying file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
-            try:
-                helpers.copyFile(cur_file_path, new_file_path)
-                helpers.chmodAsParent(new_file_path)
-            except (IOError, OSError), e:
-                logger.log("Unable to copy file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
-                raise
-
-        self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_copy,
-                                      subtitles=subtitles)
-
-
-    def _hardlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
-        """
-        file_path: The full path of the media file to move
-        new_path: Destination path where we want to create a hard linked file
-        new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
-        associated_files: Boolean, whether we should move similarly-named files too
-        """
-
-        def _int_hard_link(cur_file_path, new_file_path):
-
-            self._log(u"Hard linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
-            try:
-                helpers.hardlinkFile(cur_file_path, new_file_path)
-                helpers.chmodAsParent(new_file_path)
-            except (IOError, OSError), e:
-                self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
-                raise
-
-        self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_hard_link)
-
-    def _moveAndSymlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
-        """
-        file_path: The full path of the media file to move
-        new_path: Destination path where we want to move the file to create a symbolic link to
-        new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
-        associated_files: Boolean, whether we should move similarly-named files too
-        """
-
-        def _int_move_and_sym_link(cur_file_path, new_file_path):
-
-            self._log(u"Moving then symbolic linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
-            try:
-                helpers.moveAndSymlinkFile(cur_file_path, new_file_path)
-                helpers.chmodAsParent(new_file_path)
-            except (IOError, OSError), e:
-                self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
-                raise
-
-        self._combined_file_operation(file_path, new_path, new_base_name, associated_files,
-                                      action=_int_move_and_sym_link)
-
-    def _history_lookup(self):
-        """
-        Look up the NZB name in the history and see if it contains a record for self.nzb_name
-
-        Returns a (indexer_id, season, [], quality, version) tuple. The first two may be None if none were found.
-        """
-
-        to_return = (None, None, [], None, None)
-
-        # if we don't have either of these then there's nothing to use to search the history for anyway
-        if not self.nzb_name and not self.folder_name:
-            self.in_history = False
-            return to_return
-
-        # make a list of possible names to use in the search
-        names = []
-        if self.nzb_name:
-            names.append(self.nzb_name)
-            if '.' in self.nzb_name:
-                names.append(self.nzb_name.rpartition(".")[0])
-        if self.folder_name:
-            names.append(self.folder_name)
-
-        # search the database for a possible match and return immediately if we find one
-        myDB = db.DBConnection()
-        for curName in names:
-            search_name = re.sub("[\.\-\ ]", "_", curName)
-            sql_results = myDB.select("SELECT * FROM history WHERE resource LIKE ?", [search_name])
-
-            if len(sql_results) == 0:
-                continue
-
-            indexer_id = int(sql_results[0]["showid"])
-            season = int(sql_results[0]["season"])
-            quality = int(sql_results[0]["quality"])
-            version = int(sql_results[0]["version"])
-
-            if quality == common.Quality.UNKNOWN:
-                quality = None
-
-            show = helpers.findCertainShow(sickbeard.showList, indexer_id)
-
-            self.in_history = True
-            self.version = version
-            to_return = (show, season, [], quality, version)
-            self._log("Found result in history: " + str(to_return), logger.DEBUG)
-
-            return to_return
-
-        self.in_history = False
-        return to_return
-
-    def _finalize(self, parse_result):
-        self.release_group = parse_result.release_group
-
-        # remember whether it's a proper
-        if parse_result.extra_info:
-            self.is_proper = re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', parse_result.extra_info, re.I) != None
-
-        # if the result is complete then remember that for later
-        # if the result is complete then set release name
-        if parse_result.series_name and ((parse_result.season_number is not None and parse_result.episode_numbers)
-                                         or parse_result.air_date) and parse_result.release_group:
-
-            if not self.release_name:
-                self.release_name = helpers.remove_extension(ek.ek(os.path.basename, parse_result.original_name))
-
-        else:
-            logger.log(u"Parse result not sufficient (all following have to be set). will not save release name",
-                       logger.DEBUG)
-            logger.log(u"Parse result(series_name): " + str(parse_result.series_name), logger.DEBUG)
-            logger.log(u"Parse result(season_number): " + str(parse_result.season_number), logger.DEBUG)
-            logger.log(u"Parse result(episode_numbers): " + str(parse_result.episode_numbers), logger.DEBUG)
-            logger.log(u" or Parse result(air_date): " + str(parse_result.air_date), logger.DEBUG)
-            logger.log(u"Parse result(release_group): " + str(parse_result.release_group), logger.DEBUG)
-
-
-    def _analyze_name(self, name, file=True):
-        """
-        Takes a name and tries to figure out a show, season, and episode from it.
-
-        name: A string which we want to analyze to determine show info from (unicode)
-
-        Returns a (indexer_id, season, [episodes]) tuple. The first two may be None and episodes may be []
-        if none were found.
-        """
-
-        logger.log(u"Analyzing name " + repr(name))
-
-        to_return = (None, None, [], None, None)
-
-        if not name:
-            return to_return
-
-        name = helpers.remove_non_release_groups(helpers.remove_extension(name))
-
-        # parse the name to break it into show name, season, and episode
-        np = NameParser(file, tryIndexers=True, trySceneExceptions=True, convert=True)
-        parse_result = np.parse(name)
-
-        # show object
-        show = parse_result.show
-
-        if parse_result.is_air_by_date:
-            season = -1
-            episodes = [parse_result.air_date]
-        else:
-            season = parse_result.season_number
-            episodes = parse_result.episode_numbers
-
-        to_return = (show, season, episodes, parse_result.quality, None)
-
-        self._finalize(parse_result)
-        return to_return
-
-    def _build_anidb_episode(self, connection, filePath):
-        ep = adba.Episode(connection, filePath=filePath,
-                          paramsF=["quality", "anidb_file_name", "crc32"],
-                          paramsA=["epno", "english_name", "short_name_list", "other_name", "synonym_list"])
-
-        return ep
-
-    def _add_to_anidb_mylist(self, filePath):
-        if helpers.set_up_anidb_connection():
-            if not self.anidbEpisode:  # seams like we could parse the name before, now lets build the anidb object
-                self.anidbEpisode = self._build_anidb_episode(sickbeard.ADBA_CONNECTION, filePath)
-
-            self._log(u"Adding the file to the anidb mylist", logger.DEBUG)
-            try:
-                self.anidbEpisode.add_to_mylist(status=1)  # status = 1 sets the status of the file to "internal HDD"
-            except Exception, e:
-                self._log(u"exception msg: " + str(e))
-
-    def _find_info(self):
-        """
-        For a given file try to find the showid, season, and episode.
-        """
-
-        show = season = quality = version = None
-        episodes = []
-
-        # try to look up the nzb in history
-        attempt_list = [self._history_lookup,
-
-                        # try to analyze the nzb name
-                        lambda: self._analyze_name(self.nzb_name),
-
-                        # try to analyze the file name
-                        lambda: self._analyze_name(self.file_name),
-
-                        # try to analyze the dir name
-                        lambda: self._analyze_name(self.folder_name),
-
-                        # try to analyze the file + dir names together
-                        lambda: self._analyze_name(self.file_path),
-
-                        # try to analyze the dir + file name together as one name
-                        lambda: self._analyze_name(self.folder_name + u' ' + self.file_name)
-        ]
-
-        # attempt every possible method to get our info
-        for cur_attempt in attempt_list:
-
-            try:
-                (cur_show, cur_season, cur_episodes, cur_quality, cur_version) = cur_attempt()
-            except (InvalidNameException, InvalidShowException), e:
-                logger.log(u"Unable to parse, skipping: " + ex(e), logger.DEBUG)
-                continue
-
-            if not cur_show:
-                continue
-            else:
-                show = cur_show
-
-            if cur_quality and not (self.in_history and quality):
-                quality = cur_quality
-
-            # we only get current version for animes from history to prevent issues with old database entries
-            if cur_version is not None:
-                version = cur_version
-
-            if cur_season != None:
-                season = cur_season
-            if cur_episodes:
-                episodes = cur_episodes
-
-            # for air-by-date shows we need to look up the season/episode from database
-            if season == -1 and show and episodes:
-                self._log(
-                    u"Looks like this is an air-by-date or sports show, attempting to convert the date to season/episode",
-                    logger.DEBUG)
-                airdate = episodes[0].toordinal()
-                myDB = db.DBConnection()
-                sql_result = myDB.select(
-                    "SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?",
-                    [show.indexerid, show.indexer, airdate])
-
-                if sql_result:
-                    season = int(sql_result[0][0])
-                    episodes = [int(sql_result[0][1])]
-                else:
-                    self._log(u"Unable to find episode with date " + str(episodes[0]) + u" for show " + str(
-                        show.indexerid) + u", skipping", logger.DEBUG)
-                    # we don't want to leave dates in the episode list if we couldn't convert them to real episode numbers
-                    episodes = []
-                    continue
-
-            # if there's no season then we can hopefully just use 1 automatically
-            elif season == None and show:
-                myDB = db.DBConnection()
-                numseasonsSQlResult = myDB.select(
-                    "SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and indexer = ? and season != 0",
-                    [show.indexerid, show.indexer])
-                if int(numseasonsSQlResult[0][0]) == 1 and season == None:
-                    self._log(
-                        u"Don't have a season number, but this show appears to only have 1 season, setting season number to 1...",
-                        logger.DEBUG)
-                    season = 1
-
-            if show and season and episodes:
-                return (show, season, episodes, quality, version)
-
-        return (show, season, episodes, quality, version)
-
-    def _get_ep_obj(self, show, season, episodes):
-        """
-        Retrieve the TVEpisode object requested.
-
-        show: The show object belonging to the show we want to process
-        season: The season of the episode (int)
-        episodes: A list of episodes to find (list of ints)
-
-        If the episode(s) can be found then a TVEpisode object with the correct related eps will
-        be instantiated and returned. If the episode can't be found then None will be returned.
-        """
-
-        root_ep = None
-        for cur_episode in episodes:
-            self._log(u"Retrieving episode object for " + str(season) + "x" + str(cur_episode), logger.DEBUG)
-
-            # now that we've figured out which episode this file is just load it manually
-            try:
-                curEp = show.getEpisode(season, cur_episode)
-                if not curEp:
-                    raise exceptions.EpisodeNotFoundException()
-            except exceptions.EpisodeNotFoundException, e:
-                self._log(u"Unable to create episode: " + ex(e), logger.DEBUG)
-                raise exceptions.PostProcessingFailed()
-
-            # associate all the episodes together under a single root episode
-            if root_ep == None:
-                root_ep = curEp
-                root_ep.relatedEps = []
-            elif curEp not in root_ep.relatedEps:
-                root_ep.relatedEps.append(curEp)
-
-        return root_ep
-
-    def _get_quality(self, ep_obj):
-        """
-        Determines the quality of the file that is being post processed, first by checking if it is directly
-        available in the TVEpisode's status or otherwise by parsing through the data available.
-
-        ep_obj: The TVEpisode object related to the file we are post processing
-
-        Returns: A quality value found in common.Quality
-        """
-
-        ep_quality = common.Quality.UNKNOWN
-
-        # if there is a quality available in the status then we don't need to bother guessing from the filename
-        if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
-            oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)  # @UnusedVariable
-            if ep_quality != common.Quality.UNKNOWN:
-                self._log(
-                    u"The old status had a quality in it, using that: " + common.Quality.qualityStrings[ep_quality],
-                    logger.DEBUG)
-                return ep_quality
-
-        # nzb name is the most reliable if it exists, followed by folder name and lastly file name
-        name_list = [self.nzb_name, self.folder_name, self.file_name]
-
-        # search all possible names for our new quality, in case the file or dir doesn't have it
-        for cur_name in name_list:
-
-            # some stuff might be None at this point still
-            if not cur_name:
-                continue
-
-            ep_quality = common.Quality.nameQuality(cur_name, ep_obj.show.is_anime)
-            self._log(
-                u"Looking up quality for name " + cur_name + u", got " + common.Quality.qualityStrings[ep_quality],
-                logger.DEBUG)
-
-            # if we find a good one then use it
-            if ep_quality != common.Quality.UNKNOWN:
-                logger.log(cur_name + u" looks like it has quality " + common.Quality.qualityStrings[
-                    ep_quality] + ", using that", logger.DEBUG)
-                return ep_quality
-
-        # Try getting quality from the episode (snatched) status
-        if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER:
-            oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)  # @UnusedVariable
-            if ep_quality != common.Quality.UNKNOWN:
-                self._log(
-                    u"The old status had a quality in it, using that: " + common.Quality.qualityStrings[ep_quality],
-                    logger.DEBUG)
-                return ep_quality
-
-        # Try guessing quality from the file name
-        ep_quality = common.Quality.assumeQuality(self.file_name)
-        self._log(
-            u"Guessing quality for name " + self.file_name + u", got " + common.Quality.qualityStrings[ep_quality],
-            logger.DEBUG)
-        if ep_quality != common.Quality.UNKNOWN:
-            logger.log(self.file_name + u" looks like it has quality " + common.Quality.qualityStrings[
-                ep_quality] + ", using that", logger.DEBUG)
-            return ep_quality
-
-        test = str(ep_quality)
-        return ep_quality
-
-    def _run_extra_scripts(self, ep_obj):
-        """
-        Executes any extra scripts defined in the config.
-
-        ep_obj: The object to use when calling the extra script
-        """
-        for curScriptName in sickbeard.EXTRA_SCRIPTS:
-
-            # generate a safe command line string to execute the script and provide all the parameters
-            script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", curScriptName) if piece.strip()]
-            script_cmd[0] = ek.ek(os.path.abspath, script_cmd[0])
-            self._log(u"Absolute path to script: " + script_cmd[0], logger.DEBUG)
-
-            script_cmd = script_cmd + [ep_obj.location, self.file_path, str(ep_obj.show.indexerid), str(ep_obj.season),
-                                       str(ep_obj.episode), str(ep_obj.airdate)]
-
-            # use subprocess to run the command and capture output
-            self._log(u"Executing command " + str(script_cmd))
-            try:
-                p = subprocess.Popen(script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                                     stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
-                out, err = p.communicate()  # @UnusedVariable
-                self._log(u"Script result: " + str(out), logger.DEBUG)
-
-            except OSError, e:
-                self._log(u"Unable to run extra_script: " + ex(e))
-
-            except Exception, e:
-                self._log(u"Unable to run extra_script: " + ex(e))
-
-    def _is_priority(self, ep_obj, new_ep_quality):
-        """
-        Determines if the episode is a priority download or not (if it is expected). Episodes which are expected
-        (snatched) or larger than the existing episode are priority, others are not.
-
-        ep_obj: The TVEpisode object in question
-        new_ep_quality: The quality of the episode that is being processed
-
-        Returns: True if the episode is priority, False otherwise.
-        """
-
-        if self.is_priority:
-            return True
-
-        # if SB downloaded this on purpose then this is a priority download
-        if self.in_history or ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
-            self._log(u"SB snatched this episode so I'm marking it as priority", logger.DEBUG)
-            return True
-
-        old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)
-
-        # if the user downloaded it manually and it's higher quality than the existing episode then it's priority
-        if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
-            self._log(
-                u"This was manually downloaded but it appears to be better quality than what we have so I'm marking it as priority",
-                logger.DEBUG)
-            return True
-
-        # if the user downloaded it manually and it appears to be a PROPER/REPACK then it's priority
-        if self.is_proper and new_ep_quality >= old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
-            self._log(u"This was manually downloaded but it appears to be a proper so I'm marking it as priority",
-                      logger.DEBUG)
-            return True
-
-        return False
-
-    def process(self):
-        """
-        Post-process a given file
-        """
-
-        self._log(u"Processing " + self.file_path + " (" + str(self.nzb_name) + ")")
-
-        if ek.ek(os.path.isdir, self.file_path):
-            self._log(u"File " + self.file_path + " seems to be a directory")
-            return False
-
-        for ignore_file in self.IGNORED_FILESTRINGS:
-            if ignore_file in self.file_path:
-                self._log(u"File " + self.file_path + " is ignored type, skipping")
-                return False
-
-        # reset per-file stuff
-        self.in_history = False
-
-        # reset the anidb episode object
-        self.anidbEpisode = None
-
-        # try to find the file info
-        (show, season, episodes, quality, version) = self._find_info()
-        if not show:
-            self._log(u"This show isn't in your list, you need to add it to SB before post-processing an episode",
-                      logger.WARNING)
-            raise exceptions.PostProcessingFailed()
-        elif season == None or not episodes:
-            self._log(u"Not enough information to determine what episode this is", logger.DEBUG)
-            self._log(u"Quitting post-processing", logger.DEBUG)
-            return False
-
-        # retrieve/create the corresponding TVEpisode objects
-        ep_obj = self._get_ep_obj(show, season, episodes)
-
-        # get the quality of the episode we're processing
-        if quality:
-            self._log(u"Snatch history had a quality in it, using that: " + common.Quality.qualityStrings[quality],
-                      logger.DEBUG)
-            new_ep_quality = quality
-        else:
-            new_ep_quality = self._get_quality(ep_obj)
-
-        logger.log(u"Quality of the episode we're processing: " + str(new_ep_quality), logger.DEBUG)
-
-        # see if this is a priority download (is it snatched, in history, PROPER, or BEST)
-        priority_download = self._is_priority(ep_obj, new_ep_quality)
-        self._log(u"Is ep a priority download: " + str(priority_download), logger.DEBUG)
-
-        # get the version of the episode we're processing
-        if version:
-            self._log(u"Snatch history had a version in it, using that: v" + str(version),
-                      logger.DEBUG)
-            new_ep_version = version
-        else:
-            new_ep_version = -1
-
-        # check for an existing file
-        existing_file_status = self._checkForExistingFile(ep_obj.location)
-
-        # if it's not priority then we don't want to replace smaller files in case it was a mistake
-        if not priority_download:
-
-            # if there's an existing file that we don't want to replace stop here
-            if existing_file_status == PostProcessor.EXISTS_LARGER:
-                if self.is_proper:
-                    self._log(
-                        u"File exists and new file is smaller, new file is a proper/repack, marking it safe to replace",
-                        logger.DEBUG)
-                    return True
-
-                else:
-                    self._log(u"File exists and new file is smaller, marking it unsafe to replace", logger.DEBUG)
-                    return False
-
-            elif existing_file_status == PostProcessor.EXISTS_SAME:
-                self._log(u"File exists and new file is same size, marking it unsafe to replace", logger.DEBUG)
-                return False
-
-        # if the file is priority then we're going to replace it even if it exists
-        else:
-            self._log(
-                u"This download is marked a priority download so I'm going to replace an existing file if I find one",
-                logger.DEBUG)
-
-        # delete the existing file (and company)
-        for cur_ep in [ep_obj] + ep_obj.relatedEps:
-            try:
-                self._delete(cur_ep.location, associated_files=True)
-
-                # clean up any left over folders
-                if cur_ep.location:
-                    helpers.delete_empty_folders(ek.ek(os.path.dirname, cur_ep.location),
-                                                 keep_dir=ep_obj.show._location)
-            except (OSError, IOError):
-                raise exceptions.PostProcessingFailed("Unable to delete the existing files")
-
-            # set the status of the episodes
-            # for curEp in [ep_obj] + ep_obj.relatedEps:
-            #    curEp.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality)
-
-        # if the show directory doesn't exist then make it if allowed
-        if not ek.ek(os.path.isdir, ep_obj.show._location) and sickbeard.CREATE_MISSING_SHOW_DIRS:
-            self._log(u"Show directory doesn't exist, creating it", logger.DEBUG)
-            try:
-                ek.ek(os.mkdir, ep_obj.show._location)
-                # do the library update for synoindex
-                notifiers.synoindex_notifier.addFolder(ep_obj.show._location)
-            except (OSError, IOError):
-                raise exceptions.PostProcessingFailed("Unable to create the show directory: " + ep_obj.show._location)
-
-            # get metadata for the show (but not episode because it hasn't been fully processed)
-            ep_obj.show.writeMetadata(True)
-
-        # update the ep info before we rename so the quality & release name go into the name properly
-        sql_l = []
-        trakt_data = [] 
-        for cur_ep in [ep_obj] + ep_obj.relatedEps:
-            with cur_ep.lock:
-
-                if self.release_name:
-                    self._log("Found release name " + self.release_name, logger.DEBUG)
-                    cur_ep.release_name = self.release_name
-                else:
-                    cur_ep.release_name = ""
-
-                if ep_obj.status in common.Quality.SNATCHED_BEST:
-                    cur_ep.status = common.Quality.compositeStatus(common.ARCHIVED, new_ep_quality)
-                else:
-                    cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality)
-
-                cur_ep.subtitles = []
-
-                cur_ep.subtitles_searchcount = 0
-
-                cur_ep.subtitles_lastsearch = '0001-01-01 00:00:00'
-
-                cur_ep.is_proper = self.is_proper
-
-                cur_ep.version = new_ep_version
-
-                if self.release_group:
-                    cur_ep.release_group = self.release_group
-                else:
-                    cur_ep.release_group = ""
-
-                sql_l.append(cur_ep.get_sql())
-
-                trakt_data.append((cur_ep.season, cur_ep.episode))
-
-        data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
-
-        if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.TRAKT_REMOVE_WATCHLIST:
-            logger.log(u"Remove episodes, showid: indexerid " + str(show.indexerid) + ", Title " + str(show.name) + " to Traktv Watchlist", logger.DEBUG)
-            if data:
-                notifiers.trakt_notifier.update_watchlist(show, data_episode=data, update="remove")
-
-        if len(sql_l) > 0:
-            myDB = db.DBConnection()
-            myDB.mass_action(sql_l)
-
-        # Just want to keep this consistent for failed handling right now
-        releaseName = show_name_helpers.determineReleaseName(self.folder_path, self.nzb_name)
-        if releaseName is not None:
-            failed_history.logSuccess(releaseName)
-        else:
-            self._log(u"Couldn't find release in snatch history", logger.WARNING)
-
-        # find the destination folder
-        try:
-            proper_path = ep_obj.proper_path()
-            proper_absolute_path = ek.ek(os.path.join, ep_obj.show.location, proper_path)
-
-            dest_path = ek.ek(os.path.dirname, proper_absolute_path)
-        except exceptions.ShowDirNotFoundException:
-            raise exceptions.PostProcessingFailed(
-                u"Unable to post-process an episode if the show dir doesn't exist, quitting")
-
-        self._log(u"Destination folder for this episode: " + dest_path, logger.DEBUG)
-
-        # create any folders we need
-        helpers.make_dirs(dest_path)
-
-        # figure out the base name of the resulting episode file
-        if sickbeard.RENAME_EPISODES:
-            orig_extension = self.file_name.rpartition('.')[-1]
-            new_base_name = ek.ek(os.path.basename, proper_path)
-            new_file_name = new_base_name + '.' + orig_extension
-
-        else:
-            # if we're not renaming then there's no new base name, we'll just use the existing name
-            new_base_name = None
-            new_file_name = self.file_name
-
-        # add to anidb
-        if ep_obj.show.is_anime and sickbeard.ANIDB_USE_MYLIST:
-            self._add_to_anidb_mylist(self.file_path)
-
-        try:
-            # move the episode and associated files to the show dir
-            if self.process_method == "copy":
-                self._copy(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
-                           sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
-            elif self.process_method == "move":
-                self._move(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
-                           sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
-            elif self.process_method == "hardlink":
-                self._hardlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
-                               sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
-            elif self.process_method == "symlink":
-                self._moveAndSymlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
-                                     sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
-            else:
-                logger.log(u"Unknown process method: " + str(self.process_method), logger.ERROR)
-                raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
-        except (OSError, IOError):
-            raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
-                
-        # download subtitles
-        if sickbeard.USE_SUBTITLES and ep_obj.show.subtitles:
-            for cur_ep in [ep_obj] + ep_obj.relatedEps:
-                with cur_ep.lock:
-                    cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
-                    cur_ep.downloadSubtitles(force=True)
-
-        # put the new location in the database
-        sql_l = []
-        for cur_ep in [ep_obj] + ep_obj.relatedEps:
-            with cur_ep.lock:
-                cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
-                sql_l.append(cur_ep.get_sql())
-
-        if len(sql_l) > 0:
-            myDB = db.DBConnection()
-            myDB.mass_action(sql_l)
-
-        # set file modify stamp to show airdate
-        if sickbeard.AIRDATE_EPISODES:
-            for cur_ep in [ep_obj] + ep_obj.relatedEps:
-                with cur_ep.lock:
-                    cur_ep.airdateModifyStamp()
-
-        # generate nfo/tbn
-        ep_obj.createMetaFiles()
-
-        # log it to history
-        history.logDownload(ep_obj, self.file_path, new_ep_quality, self.release_group, new_ep_version)
-
-        # send notifications
-        notifiers.notify_download(ep_obj._format_pattern('%SN - %Sx%0E - %EN - %QN'))
-
-        # do the library update for KODI
-        notifiers.kodi_notifier.update_library(ep_obj.show.name)
-
-        # do the library update for Plex
-        notifiers.plex_notifier.update_library()
-
-        # do the library update for NMJ
-        # nmj_notifier kicks off its library update when the notify_download is issued (inside notifiers)
-
-        # do the library update for Synology Indexer
-        notifiers.synoindex_notifier.addFile(ep_obj.location)
-
-        # do the library update for pyTivo
-        notifiers.pytivo_notifier.update_library(ep_obj)
-
-        # do the library update for Trakt
-        notifiers.trakt_notifier.update_library(ep_obj)
-
-        self._run_extra_scripts(ep_obj)
-
-        return True
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import glob
+import fnmatch
+import os
+import re
+import subprocess
+import stat
+
+import sickbeard
+
+from sickbeard import db
+from sickbeard import common
+from sickbeard import exceptions
+from sickbeard import helpers
+from sickbeard import history
+from sickbeard import logger
+from sickbeard import notifiers
+from sickbeard import show_name_helpers
+from sickbeard import failed_history
+from sickbeard import name_cache
+
+from sickbeard import encodingKludge as ek
+from sickbeard.exceptions import ex
+
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+
+from lib import adba
+from sickbeard.helpers import verify_freespace
+
+
+class PostProcessor(object):
+    """
+    A class which will process a media file according to the post processing settings in the config.
+    """
+
+    EXISTS_LARGER = 1
+    EXISTS_SAME = 2
+    EXISTS_SMALLER = 3
+    DOESNT_EXIST = 4
+
+    IGNORED_FILESTRINGS = ["/.AppleDouble/", ".DS_Store"]
+
+    def __init__(self, file_path, nzb_name=None, process_method=None, is_priority=None):
+        """
+        Creates a new post processor with the given file path and optionally an NZB name.
+
+        file_path: The path to the file to be processed
+        nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
+        """
+        # absolute path to the folder that is being processed
+        self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path))
+
+        # full path to file
+        self.file_path = file_path
+
+        # file name only
+        self.file_name = ek.ek(os.path.basename, file_path)
+
+        # the name of the folder only
+        self.folder_name = ek.ek(os.path.basename, self.folder_path)
+
+        # name of the NZB that resulted in this folder
+        self.nzb_name = nzb_name
+
+        self.process_method = process_method if process_method else sickbeard.PROCESS_METHOD
+
+        self.in_history = False
+
+        self.release_group = None
+
+        self.release_name = None
+
+        self.is_proper = False
+
+        self.is_priority = is_priority
+
+        self.log = ''
+        
+        self.version = None
+
+    def _log(self, message, level=logger.INFO):
+        """
+        A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.
+
+        message: The string to log (unicode)
+        level: The log level to use (optional)
+        """
+        logger.log(message, level)
+        self.log += message + '\n'
+
+    def _checkForExistingFile(self, existing_file):
+        """
+        Checks if a file exists already and if it does whether it's bigger or smaller than
+        the file we are post processing
+
+        existing_file: The file to compare to
+
+        Returns:
+            DOESNT_EXIST if the file doesn't exist
+            EXISTS_LARGER if the file exists and is larger than the file we are post processing
+            EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
+            EXISTS_SAME if the file exists and is the same size as the file we are post processing
+        """
+
+        if not existing_file:
+            self._log(u"There is no existing file so there's no worries about replacing it", logger.DEBUG)
+            return PostProcessor.DOESNT_EXIST
+
+        # if the new file exists, return the appropriate code depending on the size
+        if ek.ek(os.path.isfile, existing_file):
+
+            # see if it's bigger than our old file
+            if ek.ek(os.path.getsize, existing_file) > ek.ek(os.path.getsize, self.file_path):
+                self._log(u"File " + existing_file + " is larger than " + self.file_path, logger.DEBUG)
+                return PostProcessor.EXISTS_LARGER
+
+            elif ek.ek(os.path.getsize, existing_file) == ek.ek(os.path.getsize, self.file_path):
+                self._log(u"File " + existing_file + " is the same size as " + self.file_path, logger.DEBUG)
+                return PostProcessor.EXISTS_SAME
+
+            else:
+                self._log(u"File " + existing_file + " is smaller than " + self.file_path, logger.DEBUG)
+                return PostProcessor.EXISTS_SMALLER
+
+        else:
+            self._log(u"File " + existing_file + " doesn't exist so there's no worries about replacing it",
+                      logger.DEBUG)
+            return PostProcessor.DOESNT_EXIST
+
+    def list_associated_files(self, file_path, base_name_only=False, subtitles_only=False, subfolders=False):
+        """
+        For a given file path searches for files with the same name but different extension and returns their absolute paths
+
+        file_path: The file to check for associated files
+
+        base_name_only: False add extra '.' (conservative search) to file_path minus extension
+
+        Returns: A list containing all files which are associated to the given file
+        """
+        def recursive_glob(treeroot, pattern):
+            results = []
+            for base, dirs, files in os.walk(treeroot):
+                goodfiles = fnmatch.filter(files, pattern)
+                results.extend(os.path.join(base, f) for f in goodfiles)
+            return results
+
+        if not file_path:
+            return []
+
+        file_path_list = []
+
+        if subfolders:
+            base_name = ek.ek(os.path.basename, file_path).rpartition('.')[0]
+        else:
+            base_name = file_path.rpartition('.')[0]
+
+        if not base_name_only:
+            base_name = base_name + '.'
+
+        # don't strip it all and use cwd by accident
+        if not base_name:
+            return []
+
+        # don't confuse glob with chars we didn't mean to use
+        base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)
+        
+        if subfolders:
+            filelist = ek.ek(recursive_glob, ek.ek(os.path.dirname, file_path),  base_name + '*')
+        else:
+            filelist = ek.ek(glob.glob, base_name + '*')
+        for associated_file_path in filelist:
+            # only add associated to list
+            if associated_file_path == file_path:
+                continue
+            # only list it if the only non-shared part is the extension or if it is a subtitle
+            if subtitles_only and not associated_file_path[len(associated_file_path) - 3:] in common.subtitleExtensions:
+                continue
+
+            # Exclude .rar files from associated list
+            if re.search('(^.+\.(rar|r\d+)$)', associated_file_path):
+                continue
+
+            if ek.ek(os.path.isfile, associated_file_path):
+                file_path_list.append(associated_file_path)
+
+        return file_path_list
+
+    def _delete(self, file_path, associated_files=False):
+        """
+        Deletes the file and optionally all associated files.
+
+        file_path: The file to delete
+        associated_files: True to delete all files which differ only by extension, False to leave them
+        """
+
+        if not file_path:
+            return
+
+        # figure out which files we want to delete
+        file_list = [file_path]
+        if associated_files:
+            file_list = file_list + self.list_associated_files(file_path, base_name_only=True, subfolders=True)
+
+        if not file_list:
+            self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG)
+            return
+
+        # delete the file and any other files which we want to delete
+        for cur_file in file_list:
+            if ek.ek(os.path.isfile, cur_file):
+                self._log(u"Deleting file " + cur_file, logger.DEBUG)
+                # check first the read-only attribute
+                file_attribute = ek.ek(os.stat, cur_file)[0]
+                if (not file_attribute & stat.S_IWRITE):
+                    # File is read-only, so make it writeable
+                    self._log('Read only mode on file ' + cur_file + ' Will try to make it writeable', logger.DEBUG)
+                    try:
+                        ek.ek(os.chmod, cur_file, stat.S_IWRITE)
+                    except:
+                        self._log(u'Cannot change permissions of ' + cur_file, logger.WARNING)
+
+                ek.ek(os.remove, cur_file)
+
+                # do the library update for synoindex
+                notifiers.synoindex_notifier.deleteFile(cur_file)
+
+    def _combined_file_operation(self, file_path, new_path, new_base_name, associated_files=False, action=None,
+                                 subtitles=False):
+        """
+        Performs a generic operation (move or copy) on a file. Can rename the file as well as change its location,
+        and optionally move associated files too.
+
+        file_path: The full path of the media file to act on
+        new_path: Destination path where we want to move/copy the file to
+        new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
+        associated_files: Boolean, whether we should copy similarly-named files too
+        action: function that takes an old path and new path and does an operation with them (move/copy)
+        """
+
+        if not action:
+            self._log(u"Must provide an action for the combined file operation", logger.ERROR)
+            return
+
+        file_list = [file_path]
+        if associated_files:
+            file_list = file_list + self.list_associated_files(file_path)
+        elif subtitles:
+            file_list = file_list + self.list_associated_files(file_path, subtitles_only=True)
+
+        if not file_list:
+            self._log(u"There were no files associated with " + file_path + ", not moving anything", logger.DEBUG)
+            return
+
+        # create base name with file_path (media_file without .extension)
+        old_base_name = file_path.rpartition('.')[0]
+        old_base_name_length = len(old_base_name)
+
+        # deal with all files
+        for cur_file_path in file_list:
+
+            cur_file_name = ek.ek(os.path.basename, cur_file_path)
+
+            # get the extension without .
+            cur_extension = cur_file_path[old_base_name_length + 1:]
+
+            # check if file have subtitles language
+            if os.path.splitext(cur_extension)[1][1:] in common.subtitleExtensions:
+                cur_lang = os.path.splitext(cur_extension)[0]
+                if cur_lang in sickbeard.SUBTITLES_LANGUAGES:
+                    cur_extension = cur_lang + os.path.splitext(cur_extension)[1]
+
+            # replace .nfo with .nfo-orig to avoid conflicts
+            if cur_extension == 'nfo' and sickbeard.NFO_RENAME == True:
+                cur_extension = 'nfo-orig'
+
+            # If new base name then convert name
+            if new_base_name:
+                new_file_name = new_base_name + '.' + cur_extension
+            # if we're not renaming we still want to change extensions sometimes
+            else:
+                new_file_name = helpers.replaceExtension(cur_file_name, cur_extension)
+
+            if sickbeard.SUBTITLES_DIR and cur_extension in common.subtitleExtensions:
+                subs_new_path = ek.ek(os.path.join, new_path, sickbeard.SUBTITLES_DIR)
+                dir_exists = helpers.makeDir(subs_new_path)
+                if not dir_exists:
+                    logger.log(u"Unable to create subtitles folder " + subs_new_path, logger.ERROR)
+                else:
+                    helpers.chmodAsParent(subs_new_path)
+                new_file_path = ek.ek(os.path.join, subs_new_path, new_file_name)
+            else:
+                new_file_path = ek.ek(os.path.join, new_path, new_file_name)
+
+            action(cur_file_path, new_file_path)
+
+    def _move(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
+        """
+        file_path: The full path of the media file to move
+        new_path: Destination path where we want to move the file to
+        new_base_name: The base filename (no extension) to use during the move. Use None to keep the same name.
+        associated_files: Boolean, whether we should move similarly-named files too
+        """
+
+        def _int_move(cur_file_path, new_file_path):
+
+            self._log(u"Moving file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
+            try:
+                helpers.moveFile(cur_file_path, new_file_path)
+                helpers.chmodAsParent(new_file_path)
+            except (IOError, OSError), e:
+                self._log("Unable to move file " + cur_file_path + " to " + new_file_path + ": " + str(e), logger.ERROR)
+                raise
+
+        self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move,
+                                      subtitles=subtitles)
+
+    def _copy(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
+        """
+        file_path: The full path of the media file to copy
+        new_path: Destination path where we want to copy the file to
+        new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
+        associated_files: Boolean, whether we should copy similarly-named files too
+        """
+
+        def _int_copy(cur_file_path, new_file_path):
+
+            self._log(u"Copying file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
+            try:
+                helpers.copyFile(cur_file_path, new_file_path)
+                helpers.chmodAsParent(new_file_path)
+            except (IOError, OSError), e:
+                logger.log("Unable to copy file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
+                raise
+
+        self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_copy,
+                                      subtitles=subtitles)
+
+
+    def _hardlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
+        """
+        file_path: The full path of the media file to move
+        new_path: Destination path where we want to create a hard linked file
+        new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
+        associated_files: Boolean, whether we should move similarly-named files too
+        """
+
+        def _int_hard_link(cur_file_path, new_file_path):
+
+            self._log(u"Hard linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
+            try:
+                helpers.hardlinkFile(cur_file_path, new_file_path)
+                helpers.chmodAsParent(new_file_path)
+            except (IOError, OSError), e:
+                self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
+                raise
+
+        self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_hard_link)
+
+    def _moveAndSymlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
+        """
+        file_path: The full path of the media file to move
+        new_path: Destination path where we want to move the file to create a symbolic link to
+        new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.
+        associated_files: Boolean, whether we should move similarly-named files too
+        """
+
+        def _int_move_and_sym_link(cur_file_path, new_file_path):
+
+            self._log(u"Moving then symbolic linking file from " + cur_file_path + " to " + new_file_path, logger.DEBUG)
+            try:
+                helpers.moveAndSymlinkFile(cur_file_path, new_file_path)
+                helpers.chmodAsParent(new_file_path)
+            except (IOError, OSError), e:
+                self._log("Unable to link file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR)
+                raise
+
+        self._combined_file_operation(file_path, new_path, new_base_name, associated_files,
+                                      action=_int_move_and_sym_link)
+
+    def _history_lookup(self):
+        """
+        Look up the NZB name in the history and see if it contains a record for self.nzb_name
+
+        Returns a (indexer_id, season, [], quality, version) tuple. The first two may be None if none were found.
+        Side effects: sets self.in_history, and self.version on a successful hit.
+        """
+
+        to_return = (None, None, [], None, None)
+
+        # if we don't have either of these then there's nothing to use to search the history for anyway
+        if not self.nzb_name and not self.folder_name:
+            self.in_history = False
+            return to_return
+
+        # make a list of possible names to use in the search
+        names = []
+        if self.nzb_name:
+            names.append(self.nzb_name)
+            if '.' in self.nzb_name:
+                # also try the nzb name with its extension stripped
+                names.append(self.nzb_name.rpartition(".")[0])
+        if self.folder_name:
+            names.append(self.folder_name)
+
+        # search the database for a possible match and return immediately if we find one
+        myDB = db.DBConnection()
+        for curName in names:
+            # replace separators with "_", which is also the SQL LIKE single-character
+            # wildcard, so history rows stored with different separators still match
+            search_name = re.sub("[\.\-\ ]", "_", curName)
+            sql_results = myDB.select("SELECT * FROM history WHERE resource LIKE ?", [search_name])
+
+            if len(sql_results) == 0:
+                continue
+
+            indexer_id = int(sql_results[0]["showid"])
+            season = int(sql_results[0]["season"])
+            quality = int(sql_results[0]["quality"])
+            version = int(sql_results[0]["version"])
+
+            # treat UNKNOWN as "no quality info" so later lookups may fill it in
+            if quality == common.Quality.UNKNOWN:
+                quality = None
+
+            show = helpers.findCertainShow(sickbeard.showList, indexer_id)
+
+            self.in_history = True
+            self.version = version
+            to_return = (show, season, [], quality, version)
+            self._log("Found result in history: " + str(to_return), logger.DEBUG)
+
+            return to_return
+
+        self.in_history = False
+        return to_return
+
+    def _finalize(self, parse_result):
+        self.release_group = parse_result.release_group
+
+        # remember whether it's a proper
+        if parse_result.extra_info:
+            self.is_proper = re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', parse_result.extra_info, re.I) != None
+
+        # if the result is complete then remember that for later
+        # if the result is complete then set release name
+        if parse_result.series_name and ((parse_result.season_number is not None and parse_result.episode_numbers)
+                                         or parse_result.air_date) and parse_result.release_group:
+
+            if not self.release_name:
+                self.release_name = helpers.remove_extension(ek.ek(os.path.basename, parse_result.original_name))
+
+        else:
+            logger.log(u"Parse result not sufficient (all following have to be set). will not save release name",
+                       logger.DEBUG)
+            logger.log(u"Parse result(series_name): " + str(parse_result.series_name), logger.DEBUG)
+            logger.log(u"Parse result(season_number): " + str(parse_result.season_number), logger.DEBUG)
+            logger.log(u"Parse result(episode_numbers): " + str(parse_result.episode_numbers), logger.DEBUG)
+            logger.log(u" or Parse result(air_date): " + str(parse_result.air_date), logger.DEBUG)
+            logger.log(u"Parse result(release_group): " + str(parse_result.release_group), logger.DEBUG)
+
+
+    def _analyze_name(self, name, file=True):
+        """
+        Takes a name and tries to figure out a show, season, and episode from it.
+
+        name: A string which we want to analyze to determine show info from (unicode)
+        file: passed straight to NameParser; presumably flags whether the name is a
+            file name (vs a folder name) -- TODO confirm against NameParser
+
+        Returns a (indexer_id, season, [episodes]) tuple. The first two may be None and episodes may be []
+        if none were found.
+        """
+
+        logger.log(u"Analyzing name " + repr(name))
+
+        to_return = (None, None, [], None, None)
+
+        if not name:
+            return to_return
+
+        # strip the extension and known non-release-group suffixes before parsing
+        name = helpers.remove_non_release_groups(helpers.remove_extension(name))
+
+        # parse the name to break it into show name, season, and episode
+        np = NameParser(file, tryIndexers=True, trySceneExceptions=True, convert=True)
+        parse_result = np.parse(name)
+
+        # show object
+        show = parse_result.show
+
+        if parse_result.is_air_by_date:
+            # season -1 is a sentinel for air-by-date shows; episodes carries the air date,
+            # which _find_info later converts to a real season/episode via the database
+            season = -1
+            episodes = [parse_result.air_date]
+        else:
+            season = parse_result.season_number
+            episodes = parse_result.episode_numbers
+
+        to_return = (show, season, episodes, parse_result.quality, None)
+
+        # remember release group / proper status / release name for later
+        self._finalize(parse_result)
+        return to_return
+
+    def _build_anidb_episode(self, connection, filePath):
+        ep = adba.Episode(connection, filePath=filePath,
+                          paramsF=["quality", "anidb_file_name", "crc32"],
+                          paramsA=["epno", "english_name", "short_name_list", "other_name", "synonym_list"])
+
+        return ep
+
+    def _add_to_anidb_mylist(self, filePath):
+        """
+        Best-effort: add the given file to the user's anidb mylist.
+
+        Failures are logged and swallowed so post-processing continues.
+        """
+        if helpers.set_up_anidb_connection():
+            if not self.anidbEpisode:  # the anidb episode object wasn't built during name parsing; build it now
+                self.anidbEpisode = self._build_anidb_episode(sickbeard.ADBA_CONNECTION, filePath)
+
+            self._log(u"Adding the file to the anidb mylist", logger.DEBUG)
+            try:
+                self.anidbEpisode.add_to_mylist(status=1)  # status = 1 sets the status of the file to "internal HDD"
+            except Exception, e:
+                self._log(u"exception msg: " + str(e))
+
+    def _find_info(self):
+        """
+        For a given file try to find the showid, season, and episode.
+
+        Tries a series of lookups (history, then various name analyses) and merges
+        their partial answers. Returns a (show, season, episodes, quality, version)
+        tuple; any element may be None/[] if it couldn't be determined.
+        """
+
+        show = season = quality = version = None
+        episodes = []
+
+        # try to look up the nzb in history
+        attempt_list = [self._history_lookup,
+
+                        # try to analyze the nzb name
+                        lambda: self._analyze_name(self.nzb_name),
+
+                        # try to analyze the file name
+                        lambda: self._analyze_name(self.file_name),
+
+                        # try to analyze the dir name
+                        lambda: self._analyze_name(self.folder_name),
+
+                        # try to analyze the file + dir names together
+                        lambda: self._analyze_name(self.file_path),
+
+                        # try to analyze the dir + file name together as one name
+                        lambda: self._analyze_name(self.folder_name + u' ' + self.file_name)
+        ]
+
+        # attempt every possible method to get our info
+        for cur_attempt in attempt_list:
+
+            try:
+                (cur_show, cur_season, cur_episodes, cur_quality, cur_version) = cur_attempt()
+            except (InvalidNameException, InvalidShowException), e:
+                logger.log(u"Unable to parse, skipping: " + ex(e), logger.DEBUG)
+                continue
+
+            if not cur_show:
+                continue
+            else:
+                show = cur_show
+
+            # a quality from history (in_history) wins over later name-based guesses
+            if cur_quality and not (self.in_history and quality):
+                quality = cur_quality
+
+            # we only get current version for animes from history to prevent issues with old database entries
+            if cur_version is not None:
+                version = cur_version
+
+            if cur_season != None:
+                season = cur_season
+            if cur_episodes:
+                episodes = cur_episodes
+
+            # for air-by-date shows we need to look up the season/episode from database
+            # (season == -1 is the air-by-date sentinel set by _analyze_name)
+            if season == -1 and show and episodes:
+                self._log(
+                    u"Looks like this is an air-by-date or sports show, attempting to convert the date to season/episode",
+                    logger.DEBUG)
+                airdate = episodes[0].toordinal()
+                myDB = db.DBConnection()
+                # Ignore season 0 when searching for episode(Conflict between special and regular episode, same air date)
+                sql_result = myDB.select(
+                    "SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ? and season != 0",
+                    [show.indexerid, show.indexer, airdate])
+
+                if sql_result:
+                    season = int(sql_result[0][0])
+                    episodes = [int(sql_result[0][1])]
+                else:
+                    # Found no result, try with season 0
+                    sql_result = myDB.select(
+                        "SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?",
+                        [show.indexerid, show.indexer, airdate])
+                    if sql_result:
+                        season = int(sql_result[0][0])
+                        episodes = [int(sql_result[0][1])]
+                    else:
+                        self._log(u"Unable to find episode with date " + str(episodes[0]) + u" for show " + str(
+                        show.indexerid) + u", skipping", logger.DEBUG)
+                        # we don't want to leave dates in the episode list if we couldn't convert them to real episode numbers
+                        episodes = []
+                        continue
+
+            # if there's no season then we can hopefully just use 1 automatically
+            elif season == None and show:
+                myDB = db.DBConnection()
+                numseasonsSQlResult = myDB.select(
+                    "SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and indexer = ? and season != 0",
+                    [show.indexerid, show.indexer])
+                if int(numseasonsSQlResult[0][0]) == 1 and season == None:
+                    self._log(
+                        u"Don't have a season number, but this show appears to only have 1 season, setting season number to 1...",
+                        logger.DEBUG)
+                    season = 1
+
+            # stop at the first attempt that produced a complete answer
+            if show and season and episodes:
+                return (show, season, episodes, quality, version)
+
+        return (show, season, episodes, quality, version)
+
+    def _get_ep_obj(self, show, season, episodes):
+        """
+        Retrieve the TVEpisode object requested.
+
+        show: The show object belonging to the show we want to process
+        season: The season of the episode (int)
+        episodes: A list of episodes to find (list of ints)
+
+        If the episode(s) can be found then a TVEpisode object with the correct related eps will
+        be instantiated and returned. If the episode can't be found then None will be returned.
+        """
+
+        root_ep = None
+        for cur_episode in episodes:
+            self._log(u"Retrieving episode object for " + str(season) + "x" + str(cur_episode), logger.DEBUG)
+
+            # now that we've figured out which episode this file is just load it manually
+            try:
+                curEp = show.getEpisode(season, cur_episode)
+                if not curEp:
+                    raise exceptions.EpisodeNotFoundException()
+            except exceptions.EpisodeNotFoundException, e:
+                self._log(u"Unable to create episode: " + ex(e), logger.DEBUG)
+                raise exceptions.PostProcessingFailed()
+
+            # associate all the episodes together under a single root episode
+            if root_ep == None:
+                root_ep = curEp
+                root_ep.relatedEps = []
+            elif curEp not in root_ep.relatedEps:
+                root_ep.relatedEps.append(curEp)
+
+        return root_ep
+
+    def _get_quality(self, ep_obj):
+        """
+        Determines the quality of the file that is being post processed, first by checking if it is directly
+        available in the TVEpisode's status or otherwise by parsing through the data available.
+
+        ep_obj: The TVEpisode object related to the file we are post processing
+
+        Returns: A quality value found in common.Quality
+        """
+
+        ep_quality = common.Quality.UNKNOWN
+
+        # if there is a quality available in the status then we don't need to bother guessing from the filename
+        if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
+            oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)  # @UnusedVariable
+            if ep_quality != common.Quality.UNKNOWN:
+                self._log(
+                    u"The old status had a quality in it, using that: " + common.Quality.qualityStrings[ep_quality],
+                    logger.DEBUG)
+                return ep_quality
+
+        # nzb name is the most reliable if it exists, followed by folder name and lastly file name
+        name_list = [self.nzb_name, self.folder_name, self.file_name]
+
+        # search all possible names for our new quality, in case the file or dir doesn't have it
+        for cur_name in name_list:
+
+            # some stuff might be None at this point still
+            if not cur_name:
+                continue
+
+            ep_quality = common.Quality.nameQuality(cur_name, ep_obj.show.is_anime)
+            self._log(
+                u"Looking up quality for name " + cur_name + u", got " + common.Quality.qualityStrings[ep_quality],
+                logger.DEBUG)
+
+            # if we find a good one then use it
+            if ep_quality != common.Quality.UNKNOWN:
+                logger.log(cur_name + u" looks like it has quality " + common.Quality.qualityStrings[
+                    ep_quality] + ", using that", logger.DEBUG)
+                return ep_quality
+
+        # Try getting quality from the episode (snatched) status
+        if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER:
+            oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)  # @UnusedVariable
+            if ep_quality != common.Quality.UNKNOWN:
+                self._log(
+                    u"The old status had a quality in it, using that: " + common.Quality.qualityStrings[ep_quality],
+                    logger.DEBUG)
+                return ep_quality
+
+        # Try guessing quality from the file name
+        ep_quality = common.Quality.assumeQuality(self.file_name)
+        self._log(
+            u"Guessing quality for name " + self.file_name + u", got " + common.Quality.qualityStrings[ep_quality],
+            logger.DEBUG)
+        if ep_quality != common.Quality.UNKNOWN:
+            logger.log(self.file_name + u" looks like it has quality " + common.Quality.qualityStrings[
+                ep_quality] + ", using that", logger.DEBUG)
+            return ep_quality
+
+        test = str(ep_quality)
+        return ep_quality
+
+    def _run_extra_scripts(self, ep_obj):
+        """
+        Executes any extra scripts defined in the config.
+
+        ep_obj: The object to use when calling the extra script
+        """
+        for curScriptName in sickbeard.EXTRA_SCRIPTS:
+
+            # generate a safe command line string to execute the script and provide all the parameters
+            script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", curScriptName) if piece.strip()]
+            script_cmd[0] = ek.ek(os.path.abspath, script_cmd[0])
+            self._log(u"Absolute path to script: " + script_cmd[0], logger.DEBUG)
+
+            script_cmd = script_cmd + [ep_obj.location, self.file_path, str(ep_obj.show.indexerid), str(ep_obj.season),
+                                       str(ep_obj.episode), str(ep_obj.airdate)]
+
+            # use subprocess to run the command and capture output
+            self._log(u"Executing command " + str(script_cmd))
+            try:
+                p = subprocess.Popen(script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+                                     stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
+                out, err = p.communicate()  # @UnusedVariable
+                self._log(u"Script result: " + str(out), logger.DEBUG)
+
+            except OSError, e:
+                self._log(u"Unable to run extra_script: " + ex(e))
+
+            except Exception, e:
+                self._log(u"Unable to run extra_script: " + ex(e))
+
+    def _is_priority(self, ep_obj, new_ep_quality):
+        """
+        Determines if the episode is a priority download or not (if it is expected). Episodes which are expected
+        (snatched) or larger than the existing episode are priority, others are not.
+
+        ep_obj: The TVEpisode object in question
+        new_ep_quality: The quality of the episode that is being processed
+
+        Returns: True if the episode is priority, False otherwise.
+        """
+
+        # explicit override from the caller wins over everything else
+        if self.is_priority:
+            return True
+
+        old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)
+
+        # if SB downloaded this on purpose we likely have a priority download
+        if self.in_history or ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
+            # if the episode is still in a snatched status, then we can assume we want this
+            if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:
+                self._log(u"SB snatched this episode and it is not processed before", logger.DEBUG)
+                return True
+            # if it's not snatched, we only want it if the new quality is higher or if it's a proper of equal or higher quality
+            if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
+                self._log(u"SB snatched this episode and it is a higher quality so I'm marking it as priority", logger.DEBUG)
+                return True
+            if self.is_proper and new_ep_quality >= old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
+                self._log(u"SB snatched this episode and it is a proper of equal or higher quality so I'm marking it as priority", logger.DEBUG)
+                return True
+            return False
+
+        # if the user downloaded it manually and it's higher quality than the existing episode then it's priority
+        if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
+            self._log(
+                u"This was manually downloaded but it appears to be better quality than what we have so I'm marking it as priority",
+                logger.DEBUG)
+            return True
+
+        # if the user downloaded it manually and it appears to be a PROPER/REPACK then it's priority
+        if self.is_proper and new_ep_quality >= old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
+            self._log(u"This was manually downloaded but it appears to be a proper so I'm marking it as priority",
+                      logger.DEBUG)
+            return True
+
+        return False
+
+    def process(self):
+        """
+        Post-process a given file
+
+        Returns True when processing finished (or the file was judged safe to replace),
+        False when the file should be skipped; raises exceptions.PostProcessingFailed
+        on unrecoverable errors.
+        """
+
+        self._log(u"Processing " + self.file_path + " (" + str(self.nzb_name) + ")")
+
+        if ek.ek(os.path.isdir, self.file_path):
+            self._log(u"File " + self.file_path + " seems to be a directory")
+            return False
+
+        for ignore_file in self.IGNORED_FILESTRINGS:
+            if ignore_file in self.file_path:
+                self._log(u"File " + self.file_path + " is ignored type, skipping")
+                return False
+
+        # reset per-file stuff
+        self.in_history = False
+
+        # reset the anidb episode object
+        self.anidbEpisode = None
+
+        # try to find the file info
+        (show, season, episodes, quality, version) = self._find_info()
+        if not show:
+            self._log(u"This show isn't in your list, you need to add it to SB before post-processing an episode",
+                      logger.WARNING)
+            raise exceptions.PostProcessingFailed()
+        elif season == None or not episodes:
+            self._log(u"Not enough information to determine what episode this is", logger.DEBUG)
+            self._log(u"Quitting post-processing", logger.DEBUG)
+            return False
+
+        # retrieve/create the corresponding TVEpisode objects
+        ep_obj = self._get_ep_obj(show, season, episodes)
+        old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)
+
+        # get the quality of the episode we're processing
+        if quality:
+            self._log(u"Snatch history had a quality in it, using that: " + common.Quality.qualityStrings[quality],
+                      logger.DEBUG)
+            new_ep_quality = quality
+        else:
+            new_ep_quality = self._get_quality(ep_obj)
+
+        logger.log(u"Quality of the episode we're processing: " + str(new_ep_quality), logger.DEBUG)
+
+        # see if this is a priority download (is it snatched, in history, PROPER, or BEST)
+        priority_download = self._is_priority(ep_obj, new_ep_quality)
+        self._log(u"Is ep a priority download: " + str(priority_download), logger.DEBUG)
+
+        # get the version of the episode we're processing
+        if version:
+            self._log(u"Snatch history had a version in it, using that: v" + str(version),
+                      logger.DEBUG)
+            new_ep_version = version
+        else:
+            new_ep_version = -1
+
+        # check for an existing file
+        existing_file_status = self._checkForExistingFile(ep_obj.location)
+
+        # if it's not priority then we don't want to replace smaller files in case it was a mistake
+        if not priority_download:
+
+            # Not a priority and the quality is lower than what we already have
+            if (new_ep_quality < old_ep_quality and new_ep_quality != common.Quality.UNKNOWN) and not existing_file_status == PostProcessor.DOESNT_EXIST:
+                self._log(u"File exists and new file quality is lower than existing, marking it unsafe to replace", logger.DEBUG)
+                return False
+
+            # if there's an existing file that we don't want to replace stop here
+            if existing_file_status == PostProcessor.EXISTS_LARGER:
+                if self.is_proper:
+                    self._log(
+                        u"File exists and new file is smaller, new file is a proper/repack, marking it safe to replace",
+                        logger.DEBUG)
+                    # NOTE(review): this returns from process() before the file is actually
+                    # moved/copied -- confirm the early return True is intentional
+                    return True
+
+                else:
+                    self._log(u"File exists and new file is smaller, marking it unsafe to replace", logger.DEBUG)
+                    return False
+
+            elif existing_file_status == PostProcessor.EXISTS_SAME:
+                self._log(u"File exists and new file is same size, marking it unsafe to replace", logger.DEBUG)
+                return False
+
+        # if the file is priority then we're going to replace it even if it exists
+        else:
+            self._log(
+                u"This download is marked a priority download so I'm going to replace an existing file if I find one",
+                logger.DEBUG)
+
+        # try to find out if we have enough space to perform the copy or move action.
+        if not verify_freespace(self.file_path, ek.ek(os.path.dirname, ep_obj.show._location), [ep_obj] + ep_obj.relatedEps):
+            self._log("Not enough space to continue PP, exiting")
+            return False
+
+        # delete the existing file (and company)
+        for cur_ep in [ep_obj] + ep_obj.relatedEps:
+            try:
+                self._delete(cur_ep.location, associated_files=True)
+
+                # clean up any left over folders
+                if cur_ep.location:
+                    helpers.delete_empty_folders(ek.ek(os.path.dirname, cur_ep.location),
+                                                 keep_dir=ep_obj.show._location)
+            except (OSError, IOError):
+                raise exceptions.PostProcessingFailed("Unable to delete the existing files")
+
+            # set the status of the episodes
+            # for curEp in [ep_obj] + ep_obj.relatedEps:
+            #    curEp.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality)
+
+        # if the show directory doesn't exist then make it if allowed
+        if not ek.ek(os.path.isdir, ep_obj.show._location) and sickbeard.CREATE_MISSING_SHOW_DIRS:
+            self._log(u"Show directory doesn't exist, creating it", logger.DEBUG)
+            try:
+                ek.ek(os.mkdir, ep_obj.show._location)
+                # do the library update for synoindex
+                notifiers.synoindex_notifier.addFolder(ep_obj.show._location)
+            except (OSError, IOError):
+                raise exceptions.PostProcessingFailed("Unable to create the show directory: " + ep_obj.show._location)
+
+            # get metadata for the show (but not episode because it hasn't been fully processed)
+            ep_obj.show.writeMetadata(True)
+
+        # update the ep info before we rename so the quality & release name go into the name properly
+        sql_l = []
+        trakt_data = []
+        for cur_ep in [ep_obj] + ep_obj.relatedEps:
+            with cur_ep.lock:
+
+                if self.release_name:
+                    self._log("Found release name " + self.release_name, logger.DEBUG)
+                    cur_ep.release_name = self.release_name
+                else:
+                    cur_ep.release_name = ""
+
+                if ep_obj.status in common.Quality.SNATCHED_BEST:
+                    cur_ep.status = common.Quality.compositeStatus(common.ARCHIVED, new_ep_quality)
+                else:
+                    cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality)
+
+                # reset subtitle bookkeeping; subtitles are re-downloaded after the move below
+                cur_ep.subtitles = []
+
+                cur_ep.subtitles_searchcount = 0
+
+                cur_ep.subtitles_lastsearch = '0001-01-01 00:00:00'
+
+                cur_ep.is_proper = self.is_proper
+
+                cur_ep.version = new_ep_version
+
+                if self.release_group:
+                    cur_ep.release_group = self.release_group
+                else:
+                    cur_ep.release_group = ""
+
+                sql_l.append(cur_ep.get_sql())
+
+                trakt_data.append((cur_ep.season, cur_ep.episode))
+
+        data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
+
+        if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.TRAKT_REMOVE_WATCHLIST:
+            logger.log(u"Remove episodes, showid: indexerid " + str(show.indexerid) + ", Title " + str(show.name) + " to Traktv Watchlist", logger.DEBUG)
+            if data:
+                notifiers.trakt_notifier.update_watchlist(show, data_episode=data, update="remove")
+
+        # Just want to keep this consistent for failed handling right now
+        releaseName = show_name_helpers.determineReleaseName(self.folder_path, self.nzb_name)
+        if releaseName is not None:
+            failed_history.logSuccess(releaseName)
+        else:
+            self._log(u"Couldn't find release in snatch history", logger.WARNING)
+
+        # find the destination folder
+        try:
+            proper_path = ep_obj.proper_path()
+            proper_absolute_path = ek.ek(os.path.join, ep_obj.show.location, proper_path)
+
+            dest_path = ek.ek(os.path.dirname, proper_absolute_path)
+        except exceptions.ShowDirNotFoundException:
+            raise exceptions.PostProcessingFailed(
+                u"Unable to post-process an episode if the show dir doesn't exist, quitting")
+
+        self._log(u"Destination folder for this episode: " + dest_path, logger.DEBUG)
+
+        # create any folders we need
+        helpers.make_dirs(dest_path)
+
+        # figure out the base name of the resulting episode file
+        if sickbeard.RENAME_EPISODES:
+            orig_extension = self.file_name.rpartition('.')[-1]
+            new_base_name = ek.ek(os.path.basename, proper_path)
+            new_file_name = new_base_name + '.' + orig_extension
+
+        else:
+            # if we're not renaming then there's no new base name, we'll just use the existing name
+            new_base_name = None
+            new_file_name = self.file_name
+
+        # add to anidb
+        if ep_obj.show.is_anime and sickbeard.ANIDB_USE_MYLIST:
+            self._add_to_anidb_mylist(self.file_path)
+
+        try:
+            # move the episode and associated files to the show dir
+            if self.process_method == "copy":
+                self._copy(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
+                           sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
+            elif self.process_method == "move":
+                self._move(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
+                           sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
+            elif self.process_method == "hardlink":
+                self._hardlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
+                               sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
+            elif self.process_method == "symlink":
+                self._moveAndSymlink(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES,
+                                     sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
+            else:
+                logger.log(u"Unknown process method: " + str(self.process_method), logger.ERROR)
+                raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
+        except (OSError, IOError):
+            raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
+
+        # download subtitles
+        if sickbeard.USE_SUBTITLES and ep_obj.show.subtitles:
+            for cur_ep in [ep_obj] + ep_obj.relatedEps:
+                with cur_ep.lock:
+                    cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
+                    cur_ep.downloadSubtitles(force=True)
+
+        # now that processing has finished, we can put the info in the DB. If we do it earlier, then when processing fails, it won't try again.
+        if len(sql_l) > 0:
+            myDB = db.DBConnection()
+            myDB.mass_action(sql_l)
+
+        # put the new location in the database
+        sql_l = []
+        for cur_ep in [ep_obj] + ep_obj.relatedEps:
+            with cur_ep.lock:
+                cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
+                sql_l.append(cur_ep.get_sql())
+
+        if len(sql_l) > 0:
+            myDB = db.DBConnection()
+            myDB.mass_action(sql_l)
+
+        # set file modify stamp to show airdate
+        if sickbeard.AIRDATE_EPISODES:
+            for cur_ep in [ep_obj] + ep_obj.relatedEps:
+                with cur_ep.lock:
+                    cur_ep.airdateModifyStamp()
+
+        # generate nfo/tbn
+        ep_obj.createMetaFiles()
+
+        # log it to history
+        history.logDownload(ep_obj, self.file_path, new_ep_quality, self.release_group, new_ep_version)
+
+        # send notifications
+        notifiers.notify_download(ep_obj._format_pattern('%SN - %Sx%0E - %EN - %QN'))
+
+        # do the library update for KODI
+        notifiers.kodi_notifier.update_library(ep_obj.show.name)
+
+        # do the library update for Plex
+        notifiers.plex_notifier.update_library()
+
+        # do the library update for NMJ
+        # nmj_notifier kicks off its library update when the notify_download is issued (inside notifiers)
+
+        # do the library update for Synology Indexer
+        notifiers.synoindex_notifier.addFile(ep_obj.location)
+
+        # do the library update for pyTivo
+        notifiers.pytivo_notifier.update_library(ep_obj)
+
+        # do the library update for Trakt
+        notifiers.trakt_notifier.update_library(ep_obj)
+
+        self._run_extra_scripts(ep_obj)
+
+        return True
diff --git a/sickbeard/properFinder.py b/sickbeard/properFinder.py
index 9319d7eecf4476f3cd1a13c99115108ceeda6107..aff484a1ddeecedb5b1d8ed0a72fe7d843568271 100644
--- a/sickbeard/properFinder.py
+++ b/sickbeard/properFinder.py
@@ -1,277 +1,277 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import datetime
-import operator
-import threading
-import traceback
-from search import pickBestResult
-
-import sickbeard
-
-from sickbeard import db
-from sickbeard import exceptions
-from sickbeard.exceptions import ex
-from sickbeard import helpers, logger
-from sickbeard import search
-from sickbeard import history
-
-from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality
-
-from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-
-
-class ProperFinder():
-    def __init__(self):
-        self.amActive = False
-
-    def run(self, force=False):
-
-        if not sickbeard.DOWNLOAD_PROPERS:
-            return
-
-        logger.log(u"Beginning the search for new propers")
-
-        self.amActive = True
-
-        propers = self._getProperList()
-
-        if propers:
-            self._downloadPropers(propers)
-
-        self._set_lastProperSearch(datetime.datetime.today().toordinal())
-
-        run_at = ""
-        if None is sickbeard.properFinderScheduler.start_time:
-            run_in = sickbeard.properFinderScheduler.lastRun + sickbeard.properFinderScheduler.cycleTime - datetime.datetime.now()
-            hours, remainder = divmod(run_in.seconds, 3600)
-            minutes, seconds = divmod(remainder, 60)
-            run_at = u", next check in approx. " + (
-                "%dh, %dm" % (hours, minutes) if 0 < hours else "%dm, %ds" % (minutes, seconds))
-
-        logger.log(u"Completed the search for new propers%s" % run_at)
-
-        self.amActive = False
-
-    def _getProperList(self):
-        propers = {}
-
-        search_date = datetime.datetime.today() - datetime.timedelta(days=2)
-
-        # for each provider get a list of the
-        origThreadName = threading.currentThread().name
-        providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive()]
-        for curProvider in providers:
-            threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
-
-            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
-
-            try:
-                curPropers = curProvider.findPropers(search_date)
-            except exceptions.AuthException, e:
-                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
-                continue
-            except Exception, e:
-                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
-                logger.log(traceback.format_exc(), logger.DEBUG)
-                continue
-            finally:
-                threading.currentThread().name = origThreadName
-
-            # if they haven't been added by a different provider than add the proper to the list
-            for x in curPropers:
-                name = self._genericName(x.name)
-                if not name in propers:
-                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
-                    x.provider = curProvider
-                    propers[name] = x
-
-        # take the list of unique propers and get it sorted by
-        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
-        finalPropers = []
-
-        for curProper in sortedPropers:
-
-            try:
-                myParser = NameParser(False)
-                parse_result = myParser.parse(curProper.name)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid show", logger.DEBUG)
-                continue
-
-            if not parse_result.series_name:
-                continue
-
-            if not parse_result.episode_numbers:
-                logger.log(
-                    u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode",
-                    logger.DEBUG)
-                continue
-
-            logger.log(
-                u"Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name,
-                logger.DEBUG)
-
-            # set the indexerid in the db to the show's indexerid
-            curProper.indexerid = parse_result.show.indexerid
-
-            # set the indexer in the db to the show's indexer
-            curProper.indexer = parse_result.show.indexer
-
-            # populate our Proper instance
-            curProper.show = parse_result.show
-            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
-            curProper.episode = parse_result.episode_numbers[0]
-            curProper.release_group = parse_result.release_group
-            curProper.version = parse_result.version
-            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
-            curProper.content = None
-
-            # filter release
-            bestResult = pickBestResult(curProper, parse_result.show)
-            if not bestResult:
-                logger.log(u"Proper " + curProper.name + " were rejected by our release filters.", logger.DEBUG)
-                continue
-
-            # only get anime proper if it has release group and version
-            if bestResult.show.is_anime:
-                if not bestResult.release_group and bestResult.version == -1:
-                    logger.log(u"Proper " + bestResult.name + " doesn't have a release group and version, ignoring it",
-                               logger.DEBUG)
-                    continue
-
-            # check if we actually want this proper (if it's the right quality)
-            myDB = db.DBConnection()
-            sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
-                                     [bestResult.indexerid, bestResult.season, bestResult.episode])
-            if not sqlResults:
-                continue
-
-            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
-            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))
-            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
-                continue
-
-            # check if we actually want this proper (if it's the right release group and a higher version)
-            if bestResult.show.is_anime:
-                myDB = db.DBConnection()
-                sqlResults = myDB.select(
-                    "SELECT release_group, version FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
-                    [bestResult.indexerid, bestResult.season, bestResult.episode])
-
-                oldVersion = int(sqlResults[0]["version"])
-                oldRelease_group = (sqlResults[0]["release_group"])
-
-                if oldVersion > -1 and oldVersion < bestResult.version:
-                    logger.log("Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
-                else:
-                    continue
-
-                if oldRelease_group != bestResult.release_group:
-                    logger.log("Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
-                    continue
-
-            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
-            if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in map(
-                    operator.attrgetter('indexerid', 'season', 'episode'), finalPropers):
-                logger.log(u"Found a proper that we need: " + str(bestResult.name))
-                finalPropers.append(bestResult)
-
-        return finalPropers
-
-    def _downloadPropers(self, properList):
-
-        for curProper in properList:
-
-            historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)
-
-            # make sure the episode has been downloaded before
-            myDB = db.DBConnection()
-            historyResults = myDB.select(
-                "SELECT resource FROM history " +
-                "WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? " +
-                "AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED]) + ")",
-                [curProper.indexerid, curProper.season, curProper.episode, curProper.quality,
-                 historyLimit.strftime(history.dateFormat)])
-
-            # if we didn't download this episode in the first place we don't know what quality to use for the proper so we can't do it
-            if len(historyResults) == 0:
-                logger.log(
-                    u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
-                continue
-
-            else:
-
-                # make sure that none of the existing history downloads are the same proper we're trying to download
-                clean_proper_name = self._genericName(helpers.remove_non_release_groups(curProper.name))
-                isSame = False
-                for curResult in historyResults:
-                    # if the result exists in history already we need to skip it
-                    if self._genericName(helpers.remove_non_release_groups(curResult["resource"])) == clean_proper_name:
-                        isSame = True
-                        break
-                if isSame:
-                    logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
-                    continue
-
-                # get the episode object
-                epObj = curProper.show.getEpisode(curProper.season, curProper.episode)
-
-                # make the result object
-                result = curProper.provider.getResult([epObj])
-                result.show = curProper.show
-                result.url = curProper.url
-                result.name = curProper.name
-                result.quality = curProper.quality
-                result.release_group = curProper.release_group
-                result.version = curProper.version
-                result.content = curProper.content
-
-                # snatch it
-                search.snatchEpisode(result, SNATCHED_PROPER)
-
-    def _genericName(self, name):
-        return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()
-
-    def _set_lastProperSearch(self, when):
-
-        logger.log(u"Setting the last Proper search in the DB to " + str(when), logger.DEBUG)
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select("SELECT * FROM info")
-
-        if len(sqlResults) == 0:
-            myDB.action("INSERT INTO info (last_backlog, last_indexer, last_proper_search) VALUES (?,?,?)",
-                        [0, 0, str(when)])
-        else:
-            myDB.action("UPDATE info SET last_proper_search=" + str(when))
-
-    def _get_lastProperSearch(self):
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select("SELECT * FROM info")
-
-        try:
-            last_proper_search = datetime.date.fromordinal(int(sqlResults[0]["last_proper_search"]))
-        except:
-            return datetime.date.fromordinal(1)
-
-        return last_proper_search
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+import operator
+import threading
+import traceback
+from search import pickBestResult
+
+import sickbeard
+
+from sickbeard import db
+from sickbeard import exceptions
+from sickbeard.exceptions import ex
+from sickbeard import helpers, logger
+from sickbeard import search
+from sickbeard import history
+
+from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality
+
+from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+
+
+class ProperFinder():
+    def __init__(self):
+        self.amActive = False
+
+    def run(self, force=False):
+
+        if not sickbeard.DOWNLOAD_PROPERS:
+            return
+
+        logger.log(u"Beginning the search for new propers")
+
+        self.amActive = True
+
+        propers = self._getProperList()
+
+        if propers:
+            self._downloadPropers(propers)
+
+        self._set_lastProperSearch(datetime.datetime.today().toordinal())
+
+        run_at = ""
+        if None is sickbeard.properFinderScheduler.start_time:
+            run_in = sickbeard.properFinderScheduler.lastRun + sickbeard.properFinderScheduler.cycleTime - datetime.datetime.now()
+            hours, remainder = divmod(run_in.seconds, 3600)
+            minutes, seconds = divmod(remainder, 60)
+            run_at = u", next check in approx. " + (
+                "%dh, %dm" % (hours, minutes) if 0 < hours else "%dm, %ds" % (minutes, seconds))
+
+        logger.log(u"Completed the search for new propers%s" % run_at)
+
+        self.amActive = False
+
+    def _getProperList(self):
+        propers = {}
+
+        search_date = datetime.datetime.today() - datetime.timedelta(days=2)
+
+        # for each active provider, ask for the list of proper releases seen since search_date
+        origThreadName = threading.currentThread().name
+        providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive()]
+        for curProvider in providers:
+            threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
+
+            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
+
+            try:
+                curPropers = curProvider.findPropers(search_date)
+            except exceptions.AuthException, e:
+                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
+                continue
+            except Exception, e:
+                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
+                logger.log(traceback.format_exc(), logger.DEBUG)
+                continue
+            finally:
+                threading.currentThread().name = origThreadName
+
+            # if the proper hasn't already been added by a different provider, then add it to the list
+            for x in curPropers:
+                name = self._genericName(x.name)
+                if not name in propers:
+                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
+                    x.provider = curProvider
+                    propers[name] = x
+
+        # take the list of unique propers and sort it by date, newest first
+        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
+        finalPropers = []
+
+        for curProper in sortedPropers:
+
+            try:
+                myParser = NameParser(False)
+                parse_result = myParser.parse(curProper.name)
+            except InvalidNameException:
+                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
+                continue
+            except InvalidShowException:
+                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid show", logger.DEBUG)
+                continue
+
+            if not parse_result.series_name:
+                continue
+
+            if not parse_result.episode_numbers:
+                logger.log(
+                    u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode",
+                    logger.DEBUG)
+                continue
+
+            logger.log(
+                u"Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name,
+                logger.DEBUG)
+
+            # set the indexerid in the db to the show's indexerid
+            curProper.indexerid = parse_result.show.indexerid
+
+            # set the indexer in the db to the show's indexer
+            curProper.indexer = parse_result.show.indexer
+
+            # populate our Proper instance
+            curProper.show = parse_result.show
+            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
+            curProper.episode = parse_result.episode_numbers[0]
+            curProper.release_group = parse_result.release_group
+            curProper.version = parse_result.version
+            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
+            curProper.content = None
+
+            # filter release
+            bestResult = pickBestResult(curProper, parse_result.show)
+            if not bestResult:
+                logger.log(u"Proper " + curProper.name + " were rejected by our release filters.", logger.DEBUG)
+                continue
+
+            # only get anime proper if it has release group and version
+            if bestResult.show.is_anime:
+                if not bestResult.release_group and bestResult.version == -1:
+                    logger.log(u"Proper " + bestResult.name + " doesn't have a release group and version, ignoring it",
+                               logger.DEBUG)
+                    continue
+
+            # check if we actually want this proper (if it's the right quality)
+            myDB = db.DBConnection()
+            sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
+                                     [bestResult.indexerid, bestResult.season, bestResult.episode])
+            if not sqlResults:
+                continue
+
+            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
+            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))
+            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
+                continue
+
+            # check if we actually want this proper (if it's the right release group and a higher version)
+            if bestResult.show.is_anime:
+                myDB = db.DBConnection()
+                sqlResults = myDB.select(
+                    "SELECT release_group, version FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
+                    [bestResult.indexerid, bestResult.season, bestResult.episode])
+
+                oldVersion = int(sqlResults[0]["version"])
+                oldRelease_group = (sqlResults[0]["release_group"])
+
+                if oldVersion > -1 and oldVersion < bestResult.version:
+                    logger.log("Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
+                else:
+                    continue
+
+                if oldRelease_group != bestResult.release_group:
+                    logger.log("Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
+                    continue
+
+            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
+            if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in map(
+                    operator.attrgetter('indexerid', 'season', 'episode'), finalPropers):
+                logger.log(u"Found a proper that we need: " + str(bestResult.name))
+                finalPropers.append(bestResult)
+
+        return finalPropers
+
+    def _downloadPropers(self, properList):
+
+        for curProper in properList:
+
+            historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)
+
+            # make sure the episode has been downloaded before
+            myDB = db.DBConnection()
+            historyResults = myDB.select(
+                "SELECT resource FROM history " +
+                "WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? " +
+                "AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED]) + ")",
+                [curProper.indexerid, curProper.season, curProper.episode, curProper.quality,
+                 historyLimit.strftime(history.dateFormat)])
+
+            # if we didn't download this episode in the first place we don't know what quality to use for the proper so we can't do it
+            if len(historyResults) == 0:
+                logger.log(
+                    u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
+                continue
+
+            else:
+
+                # make sure that none of the existing history downloads are the same proper we're trying to download
+                clean_proper_name = self._genericName(helpers.remove_non_release_groups(curProper.name))
+                isSame = False
+                for curResult in historyResults:
+                    # if the result exists in history already we need to skip it
+                    if self._genericName(helpers.remove_non_release_groups(curResult["resource"])) == clean_proper_name:
+                        isSame = True
+                        break
+                if isSame:
+                    logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
+                    continue
+
+                # get the episode object
+                epObj = curProper.show.getEpisode(curProper.season, curProper.episode)
+
+                # make the result object
+                result = curProper.provider.getResult([epObj])
+                result.show = curProper.show
+                result.url = curProper.url
+                result.name = curProper.name
+                result.quality = curProper.quality
+                result.release_group = curProper.release_group
+                result.version = curProper.version
+                result.content = curProper.content
+
+                # snatch it
+                search.snatchEpisode(result, SNATCHED_PROPER)
+
+    def _genericName(self, name):
+        return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()
+
+    def _set_lastProperSearch(self, when):
+
+        logger.log(u"Setting the last Proper search in the DB to " + str(when), logger.DEBUG)
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select("SELECT * FROM info")
+
+        if len(sqlResults) == 0:
+            myDB.action("INSERT INTO info (last_backlog, last_indexer, last_proper_search) VALUES (?,?,?)",
+                        [0, 0, str(when)])
+        else:
+            myDB.action("UPDATE info SET last_proper_search=" + str(when))
+
+    def _get_lastProperSearch(self):
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select("SELECT * FROM info")
+
+        try:
+            last_proper_search = datetime.date.fromordinal(int(sqlResults[0]["last_proper_search"]))
+        except:
+            return datetime.date.fromordinal(1)
+
+        return last_proper_search
diff --git a/sickbeard/providers/__init__.py b/sickbeard/providers/__init__.py
index de63e1d82d48d5ae36349b6a15587cd0757a9bab..02f150c0bf4b584a01ea64ad2913fe6384c33b96 100755
--- a/sickbeard/providers/__init__.py
+++ b/sickbeard/providers/__init__.py
@@ -46,6 +46,7 @@ __all__ = ['ezrss',
            'rarbg',
            'tntvillage',
            'binsearch',
+           'eztv',
 ]
 
 import sickbeard
diff --git a/sickbeard/providers/btn.py b/sickbeard/providers/btn.py
index 3c1a44509eb086eba7829933b1adb7412e6d5ac5..9e929eb7ed2c2258cd26d7528df6121b1ef00a88 100644
--- a/sickbeard/providers/btn.py
+++ b/sickbeard/providers/btn.py
@@ -1,523 +1,523 @@
-# coding=utf-8
-# Author: Daniel Heimans
-# URL: http://code.google.com/p/sickbeard
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import time
-import socket
-import math
-import sickbeard
-import generic
-import itertools
-
-from sickbeard import classes
-from sickbeard import scene_exceptions
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard.helpers import sanitizeSceneName
-from sickbeard.exceptions import ex, AuthException
-from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
-from sickbeard import db
-from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-from sickbeard.common import Quality
-
-from lib import jsonrpclib
-from datetime import datetime
-
-
-class BTNProvider(generic.TorrentProvider):
-    def __init__(self):
-        generic.TorrentProvider.__init__(self, "BTN")
-
-        self.supportsBacklog = True
-        self.supportsAbsoluteNumbering = True
-
-        self.enabled = False
-        self.api_key = None
-        self.ratio = None
-
-        self.cache = BTNCache(self)
-
-        self.urls = {'base_url': "http://api.btnapps.net"}
-
-
-        self.url = self.urls['base_url']
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'btn.png'
-
-    def _checkAuth(self):
-        if not self.api_key:
-            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
-
-        return True
-
-    def _checkAuthFromData(self, parsedJSON):
-
-        if parsedJSON is None:
-            return self._checkAuth()
-
-        if 'api-error' in parsedJSON:
-            logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['api-error'],
-                       logger.DEBUG)
-            raise AuthException(
-                "Your authentication credentials for " + self.name + " are incorrect, check your config.")
-
-        return True
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        self._checkAuth()
-
-        results = []
-        params = {}
-        apikey = self.api_key
-
-        # age in seconds
-        if age:
-            params['age'] = "<=" + str(int(age))
-
-        if search_params:
-            params.update(search_params)
-
-        parsedJSON = self._api_call(apikey, params)
-        if not parsedJSON:
-            logger.log(u"No data returned from " + self.name, logger.ERROR)
-            return results
-
-        if self._checkAuthFromData(parsedJSON):
-
-            if 'torrents' in parsedJSON:
-                found_torrents = parsedJSON['torrents']
-            else:
-                found_torrents = {}
-
-            # We got something, we know the API sends max 1000 results at a time.
-            # See if there are more than 1000 results for our query, if not we
-            # keep requesting until we've got everything.
-            # max 150 requests per hour so limit at that. Scan every 15 minutes. 60 / 15 = 4.
-            max_pages = 150
-            results_per_page = 1000
-
-            if 'results' in parsedJSON and int(parsedJSON['results']) >= results_per_page:
-                pages_needed = int(math.ceil(int(parsedJSON['results']) / results_per_page))
-                if pages_needed > max_pages:
-                    pages_needed = max_pages
-
-                # +1 because range(1,4) = 1, 2, 3
-                for page in range(1, pages_needed + 1):
-                    parsedJSON = self._api_call(apikey, params, results_per_page, page * results_per_page)
-                    # Note that this these are individual requests and might time out individually. This would result in 'gaps'
-                    # in the results. There is no way to fix this though.
-                    if 'torrents' in parsedJSON:
-                        found_torrents.update(parsedJSON['torrents'])
-
-            for torrentid, torrent_info in found_torrents.iteritems():
-                (title, url) = self._get_title_and_url(torrent_info)
-
-                if title and url:
-                    results.append(torrent_info)
-
-        return results
-
-    def _api_call(self, apikey, params={}, results_per_page=1000, offset=0):
-
-        server = jsonrpclib.Server(self.url)
-        parsedJSON = {}
-
-        try:
-            parsedJSON = server.getTorrents(apikey, params, int(results_per_page), int(offset))
-
-        except jsonrpclib.jsonrpc.ProtocolError, error:
-            logger.log(u"JSON-RPC protocol error while accessing " + self.name + ": " + ex(error), logger.ERROR)
-            parsedJSON = {'api-error': ex(error)}
-            return parsedJSON
-
-        except socket.timeout:
-            logger.log(u"Timeout while accessing " + self.name, logger.WARNING)
-
-        except socket.error, error:
-            # Note that sometimes timeouts are thrown as socket errors
-            logger.log(u"Socket error while accessing " + self.name + ": " + error[1], logger.ERROR)
-
-        except Exception, error:
-            errorstring = str(error)
-            if (errorstring.startswith('<') and errorstring.endswith('>')):
-                errorstring = errorstring[1:-1]
-            logger.log(u"Unknown error while accessing " + self.name + ": " + errorstring, logger.ERROR)
-
-        return parsedJSON
-
-    def _get_title_and_url(self, parsedJSON):
-
-        # The BTN API gives a lot of information in response,
-        # however SickRage is built mostly around Scene or
-        # release names, which is why we are using them here.
-
-        if 'ReleaseName' in parsedJSON and parsedJSON['ReleaseName']:
-            title = parsedJSON['ReleaseName']
-
-        else:
-            # If we don't have a release name we need to get creative
-            title = u''
-            if 'Series' in parsedJSON:
-                title += parsedJSON['Series']
-            if 'GroupName' in parsedJSON:
-                title += '.' + parsedJSON['GroupName'] if title else parsedJSON['GroupName']
-            if 'Resolution' in parsedJSON:
-                title += '.' + parsedJSON['Resolution'] if title else parsedJSON['Resolution']
-            if 'Source' in parsedJSON:
-                title += '.' + parsedJSON['Source'] if title else parsedJSON['Source']
-            if 'Codec' in parsedJSON:
-                title += '.' + parsedJSON['Codec'] if title else parsedJSON['Codec']
-            if title:
-                title = title.replace(' ', '.')
-
-        url = None
-        if 'DownloadURL' in parsedJSON:
-            url = parsedJSON['DownloadURL']
-            if url:
-                # unescaped / is valid in JSON, but it can be escaped
-                url = url.replace("\\/", "/")
-
-        return (title, url)
-
-    def _get_season_search_strings(self, ep_obj):
-        search_params = []
-        current_params = {'category': 'Season'}
-
-        # Search for entire seasons: no need to do special things for air by date or sports shows
-        if ep_obj.show.air_by_date or ep_obj.show.sports:
-            # Search for the year of the air by date show
-            current_params['name'] = str(ep_obj.airdate).split('-')[0]
-        elif ep_obj.show.is_anime:
-            current_params['name'] = "%d" % ep_obj.scene_absolute_number
-        else:
-            current_params['name'] = 'Season ' + str(ep_obj.scene_season)
-
-        # search
-        if ep_obj.show.indexer == 1:
-            current_params['tvdb'] = ep_obj.show.indexerid
-            search_params.append(current_params)
-        elif ep_obj.show.indexer == 2:
-            current_params['tvrage'] = ep_obj.show.indexerid
-            search_params.append(current_params)
-        else:
-            name_exceptions = list(
-                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
-            for name in name_exceptions:
-                # Search by name if we don't have tvdb or tvrage id
-                current_params['series'] = sanitizeSceneName(name)
-                search_params.append(current_params)
-
-        return search_params
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        if not ep_obj:
-            return [{}]
-
-        to_return = []
-        search_params = {'category': 'Episode'}
-
-        # episode
-        if ep_obj.show.air_by_date or ep_obj.show.sports:
-            date_str = str(ep_obj.airdate)
-
-            # BTN uses dots in dates, we just search for the date since that
-            # combined with the series identifier should result in just one episode
-            search_params['name'] = date_str.replace('-', '.')
-        elif ep_obj.show.anime:
-            search_params['name'] = "%i" % int(ep_obj.scene_absolute_number)
-        else:
-            # Do a general name search for the episode, formatted like SXXEYY
-            search_params['name'] = "S%02dE%02d" % (ep_obj.scene_season, ep_obj.scene_episode)
-
-        # search
-        if ep_obj.show.indexer == 1:
-            search_params['tvdb'] = ep_obj.show.indexerid
-            to_return.append(search_params)
-        elif ep_obj.show.indexer == 2:
-            search_params['tvrage'] = ep_obj.show.indexerid
-            to_return.append(search_params)
-        else:
-            # add new query string for every exception
-            name_exceptions = list(
-                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
-            for cur_exception in name_exceptions:
-                search_params['series'] = sanitizeSceneName(cur_exception)
-                to_return.append(search_params)
-
-        return to_return
-
-    def _doGeneralSearch(self, search_string):
-        # 'search' looks as broad is it can find. Can contain episode overview and title for example,
-        # use with caution!
-        return self._doSearch({'search': search_string})
-
-    def findPropers(self, search_date=None):
-        results = []
-
-        search_terms = ['%.proper.%', '%.repack.%']
-
-        for term in search_terms:
-            for item in self._doSearch({'release': term}, age=4 * 24 * 60 * 60):
-                if item['Time']:
-                    try:
-                        result_date = datetime.fromtimestamp(float(item['Time']))
-                    except TypeError:
-                        result_date = None
-
-                    if result_date:
-                        if not search_date or result_date > search_date:
-                            title, url = self._get_title_and_url(item)
-                            results.append(classes.Proper(title, url, result_date, self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
-
-        self._checkAuth()
-        self.show = show
-
-        results = {}
-        itemList = []
-
-        searched_scene_season = None
-        for epObj in episodes:
-            # search cache for episode result
-            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
-            if cacheResult:
-                if epObj.episode not in results:
-                    results[epObj.episode] = cacheResult
-                else:
-                    results[epObj.episode].extend(cacheResult)
-
-                # found result, search next episode
-                continue
-
-            # skip if season already searched
-            if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
-                continue
-
-            # mark season searched for season pack searches so we can skip later on
-            searched_scene_season = epObj.scene_season
-
-            if search_mode == 'sponly':
-                # get season search results
-                for curString in self._get_season_search_strings(epObj):
-                    itemList += self._doSearch(curString, search_mode, len(episodes))
-            else:
-                # get single episode search results
-                for curString in self._get_episode_search_strings(epObj):
-                    itemList += self._doSearch(curString, search_mode, len(episodes))
-
-        # if we found what we needed already from cache then return results and exit
-        if len(results) == len(episodes):
-            return results
-
-        # sort list by quality
-        if len(itemList):
-            items = {}
-            itemsUnknown = []
-            for item in itemList:
-                quality = self.getQuality(item, anime=show.is_anime)
-                if quality == Quality.UNKNOWN:
-                    itemsUnknown += [item]
-                else:
-                    if quality not in items:
-                        items[quality] = [item]
-                    else:
-                        items[quality].append(item)
-
-            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
-            itemList += itemsUnknown if itemsUnknown else []
-
-        # filter results
-        cl = []
-        for item in itemList:
-            (title, url) = self._get_title_and_url(item)
-
-            # parse the file name
-            try:
-                myParser = NameParser(False, convert=True)
-                parse_result = myParser.parse(title)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)  # @UndefinedVariable
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
-                continue
-
-            showObj = parse_result.show
-            quality = parse_result.quality
-            release_group = parse_result.release_group
-            version = parse_result.version
-
-            addCacheEntry = False
-            if not (showObj.air_by_date or showObj.sports):
-                if search_mode == 'sponly': 
-                    if len(parse_result.episode_numbers):
-                        logger.log(
-                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    if len(parse_result.episode_numbers) and (
-                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
-                                                                                 ep.scene_episode in parse_result.episode_numbers]):
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                else:
-                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
-                                                                                                     episodes if
-                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
-                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    # we just use the existing info for normal searches
-                    actual_season = parse_result.season_number
-                    actual_episodes = parse_result.episode_numbers
-            else:
-                if not (parse_result.is_air_by_date):
-                    logger.log(
-                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
-                        logger.DEBUG)
-                    addCacheEntry = True
-                else:
-                    airdate = parse_result.air_date.toordinal()
-                    myDB = db.DBConnection()
-                    sql_results = myDB.select(
-                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
-                        [showObj.indexerid, airdate])
-
-                    if len(sql_results) != 1:
-                        logger.log(
-                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
-                            logger.WARNING)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    actual_season = int(sql_results[0]["season"])
-                    actual_episodes = [int(sql_results[0]["episode"])]
-
-            # add parsed result to cache for usage later on
-            if addCacheEntry:
-                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
-                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
-                if ci is not None:
-                    cl.append(ci)
-                continue
-
-            # make sure we want the episode
-            wantEp = True
-            for epNo in actual_episodes:
-                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
-                    wantEp = False
-                    break
-
-            if not wantEp:
-                logger.log(
-                    u"Ignoring result " + title + " because we don't want an episode that is " +
-                    Quality.qualityStrings[
-                        quality], logger.DEBUG)
-
-                continue
-
-            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
-
-            # make a result object
-            epObj = []
-            for curEp in actual_episodes:
-                epObj.append(showObj.getEpisode(actual_season, curEp))
-
-            result = self.getResult(epObj)
-            result.show = showObj
-            result.url = url
-            result.name = title
-            result.quality = quality
-            result.release_group = release_group
-            result.version = version
-            result.content = None
-
-            if len(epObj) == 1:
-                epNum = epObj[0].episode
-                logger.log(u"Single episode result.", logger.DEBUG)
-            elif len(epObj) > 1:
-                epNum = MULTI_EP_RESULT
-                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
-                    parse_result.episode_numbers), logger.DEBUG)
-            elif len(epObj) == 0:
-                epNum = SEASON_RESULT
-                logger.log(u"Separating full season result to check for later", logger.DEBUG)
-
-            if epNum not in results:
-                results[epNum] = [result]
-            else:
-                results[epNum].append(result)
-
-        # check if we have items to add to cache
-        if len(cl) > 0:
-            myDB = self.cache._getDB()
-            myDB.mass_action(cl)
-
-        return results
-
-
class BTNCache(tvcache.TVCache):
    """RSS-style cache for the BTN provider.

    Instead of a real RSS feed, BTN results are fetched through the
    provider's JSON-RPC search, asking only for torrents uploaded since
    the last successful update.
    """

    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)

        # At least 15 minutes between queries
        self.minTime = 15

    def _getRSSData(self):
        """Fetch torrents uploaded since the last cache update.

        Returns a dict shaped like a parsed RSS feed: {'entries': [...]}.
        """
        # Get the torrents uploaded since last check.
        # math.ceil keeps the age filter from under-counting by a fraction
        # of a second.
        seconds_since_last_update = math.ceil(time.time() - time.mktime(self._getLastUpdate().timetuple()))

        # default to 15 minutes
        seconds_minTime = self.minTime * 60
        if seconds_since_last_update < seconds_minTime:
            seconds_since_last_update = seconds_minTime

        # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, older things will need to be done through backlog
        if seconds_since_last_update > 86400:
            logger.log(
                u"The last known successful update on " + self.provider.name + " was more than 24 hours ago, only trying to fetch the last 24 hours!",
                logger.WARNING)
            seconds_since_last_update = 86400

        return {'entries': self.provider._doSearch(search_params=None, age=seconds_since_last_update)}
-
-
# Module-level instance picked up by SickRage's provider loader.
provider = BTNProvider()
+# coding=utf-8
+# Author: Daniel Heimans
+# URL: http://code.google.com/p/sickbeard
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import time
+import socket
+import math
+import sickbeard
+import generic
+import itertools
+
+from sickbeard import classes
+from sickbeard import scene_exceptions
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard.helpers import sanitizeSceneName
+from sickbeard.exceptions import ex, AuthException
+from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
+from sickbeard import db
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+from sickbeard.common import Quality
+
+from lib import jsonrpclib
+from datetime import datetime
+
+
+class BTNProvider(generic.TorrentProvider):
+    def __init__(self):
+        generic.TorrentProvider.__init__(self, "BTN")
+
+        self.supportsBacklog = True
+        self.supportsAbsoluteNumbering = True
+
+        self.enabled = False
+        self.api_key = None
+        self.ratio = None
+
+        self.cache = BTNCache(self)
+
+        self.urls = {'base_url': "http://api.btnapps.net"}
+
+
+        self.url = self.urls['base_url']
+
    def isEnabled(self):
        # Whether the user has enabled this provider in the config.
        return self.enabled
+
    def imageName(self):
        # Filename of the provider icon shown in the web UI.
        return 'btn.png'
+
+    def _checkAuth(self):
+        if not self.api_key:
+            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
+
+        return True
+
+    def _checkAuthFromData(self, parsedJSON):
+
+        if parsedJSON is None:
+            return self._checkAuth()
+
+        if 'api-error' in parsedJSON:
+            logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['api-error'],
+                       logger.DEBUG)
+            raise AuthException(
+                "Your authentication credentials for " + self.name + " are incorrect, check your config.")
+
+        return True
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        self._checkAuth()
+
+        results = []
+        params = {}
+        apikey = self.api_key
+
+        # age in seconds
+        if age:
+            params['age'] = "<=" + str(int(age))
+
+        if search_params:
+            params.update(search_params)
+
+        parsedJSON = self._api_call(apikey, params)
+        if not parsedJSON:
+            logger.log(u"No data returned from " + self.name, logger.ERROR)
+            return results
+
+        if self._checkAuthFromData(parsedJSON):
+
+            if 'torrents' in parsedJSON:
+                found_torrents = parsedJSON['torrents']
+            else:
+                found_torrents = {}
+
+            # We got something, we know the API sends max 1000 results at a time.
+            # See if there are more than 1000 results for our query, if not we
+            # keep requesting until we've got everything.
+            # max 150 requests per hour so limit at that. Scan every 15 minutes. 60 / 15 = 4.
+            max_pages = 150
+            results_per_page = 1000
+
+            if 'results' in parsedJSON and int(parsedJSON['results']) >= results_per_page:
+                pages_needed = int(math.ceil(int(parsedJSON['results']) / results_per_page))
+                if pages_needed > max_pages:
+                    pages_needed = max_pages
+
+                # +1 because range(1,4) = 1, 2, 3
+                for page in range(1, pages_needed + 1):
+                    parsedJSON = self._api_call(apikey, params, results_per_page, page * results_per_page)
+                    # Note that this these are individual requests and might time out individually. This would result in 'gaps'
+                    # in the results. There is no way to fix this though.
+                    if 'torrents' in parsedJSON:
+                        found_torrents.update(parsedJSON['torrents'])
+
+            for torrentid, torrent_info in found_torrents.iteritems():
+                (title, url) = self._get_title_and_url(torrent_info)
+
+                if title and url:
+                    results.append(torrent_info)
+
+        return results
+
+    def _api_call(self, apikey, params={}, results_per_page=1000, offset=0):
+
+        server = jsonrpclib.Server(self.url)
+        parsedJSON = {}
+
+        try:
+            parsedJSON = server.getTorrents(apikey, params, int(results_per_page), int(offset))
+
+        except jsonrpclib.jsonrpc.ProtocolError, error:
+            logger.log(u"JSON-RPC protocol error while accessing " + self.name + ": " + ex(error), logger.ERROR)
+            parsedJSON = {'api-error': ex(error)}
+            return parsedJSON
+
+        except socket.timeout:
+            logger.log(u"Timeout while accessing " + self.name, logger.WARNING)
+
+        except socket.error, error:
+            # Note that sometimes timeouts are thrown as socket errors
+            logger.log(u"Socket error while accessing " + self.name + ": " + error[1], logger.ERROR)
+
+        except Exception, error:
+            errorstring = str(error)
+            if (errorstring.startswith('<') and errorstring.endswith('>')):
+                errorstring = errorstring[1:-1]
+            logger.log(u"Unknown error while accessing " + self.name + ": " + errorstring, logger.ERROR)
+
+        return parsedJSON
+
+    def _get_title_and_url(self, parsedJSON):
+
+        # The BTN API gives a lot of information in response,
+        # however SickRage is built mostly around Scene or
+        # release names, which is why we are using them here.
+
+        if 'ReleaseName' in parsedJSON and parsedJSON['ReleaseName']:
+            title = parsedJSON['ReleaseName']
+
+        else:
+            # If we don't have a release name we need to get creative
+            title = u''
+            if 'Series' in parsedJSON:
+                title += parsedJSON['Series']
+            if 'GroupName' in parsedJSON:
+                title += '.' + parsedJSON['GroupName'] if title else parsedJSON['GroupName']
+            if 'Resolution' in parsedJSON:
+                title += '.' + parsedJSON['Resolution'] if title else parsedJSON['Resolution']
+            if 'Source' in parsedJSON:
+                title += '.' + parsedJSON['Source'] if title else parsedJSON['Source']
+            if 'Codec' in parsedJSON:
+                title += '.' + parsedJSON['Codec'] if title else parsedJSON['Codec']
+            if title:
+                title = title.replace(' ', '.')
+
+        url = None
+        if 'DownloadURL' in parsedJSON:
+            url = parsedJSON['DownloadURL']
+            if url:
+                # unescaped / is valid in JSON, but it can be escaped
+                url = url.replace("\\/", "/")
+
+        return (title, url)
+
+    def _get_season_search_strings(self, ep_obj):
+        search_params = []
+        current_params = {'category': 'Season'}
+
+        # Search for entire seasons: no need to do special things for air by date or sports shows
+        if ep_obj.show.air_by_date or ep_obj.show.sports:
+            # Search for the year of the air by date show
+            current_params['name'] = str(ep_obj.airdate).split('-')[0]
+        elif ep_obj.show.is_anime:
+            current_params['name'] = "%d" % ep_obj.scene_absolute_number
+        else:
+            current_params['name'] = 'Season ' + str(ep_obj.scene_season)
+
+        # search
+        if ep_obj.show.indexer == 1:
+            current_params['tvdb'] = ep_obj.show.indexerid
+            search_params.append(current_params)
+        elif ep_obj.show.indexer == 2:
+            current_params['tvrage'] = ep_obj.show.indexerid
+            search_params.append(current_params)
+        else:
+            name_exceptions = list(
+                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
+            for name in name_exceptions:
+                # Search by name if we don't have tvdb or tvrage id
+                current_params['series'] = sanitizeSceneName(name)
+                search_params.append(current_params)
+
+        return search_params
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        if not ep_obj:
+            return [{}]
+
+        to_return = []
+        search_params = {'category': 'Episode'}
+
+        # episode
+        if ep_obj.show.air_by_date or ep_obj.show.sports:
+            date_str = str(ep_obj.airdate)
+
+            # BTN uses dots in dates, we just search for the date since that
+            # combined with the series identifier should result in just one episode
+            search_params['name'] = date_str.replace('-', '.')
+        elif ep_obj.show.anime:
+            search_params['name'] = "%i" % int(ep_obj.scene_absolute_number)
+        else:
+            # Do a general name search for the episode, formatted like SXXEYY
+            search_params['name'] = "S%02dE%02d" % (ep_obj.scene_season, ep_obj.scene_episode)
+
+        # search
+        if ep_obj.show.indexer == 1:
+            search_params['tvdb'] = ep_obj.show.indexerid
+            to_return.append(search_params)
+        elif ep_obj.show.indexer == 2:
+            search_params['tvrage'] = ep_obj.show.indexerid
+            to_return.append(search_params)
+        else:
+            # add new query string for every exception
+            name_exceptions = list(
+                set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
+            for cur_exception in name_exceptions:
+                search_params['series'] = sanitizeSceneName(cur_exception)
+                to_return.append(search_params)
+
+        return to_return
+
+    def _doGeneralSearch(self, search_string):
+        # 'search' looks as broad is it can find. Can contain episode overview and title for example,
+        # use with caution!
+        return self._doSearch({'search': search_string})
+
+    def findPropers(self, search_date=None):
+        results = []
+
+        search_terms = ['%.proper.%', '%.repack.%']
+
+        for term in search_terms:
+            for item in self._doSearch({'release': term}, age=4 * 24 * 60 * 60):
+                if item['Time']:
+                    try:
+                        result_date = datetime.fromtimestamp(float(item['Time']))
+                    except TypeError:
+                        result_date = None
+
+                    if result_date:
+                        if not search_date or result_date > search_date:
+                            title, url = self._get_title_and_url(item)
+                            results.append(classes.Proper(title, url, result_date, self.show))
+
+        return results
+
    def seedRatio(self):
        # Seed ratio configured for this provider (None when unset).
        return self.ratio
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+
+        self._checkAuth()
+        self.show = show
+
+        results = {}
+        itemList = []
+
+        searched_scene_season = None
+        for epObj in episodes:
+            # search cache for episode result
+            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
+            if cacheResult:
+                if epObj.episode not in results:
+                    results[epObj.episode] = cacheResult
+                else:
+                    results[epObj.episode].extend(cacheResult)
+
+                # found result, search next episode
+                continue
+
+            # skip if season already searched
+            if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
+                continue
+
+            # mark season searched for season pack searches so we can skip later on
+            searched_scene_season = epObj.scene_season
+
+            if search_mode == 'sponly':
+                # get season search results
+                for curString in self._get_season_search_strings(epObj):
+                    itemList += self._doSearch(curString, search_mode, len(episodes))
+            else:
+                # get single episode search results
+                for curString in self._get_episode_search_strings(epObj):
+                    itemList += self._doSearch(curString, search_mode, len(episodes))
+
+        # if we found what we needed already from cache then return results and exit
+        if len(results) == len(episodes):
+            return results
+
+        # sort list by quality
+        if len(itemList):
+            items = {}
+            itemsUnknown = []
+            for item in itemList:
+                quality = self.getQuality(item, anime=show.is_anime)
+                if quality == Quality.UNKNOWN:
+                    itemsUnknown += [item]
+                else:
+                    if quality not in items:
+                        items[quality] = [item]
+                    else:
+                        items[quality].append(item)
+
+            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
+            itemList += itemsUnknown if itemsUnknown else []
+
+        # filter results
+        cl = []
+        for item in itemList:
+            (title, url) = self._get_title_and_url(item)
+
+            # parse the file name
+            try:
+                myParser = NameParser(False, convert=True)
+                parse_result = myParser.parse(title)
+            except InvalidNameException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)  # @UndefinedVariable
+                continue
+            except InvalidShowException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
+                continue
+
+            showObj = parse_result.show
+            quality = parse_result.quality
+            release_group = parse_result.release_group
+            version = parse_result.version
+
+            addCacheEntry = False
+            if not (showObj.air_by_date or showObj.sports):
+                if search_mode == 'sponly': 
+                    if len(parse_result.episode_numbers):
+                        logger.log(
+                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    if len(parse_result.episode_numbers) and (
+                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
+                                                                                 ep.scene_episode in parse_result.episode_numbers]):
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                else:
+                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
+                                                                                                     episodes if
+                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
+                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    # we just use the existing info for normal searches
+                    actual_season = parse_result.season_number
+                    actual_episodes = parse_result.episode_numbers
+            else:
+                if not (parse_result.is_air_by_date):
+                    logger.log(
+                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
+                        logger.DEBUG)
+                    addCacheEntry = True
+                else:
+                    airdate = parse_result.air_date.toordinal()
+                    myDB = db.DBConnection()
+                    sql_results = myDB.select(
+                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
+                        [showObj.indexerid, airdate])
+
+                    if len(sql_results) != 1:
+                        logger.log(
+                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
+                            logger.WARNING)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    actual_season = int(sql_results[0]["season"])
+                    actual_episodes = [int(sql_results[0]["episode"])]
+
+            # add parsed result to cache for usage later on
+            if addCacheEntry:
+                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
+                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
+                if ci is not None:
+                    cl.append(ci)
+                continue
+
+            # make sure we want the episode
+            wantEp = True
+            for epNo in actual_episodes:
+                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
+                    wantEp = False
+                    break
+
+            if not wantEp:
+                logger.log(
+                    u"Ignoring result " + title + " because we don't want an episode that is " +
+                    Quality.qualityStrings[
+                        quality], logger.DEBUG)
+
+                continue
+
+            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
+
+            # make a result object
+            epObj = []
+            for curEp in actual_episodes:
+                epObj.append(showObj.getEpisode(actual_season, curEp))
+
+            result = self.getResult(epObj)
+            result.show = showObj
+            result.url = url
+            result.name = title
+            result.quality = quality
+            result.release_group = release_group
+            result.version = version
+            result.content = None
+
+            if len(epObj) == 1:
+                epNum = epObj[0].episode
+                logger.log(u"Single episode result.", logger.DEBUG)
+            elif len(epObj) > 1:
+                epNum = MULTI_EP_RESULT
+                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
+                    parse_result.episode_numbers), logger.DEBUG)
+            elif len(epObj) == 0:
+                epNum = SEASON_RESULT
+                logger.log(u"Separating full season result to check for later", logger.DEBUG)
+
+            if epNum not in results:
+                results[epNum] = [result]
+            else:
+                results[epNum].append(result)
+
+        # check if we have items to add to cache
+        if len(cl) > 0:
+            myDB = self.cache._getDB()
+            myDB.mass_action(cl)
+
+        return results
+
+
+class BTNCache(tvcache.TVCache):
+    def __init__(self, provider):
+        tvcache.TVCache.__init__(self, provider)
+
+        # At least 15 minutes between queries
+        self.minTime = 15
+
+    def _getRSSData(self):
+        # Get the torrents uploaded since last check.
+        seconds_since_last_update = math.ceil(time.time() - time.mktime(self._getLastUpdate().timetuple()))
+
+        # default to 15 minutes
+        seconds_minTime = self.minTime * 60
+        if seconds_since_last_update < seconds_minTime:
+            seconds_since_last_update = seconds_minTime
+
+        # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, older things will need to be done through backlog
+        if seconds_since_last_update > 86400:
+            logger.log(
+                u"The last known successful update on " + self.provider.name + " was more than 24 hours ago, only trying to fetch the last 24 hours!",
+                logger.WARNING)
+            seconds_since_last_update = 86400
+
+        return {'entries': self.provider._doSearch(search_params=None, age=seconds_since_last_update)}
+
+
+provider = BTNProvider()
diff --git a/sickbeard/providers/ezrss.py b/sickbeard/providers/ezrss.py
index d0307c10c7dddbaa83251f9792df8b9e837d79b1..ddebbca60d7a60575000af1f53a017883323239c 100644
--- a/sickbeard/providers/ezrss.py
+++ b/sickbeard/providers/ezrss.py
@@ -1,178 +1,178 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import urllib
-import re
-
-try:
-    import xml.etree.cElementTree as etree
-except ImportError:
-    import elementtree.ElementTree as etree
-
-import sickbeard
-import generic
-
-from sickbeard.common import Quality
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import helpers
-
-
-class EZRSSProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        self.urls = {'base_url': 'https://www.ezrss.it/'}
-
-        self.url = self.urls['base_url']
-
-        generic.TorrentProvider.__init__(self, "EZRSS")
-
-        self.supportsBacklog = True
-        self.enabled = False
-        self.ratio = None
-
-        self.cache = EZRSSCache(self)
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'ezrss.png'
-
-    def getQuality(self, item, anime=False):
-
-        try:
-            quality = Quality.sceneQuality(item.filename, anime)
-        except:
-            quality = Quality.UNKNOWN
-
-        return quality
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
-
-        self.show = show
-
-        results = {}
-
-        if show.air_by_date or show.sports:
-            logger.log(self.name + u" doesn't support air-by-date or sports backloging because of limitations on their RSS search.",
-                       logger.WARNING)
-            return results
-
-        results = generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
-
-        return results
-
-    def _get_season_search_strings(self, ep_obj):
-
-        params = {}
-
-        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
-
-        if ep_obj.show.air_by_date or ep_obj.show.sports:
-            params['season'] = str(ep_obj.airdate).split('-')[0]
-        elif ep_obj.show.anime:
-            params['season'] = "%d" % ep_obj.scene_absolute_number
-        else:
-            params['season'] = ep_obj.scene_season
-
-        return [params]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        params = {}
-
-        if not ep_obj:
-            return params
-
-        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
-
-        if self.show.air_by_date or self.show.sports:
-            params['date'] = str(ep_obj.airdate)
-        elif self.show.anime:
-            params['episode'] = "%i" % int(ep_obj.scene_absolute_number)
-        else:
-            params['season'] = ep_obj.scene_season
-            params['episode'] = ep_obj.scene_episode
-
-        return [params]
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        params = {"mode": "rss"}
-
-        if search_params:
-            params.update(search_params)
-
-        search_url = self.url + 'search/index.php?' + urllib.urlencode(params)
-
-        logger.log(u"Search string: " + search_url, logger.DEBUG)
-
-        results = []
-        for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
-
-            (title, url) = self._get_title_and_url(curItem)
-
-            if title and url:
-                logger.log(u"RSS Feed provider: [" + self.name + "] Attempting to add item to cache: " + title, logger.DEBUG)
-                results.append(curItem)
-
-        return results
-
-    def _get_title_and_url(self, item):
-        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
-
-        try:
-            new_title = self._extract_name_from_filename(item.filename)
-        except:
-            new_title = None
-
-        if new_title:
-            title = new_title
-            logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG)
-
-        return (title, url)
-
-    def _extract_name_from_filename(self, filename):
-        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
-        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
-        match = re.match(name_regex, filename, re.I)
-        if match:
-            return match.group(1)
-        return None
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class EZRSSCache(tvcache.TVCache):
-    def __init__(self, provider):
-
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll EZRSS every 15 minutes max
-        self.minTime = 15
-
-    def _getRSSData(self):
-
-        rss_url = self.provider.url + 'feed/'
-        logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
-
-        return self.getRSSFeed(rss_url)
-
-provider = EZRSSProvider()
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+import re
+
+try:
+    import xml.etree.cElementTree as etree
+except ImportError:
+    import elementtree.ElementTree as etree
+
+import sickbeard
+import generic
+
+from sickbeard.common import Quality
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import helpers
+
+
+class EZRSSProvider(generic.TorrentProvider):
+    def __init__(self):
+
+        self.urls = {'base_url': 'https://www.ezrss.it/'}
+
+        self.url = self.urls['base_url']
+
+        generic.TorrentProvider.__init__(self, "EZRSS")
+
+        self.supportsBacklog = True
+        self.enabled = False
+        self.ratio = None
+
+        self.cache = EZRSSCache(self)
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'ezrss.png'
+
+    def getQuality(self, item, anime=False):
+
+        try:
+            quality = Quality.sceneQuality(item.filename, anime)
+        except:
+            quality = Quality.UNKNOWN
+
+        return quality
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+
+        self.show = show
+
+        results = {}
+
+        if show.air_by_date or show.sports:
+            logger.log(self.name + u" doesn't support air-by-date or sports backloging because of limitations on their RSS search.",
+                       logger.WARNING)
+            return results
+
+        results = generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
+
+        return results
+
+    def _get_season_search_strings(self, ep_obj):
+
+        params = {}
+
+        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
+
+        if ep_obj.show.air_by_date or ep_obj.show.sports:
+            params['season'] = str(ep_obj.airdate).split('-')[0]
+        elif ep_obj.show.anime:
+            params['season'] = "%d" % ep_obj.scene_absolute_number
+        else:
+            params['season'] = ep_obj.scene_season
+
+        return [params]
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        params = {}
+
+        if not ep_obj:
+            return params
+
+        params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
+
+        if self.show.air_by_date or self.show.sports:
+            params['date'] = str(ep_obj.airdate)
+        elif self.show.anime:
+            params['episode'] = "%i" % int(ep_obj.scene_absolute_number)
+        else:
+            params['season'] = ep_obj.scene_season
+            params['episode'] = ep_obj.scene_episode
+
+        return [params]
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        params = {"mode": "rss"}
+
+        if search_params:
+            params.update(search_params)
+
+        search_url = self.url + 'search/index.php?' + urllib.urlencode(params)
+
+        logger.log(u"Search string: " + search_url, logger.DEBUG)
+
+        results = []
+        for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
+
+            (title, url) = self._get_title_and_url(curItem)
+
+            if title and url:
+                logger.log(u"RSS Feed provider: [" + self.name + "] Attempting to add item to cache: " + title, logger.DEBUG)
+                results.append(curItem)
+
+        return results
+
+    def _get_title_and_url(self, item):
+        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
+
+        try:
+            new_title = self._extract_name_from_filename(item.filename)
+        except:
+            new_title = None
+
+        if new_title:
+            title = new_title
+            logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG)
+
+        return (title, url)
+
+    def _extract_name_from_filename(self, filename):
+        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
+        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
+        match = re.match(name_regex, filename, re.I)
+        if match:
+            return match.group(1)
+        return None
+
+    def seedRatio(self):
+        return self.ratio
+
+
+class EZRSSCache(tvcache.TVCache):
+    def __init__(self, provider):
+
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll EZRSS every 15 minutes max
+        self.minTime = 15
+
+    def _getRSSData(self):
+
+        rss_url = self.provider.url + 'feed/'
+        logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
+
+        return self.getRSSFeed(rss_url)
+
+provider = EZRSSProvider()
diff --git a/sickbeard/providers/eztv.py b/sickbeard/providers/eztv.py
new file mode 100644
index 0000000000000000000000000000000000000000..19458418cb1c1e2c42ff77e8d39100fccb8bd412
--- /dev/null
+++ b/sickbeard/providers/eztv.py
@@ -0,0 +1,169 @@
+# coding=utf-8
+# Author: Nicolas Martinelli <nicolas.martinelli@gmail.com>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of Sick Beard.
+#
+# Sick Beard is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Sick Beard is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.
+
+import traceback
+import re, datetime
+
+import generic
+from sickbeard import logger, tvcache, db
+from sickbeard.common import Quality
+
+class EZTVProvider(generic.TorrentProvider):
+
+    def __init__(self):
+        generic.TorrentProvider.__init__(self, "EZTV")
+
+        self.supportsBacklog = False
+        self.enabled = False
+        self.ratio = None
+
+        self.cache = EZTVCache(self)
+
+        self.urls = {
+            'base_url': 'http://eztvapi.re/',
+            'show': 'http://eztvapi.re/show/%s',
+        }
+
+        self.url = self.urls['base_url']
+
+    def isEnabled(self):
+        return self.enabled
+
+    def seedRatio(self):
+        return self.ratio
+
+    def imageName(self):
+        return 'eztv_bt_chat.png'
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        search_string = {'Episode': []}
+
+        search_string['Episode'].append({
+            'imdb_id': self.show.imdbid,
+            'season': int(ep_obj.scene_season),
+            'episode': int(ep_obj.scene_episode),
+            'add_string': add_string,
+        })
+
+        return [search_string]
+
+    def getQuality(self, item, anime=False):
+        if item.get('quality') == "480p":
+            return Quality.SDTV
+        elif item.get('quality') == "720p":
+            return Quality.HDWEBDL
+        elif item.get('quality') == "1080p":
+            return Quality.FULLHDWEBDL
+        else:
+            return Quality.sceneQuality(item.get('title'), anime)
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        results = []
+        items = {'Season': [], 'Episode': [], 'RSS': []}
+
+        for mode in search_params.keys():
+
+            if mode != 'Episode':
+                logger.log(u"" + self.name + " does not accept " + mode + " mode", logger.DEBUG)
+                return results
+
+            for search_string in search_params[mode]:
+
+                searchURL = self.urls['show'] % (search_string['imdb_id'])
+                logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
+
+                parsedJSON = self.getURL(searchURL, json=True)
+                if not parsedJSON:
+                    logger.log(u"" + self.name + " could not retrieve page URL:" + searchURL, logger.DEBUG)
+                    return results
+
+            try:
+                for episode in parsedJSON['episodes']:
+                    if int(episode.get('season')) == search_string.get('season') and \
+                       int(episode.get('episode')) == search_string.get('episode'):
+
+                        for quality in episode['torrents'].keys():
+                            link = episode['torrents'][quality]['url']
+                            title = re.search('&dn=(.*?)&', link).group(1)
+
+                            item = {
+                                'title': title,
+                                'link': link,
+                                'quality': quality
+                            }
+
+                            # re.search in case of PROPER|REPACK. In other cases
+                            # add_string is empty, so condition is met.
+                            if re.search(search_string.get('add_string'), title):
+                                items[mode].append(item)
+
+                        break
+
+            except Exception, e:
+                logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(),
+                            logger.ERROR)
+
+            results += items[mode]
+
+        return results
+
+    def findPropers(self, search_date=datetime.datetime.today()):
+
+        results = []
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select(
+            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
+            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
+            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
+            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
+            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
+        )
+
+        if not sqlResults:
+            return []
+
+        for sqlshow in sqlResults:
+            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
+
+            if self.show:
+                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
+
+                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
+
+                for item in self._doSearch(searchString[0]):
+                    title, url = self._get_title_and_url(item)
+                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
+
+        return results
+
+class EZTVCache(tvcache.TVCache):
+    def __init__(self, provider):
+        tvcache.TVCache.__init__(self, provider)
+
+        # Only poll EZTV every 5 minutes max
+        self.minTime = 5
+
+    def _getRSSData(self):
+        search_params = {'RSS': ['']}
+        return {'entries': self.provider._doSearch(search_params)}
+
+provider = EZTVProvider()
diff --git a/sickbeard/providers/freshontv.py b/sickbeard/providers/freshontv.py
index ac83e30529ae1b74fdadfff891b642dc4abb78f3..0500d5cf047d7dfad8290f4ed6228e2fbb7cc04e 100755
--- a/sickbeard/providers/freshontv.py
+++ b/sickbeard/providers/freshontv.py
@@ -1,382 +1,382 @@
-# Author: Idan Gutman
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import re
-import traceback
-import datetime
-import time
-import urlparse
-import sickbeard
-import generic
-from sickbeard.common import Quality, cpu_presets
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import db
-from sickbeard import classes
-from sickbeard import helpers
-from sickbeard import show_name_helpers
-from sickbeard.exceptions import ex, AuthException
-from sickbeard import clients
-from lib import requests
-from lib.requests import exceptions
-from sickbeard.bs4_parser import BS4Parser
-from lib.unidecode import unidecode
-from sickbeard.helpers import sanitizeSceneName
-
-
-class FreshOnTVProvider(generic.TorrentProvider):
-
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "FreshOnTV")
-
-        self.supportsBacklog = True
-
-        self.enabled = False
-        self._uid = None
-        self._hash = None
-        self.username = None
-        self.password = None
-        self.ratio = None
-        self.minseed = None
-        self.minleech = None
-        self.freeleech = False
-
-        self.cache = FreshOnTVCache(self)
-
-        self.urls = {'base_url': 'https://freshon.tv/',
-                'login': 'https://freshon.tv/login.php?action=makelogin',
-                'detail': 'https://freshon.tv/details.php?id=%s',
-                'search': 'https://freshon.tv/browse.php?incldead=%s&words=0&cat=0&search=%s',
-                'download': 'https://freshon.tv/download.php?id=%s&type=torrent',
-                }
-
-        self.url = self.urls['base_url']
-
-        self.cookies = None
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'freshontv.png'
-
-    def getQuality(self, item, anime=False):
-
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def _checkAuth(self):
-
-        if not self.username or not self.password:
-            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
-
-        return True
-
-    def _doLogin(self):
-        if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
-            return True
-
-        if self._uid and self._hash:
-            requests.utils.add_dict_to_cookiejar(self.session.cookies, self.cookies)
-        else:
-            login_params = {'username': self.username,
-                            'password': self.password,
-                            'login': 'submit'
-            }
-
-            if not self.session:
-                self.session = requests.Session()
-
-            try:
-                response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
-            except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
-                logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
-                return False
-
-            if re.search('/logout.php', response.text):
-                logger.log(u'Login to ' + self.name + ' was successful.', logger.DEBUG)
-
-                try:
-                    if requests.utils.dict_from_cookiejar(self.session.cookies)['uid'] and requests.utils.dict_from_cookiejar(self.session.cookies)['pass']:
-                        self._uid = requests.utils.dict_from_cookiejar(self.session.cookies)['uid']
-                        self._hash = requests.utils.dict_from_cookiejar(self.session.cookies)['pass']
-
-                        self.cookies = {'uid': self._uid,
-                                        'pass': self._hash
-                        }
-                        return True
-                except:
-                    logger.log(u'Unable to obtain cookie for FreshOnTV', logger.ERROR)
-                    return False
-
-            else:
-                logger.log(u'Login to ' + self.name + ' was unsuccessful.', logger.DEBUG)
-                if re.search('Username does not exist in the userbase or the account is not confirmed yet.', response.text):
-                    logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
-
-                if re.search('DDoS protection by CloudFlare', response.text):
-                    logger.log(u'Unable to login to ' + self.name + ' due to CloudFlare DDoS javascript check.', logger.ERROR)
-
-                    return False
-
-
-    def _get_season_search_strings(self, ep_obj):
-
-        search_string = {'Season': []}
-        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-            if ep_obj.show.air_by_date or ep_obj.show.sports:
-                ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
-            elif ep_obj.show.anime:
-                ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
-            else:
-                ep_string = show_name + '.S%02d' % int(ep_obj.scene_season)  #1) showName SXX
-
-            search_string['Season'].append(ep_string)
-
-        return [search_string]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        search_string = {'Episode': []}
-
-        if not ep_obj:
-            return []
-
-        if self.show.air_by_date:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|')
-                search_string['Episode'].append(ep_string)
-        elif self.show.sports:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|') + '|' + \
-                            ep_obj.airdate.strftime('%b')
-                search_string['Episode'].append(ep_string)
-        elif self.show.anime:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            "%i" % int(ep_obj.scene_absolute_number)
-                search_string['Episode'].append(ep_string)
-        else:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
-                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
-                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
-
-                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
-
-        return [search_string]
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        results = []
-        items = {'Season': [], 'Episode': [], 'RSS': []}
-
-        freeleech = '3' if self.freeleech else '0'
-
-        if not self._doLogin():
-            return results
-
-        for mode in search_params.keys():
-            for search_string in search_params[mode]:
-
-                if isinstance(search_string, unicode):
-                    search_string = unidecode(search_string)
-
-                searchURL = self.urls['search'] % (freeleech, search_string)
-                logger.log(u"Search string: " + searchURL, logger.DEBUG)
-                init_html = self.getURL(searchURL)
-                max_page_number = 0
-
-                if not init_html:
-                    logger.log(u"The opening search response from " + self.name + " is empty.",logger.DEBUG)
-                    continue
-
-                try:
-                    with BS4Parser(init_html, features=["html5lib", "permissive"]) as init_soup:
-
-                        #Check to see if there is more than 1 page of results
-                        pager = init_soup.find('div', {'class': 'pager'})
-                        if pager:
-                            page_links = pager.find_all('a', href=True)
-                        else:
-                            page_links = []
-
-                        if len(page_links) > 0:
-                            for lnk in page_links:
-                                link_text = lnk.text.strip()
-                                if link_text.isdigit():
-                                    page_int = int(link_text)
-                                    if page_int > max_page_number:
-                                        max_page_number = page_int
-
-                        #limit page number to 15 just in case something goes wrong
-                        if max_page_number > 15:
-                            max_page_number = 15
-                        #limit RSS search
-                        if max_page_number > 3 and mode is 'RSS':
-                            max_page_number = 3
-                except:
-                    logger.log(u"BS4 parser unable to process response " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
-                    continue
-
-                data_response_list = []
-                data_response_list.append(init_html)
-
-                #Freshon starts counting pages from zero, even though it displays numbers from 1
-                if max_page_number > 1:
-                    for i in range(1, max_page_number):
-
-                        time.sleep(1)
-                        page_searchURL = searchURL + '&page=' + str(i)
-                        logger.log(u"Search string: " + page_searchURL, logger.DEBUG)
-                        page_html = self.getURL(page_searchURL)
-
-                        if not page_html:
-                            logger.log(u"The search response for page number " + str(i) + " is empty." + self.name,logger.DEBUG)
-                            continue
-
-                        data_response_list.append(page_html)
-
-                try:
-
-                    for data_response in data_response_list:
-
-                        with BS4Parser(data_response, features=["html5lib", "permissive"]) as html:
-
-                            torrent_rows = html.findAll("tr", {"class": re.compile('torrent_[0-9]*')})
-
-                            #Continue only if a Release is found
-                            if len(torrent_rows) == 0:
-                                logger.log(u"The Data returned from " + self.name + " does not contain any torrent", logger.DEBUG)
-                                continue
-
-                            for individual_torrent in torrent_rows:
-
-                                #skip if torrent has been nuked due to poor quality
-                                if individual_torrent.find('img', alt='Nuked') != None:
-                                    continue
-
-                                try:
-                                    title = individual_torrent.find('a', {'class': 'torrent_name_link'})['title']
-                                except:
-                                    logger.log(u"Unable to parse torrent title " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
-                                    continue
-
-                                try:
-                                    details_url = individual_torrent.find('a', {'class': 'torrent_name_link'})['href']
-                                    id = int((re.match('.*?([0-9]+)$', details_url).group(1)).strip())
-                                    download_url = self.urls['download'] % (str(id))
-                                except:
-                                    logger.log(u"Unable to parse torrent id & download url  " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
-                                    continue
-
-                                try:
-                                    seeders = int(individual_torrent.find('td', {'class': 'table_seeders'}).find('span').text.strip())
-                                except:
-                                    logger.log(u"Unable to parse torrent seeders content  " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
-                                    seeders = 1
-                                try:
-                                    leechers = int(individual_torrent.find('td', {'class': 'table_leechers'}).find('a').text.strip())
-                                except:
-                                    logger.log(u"Unable to parse torrent leechers content " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
-                                    leechers = 0
-
-                                #Filter unseeded torrent
-                                if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
-                                    continue
-
-                                if not title or not download_url:
-                                    continue
-
-                                item = title, download_url, id, seeders, leechers
-                                logger.log(u"Found result: " + title + " (" + searchURL + ")", logger.DEBUG)
-
-                                items[mode].append(item)
-
-                except Exception as e:
-                    logger.log(u"Failed parsing " + " Traceback: " + traceback.format_exc(), logger.DEBUG)
-
-            #For each search mode sort all the items by seeders
-            items[mode].sort(key=lambda tup: tup[3], reverse=True)
-
-            results += items[mode]
-
-        return results
-
-    def _get_title_and_url(self, item):
-
-        title, url, id, seeders, leechers = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = str(url).replace('&amp;', '&')
-
-        return (title, url)
-
-    def findPropers(self, search_date=datetime.datetime.today()):
-
-        results = []
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select(
-            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
-            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
-            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
-            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
-            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
-        )
-
-        if not sqlResults:
-            return []
-
-        for sqlshow in sqlResults:
-            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
-            if self.show:
-                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
-
-                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
-
-                for item in self._doSearch(searchString[0]):
-                    title, url = self._get_title_and_url(item)
-                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class FreshOnTVCache(tvcache.TVCache):
-    def __init__(self, provider):
-
-        tvcache.TVCache.__init__(self, provider)
-
-        # poll delay in minutes
-        self.minTime = 20
-
-    def _getRSSData(self):
-        search_params = {'RSS': ['']}
-        return {'entries': self.provider._doSearch(search_params)}
-
-provider = FreshOnTVProvider()
+# Author: Idan Gutman
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import traceback
+import datetime
+import time
+import urlparse
+import sickbeard
+import generic
+from sickbeard.common import Quality, cpu_presets
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import db
+from sickbeard import classes
+from sickbeard import helpers
+from sickbeard import show_name_helpers
+from sickbeard.exceptions import ex, AuthException
+from sickbeard import clients
+from lib import requests
+from lib.requests import exceptions
+from sickbeard.bs4_parser import BS4Parser
+from lib.unidecode import unidecode
+from sickbeard.helpers import sanitizeSceneName
+
+
+class FreshOnTVProvider(generic.TorrentProvider):
+
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "FreshOnTV")
+
+        self.supportsBacklog = True
+
+        self.enabled = False
+        self._uid = None
+        self._hash = None
+        self.username = None
+        self.password = None
+        self.ratio = None
+        self.minseed = None
+        self.minleech = None
+        self.freeleech = False
+
+        self.cache = FreshOnTVCache(self)
+
+        self.urls = {'base_url': 'https://freshon.tv/',
+                'login': 'https://freshon.tv/login.php?action=makelogin',
+                'detail': 'https://freshon.tv/details.php?id=%s',
+                'search': 'https://freshon.tv/browse.php?incldead=%s&words=0&cat=0&search=%s',
+                'download': 'https://freshon.tv/download.php?id=%s&type=torrent',
+                }
+
+        self.url = self.urls['base_url']
+
+        self.cookies = None
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'freshontv.png'
+
+    def getQuality(self, item, anime=False):
+
+        quality = Quality.sceneQuality(item[0], anime)
+        return quality
+
+    def _checkAuth(self):
+
+        if not self.username or not self.password:
+            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
+
+        return True
+
+    def _doLogin(self):
+        if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
+            return True
+
+        if self._uid and self._hash:
+            requests.utils.add_dict_to_cookiejar(self.session.cookies, self.cookies)
+        else:
+            login_params = {'username': self.username,
+                            'password': self.password,
+                            'login': 'submit'
+            }
+
+            if not self.session:
+                self.session = requests.Session()
+
+            try:
+                response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
+            except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
+                logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
+                return False
+
+            if re.search('/logout.php', response.text):
+                logger.log(u'Login to ' + self.name + ' was successful.', logger.DEBUG)
+
+                try:
+                    if requests.utils.dict_from_cookiejar(self.session.cookies)['uid'] and requests.utils.dict_from_cookiejar(self.session.cookies)['pass']:
+                        self._uid = requests.utils.dict_from_cookiejar(self.session.cookies)['uid']
+                        self._hash = requests.utils.dict_from_cookiejar(self.session.cookies)['pass']
+
+                        self.cookies = {'uid': self._uid,
+                                        'pass': self._hash
+                        }
+                        return True
+                except:
+                    logger.log(u'Unable to obtain cookie for FreshOnTV', logger.ERROR)
+                    return False
+
+            else:
+                logger.log(u'Login to ' + self.name + ' was unsuccessful.', logger.DEBUG)
+                if re.search('Username does not exist in the userbase or the account is not confirmed yet.', response.text):
+                    logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
+
+                if re.search('DDoS protection by CloudFlare', response.text):
+                    logger.log(u'Unable to login to ' + self.name + ' due to CloudFlare DDoS javascript check.', logger.ERROR)
+
+                    return False
+
+
+    def _get_season_search_strings(self, ep_obj):
+
+        search_string = {'Season': []}
+        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+            if ep_obj.show.air_by_date or ep_obj.show.sports:
+                ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
+            elif ep_obj.show.anime:
+                ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
+            else:
+                ep_string = show_name + '.S%02d' % int(ep_obj.scene_season)  #1) showName SXX
+
+            search_string['Season'].append(ep_string)
+
+        return [search_string]
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        search_string = {'Episode': []}
+
+        if not ep_obj:
+            return []
+
+        if self.show.air_by_date:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|')
+                search_string['Episode'].append(ep_string)
+        elif self.show.sports:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|') + '|' + \
+                            ep_obj.airdate.strftime('%b')
+                search_string['Episode'].append(ep_string)
+        elif self.show.anime:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            "%i" % int(ep_obj.scene_absolute_number)
+                search_string['Episode'].append(ep_string)
+        else:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
+                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
+                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
+
+                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
+
+        return [search_string]
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        results = []
+        items = {'Season': [], 'Episode': [], 'RSS': []}
+
+        freeleech = '3' if self.freeleech else '0'
+
+        if not self._doLogin():
+            return results
+
+        for mode in search_params.keys():
+            for search_string in search_params[mode]:
+
+                if isinstance(search_string, unicode):
+                    search_string = unidecode(search_string)
+
+                searchURL = self.urls['search'] % (freeleech, search_string)
+                logger.log(u"Search string: " + searchURL, logger.DEBUG)
+                init_html = self.getURL(searchURL)
+                max_page_number = 0
+
+                if not init_html:
+                    logger.log(u"The opening search response from " + self.name + " is empty.",logger.DEBUG)
+                    continue
+
+                try:
+                    with BS4Parser(init_html, features=["html5lib", "permissive"]) as init_soup:
+
+                        #Check to see if there is more than 1 page of results
+                        pager = init_soup.find('div', {'class': 'pager'})
+                        if pager:
+                            page_links = pager.find_all('a', href=True)
+                        else:
+                            page_links = []
+
+                        if len(page_links) > 0:
+                            for lnk in page_links:
+                                link_text = lnk.text.strip()
+                                if link_text.isdigit():
+                                    page_int = int(link_text)
+                                    if page_int > max_page_number:
+                                        max_page_number = page_int
+
+                        #limit page number to 15 just in case something goes wrong
+                        if max_page_number > 15:
+                            max_page_number = 15
+                        #limit RSS search
+                        if max_page_number > 3 and mode is 'RSS':
+                            max_page_number = 3
+                except:
+                    logger.log(u"BS4 parser unable to process response " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+                    continue
+
+                data_response_list = []
+                data_response_list.append(init_html)
+
+                #Freshon starts counting pages from zero, even though it displays numbers from 1
+                if max_page_number > 1:
+                    for i in range(1, max_page_number):
+
+                        time.sleep(1)
+                        page_searchURL = searchURL + '&page=' + str(i)
+                        logger.log(u"Search string: " + page_searchURL, logger.DEBUG)
+                        page_html = self.getURL(page_searchURL)
+
+                        if not page_html:
+                            logger.log(u"The search response for page number " + str(i) + " is empty." + self.name,logger.DEBUG)
+                            continue
+
+                        data_response_list.append(page_html)
+
+                try:
+
+                    for data_response in data_response_list:
+
+                        with BS4Parser(data_response, features=["html5lib", "permissive"]) as html:
+
+                            torrent_rows = html.findAll("tr", {"class": re.compile('torrent_[0-9]*')})
+
+                            #Continue only if a Release is found
+                            if len(torrent_rows) == 0:
+                                logger.log(u"The Data returned from " + self.name + " does not contain any torrent", logger.DEBUG)
+                                continue
+
+                            for individual_torrent in torrent_rows:
+
+                                #skip if torrent has been nuked due to poor quality
+                                if individual_torrent.find('img', alt='Nuked') != None:
+                                    continue
+
+                                try:
+                                    title = individual_torrent.find('a', {'class': 'torrent_name_link'})['title']
+                                except:
+                                    logger.log(u"Unable to parse torrent title " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    continue
+
+                                try:
+                                    details_url = individual_torrent.find('a', {'class': 'torrent_name_link'})['href']
+                                    id = int((re.match('.*?([0-9]+)$', details_url).group(1)).strip())
+                                    download_url = self.urls['download'] % (str(id))
+                                except:
+                                    logger.log(u"Unable to parse torrent id & download url  " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    continue
+
+                                try:
+                                    seeders = int(individual_torrent.find('td', {'class': 'table_seeders'}).find('span').text.strip())
+                                except:
+                                    logger.log(u"Unable to parse torrent seeders content  " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    seeders = 1
+                                try:
+                                    leechers = int(individual_torrent.find('td', {'class': 'table_leechers'}).find('a').text.strip())
+                                except:
+                                    logger.log(u"Unable to parse torrent leechers content " + self.name + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+                                    leechers = 0
+
+                                #Filter unseeded torrent
+                                if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
+                                    continue
+
+                                if not title or not download_url:
+                                    continue
+
+                                item = title, download_url, id, seeders, leechers
+                                logger.log(u"Found result: " + title + " (" + searchURL + ")", logger.DEBUG)
+
+                                items[mode].append(item)
+
+                except Exception as e:
+                    logger.log(u"Failed parsing " + " Traceback: " + traceback.format_exc(), logger.DEBUG)
+
+            #For each search mode sort all the items by seeders
+            items[mode].sort(key=lambda tup: tup[3], reverse=True)
+
+            results += items[mode]
+
+        return results
+
+    def _get_title_and_url(self, item):
+
+        title, url, id, seeders, leechers = item
+
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        if url:
+            url = str(url).replace('&amp;', '&')
+
+        return (title, url)
+
+    def findPropers(self, search_date=datetime.datetime.today()):
+
+        results = []
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select(
+            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
+            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
+            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
+            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
+            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
+        )
+
+        if not sqlResults:
+            return []
+
+        for sqlshow in sqlResults:
+            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
+            if self.show:
+                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
+
+                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
+
+                for item in self._doSearch(searchString[0]):
+                    title, url = self._get_title_and_url(item)
+                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
+
+        return results
+
+    def seedRatio(self):
+        return self.ratio
+
+
class FreshOnTVCache(tvcache.TVCache):
    """RSS-style result cache for the FreshOnTV provider."""

    def __init__(self, provider):
        """Initialise the base cache and set the provider poll interval."""
        tvcache.TVCache.__init__(self, provider)
        self.minTime = 20  # minimum minutes between consecutive provider polls

    def _getRSSData(self):
        """Run an empty 'RSS'-mode search against the provider and wrap the hits."""
        return {'entries': self.provider._doSearch({'RSS': ['']})}
+
+provider = FreshOnTVProvider()
diff --git a/sickbeard/providers/generic.py b/sickbeard/providers/generic.py
index 6fbd54df8d210a5d92213940fbe0d6b3d86c2eab..2fe0459501bf49df17f7c11c2b4ad65aa5983fc3 100644
--- a/sickbeard/providers/generic.py
+++ b/sickbeard/providers/generic.py
@@ -1,523 +1,523 @@
-# coding=utf-8
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import datetime
-import os
-import re
-import itertools
-import urllib
-
-import sickbeard
-import requests
-
-from sickbeard import helpers, classes, logger, db
-from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
-from sickbeard import tvcache
-from sickbeard import encodingKludge as ek
-from sickbeard.exceptions import ex
-from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-from sickbeard.common import Quality
-
-from hachoir_parser import createParser
-from base64 import b16encode, b32decode
-
-class GenericProvider:
-    NZB = "nzb"
-    TORRENT = "torrent"
-
-    def __init__(self, name):
-        # these need to be set in the subclass
-        self.providerType = None
-        self.name = name
-
-        self.proxy = ProviderProxy()
-        self.proxyGlypeProxySSLwarning = None
-        self.urls = {}
-        self.url = ''
-
-        self.show = None
-
-        self.supportsBacklog = False
-        self.supportsAbsoluteNumbering = False
-        self.anime_only = False
-
-        self.search_mode = None
-        self.search_fallback = False
-        self.enable_daily = False
-        self.enable_backlog = False
-
-        self.cache = tvcache.TVCache(self)
-
-        self.session = requests.session()
-
-        self.headers = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': USER_AGENT}
-
-    def getID(self):
-        return GenericProvider.makeID(self.name)
-
-    @staticmethod
-    def makeID(name):
-        return re.sub("[^\w\d_]", "_", name.strip().lower())
-
-    def imageName(self):
-        return self.getID() + '.png'
-
-    def _checkAuth(self):
-        return True
-
-    def _doLogin(self):
-        return True
-
-    def isActive(self):
-        if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
-            return self.isEnabled()
-        elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
-            return self.isEnabled()
-        else:
-            return False
-
-    def isEnabled(self):
-        """
-        This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
-        """
-        return False
-
-    def getResult(self, episodes):
-        """
-        Returns a result of the correct type for this provider
-        """
-
-        if self.providerType == GenericProvider.NZB:
-            result = classes.NZBSearchResult(episodes)
-        elif self.providerType == GenericProvider.TORRENT:
-            result = classes.TorrentSearchResult(episodes)
-        else:
-            result = classes.SearchResult(episodes)
-
-        result.provider = self
-
-        return result
-
-    def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
-        """
-        By default this is just a simple urlopen call but this method should be overridden
-        for providers with special URL requirements (like cookies)
-        """
-
-        # check for auth
-        if not self._doLogin():
-            return
-
-        if self.proxy.isEnabled():
-            self.headers.update({'Referer': self.proxy.getProxyURL()})
-            # GlypeProxy SSL warning message
-            self.proxyGlypeProxySSLwarning = self.proxy.getProxyURL() + 'includes/process.php?action=sslagree&submit=Continue anyway...'
-
-        return helpers.getURL(self.proxy._buildURL(url), post_data=post_data, params=params, headers=self.headers, timeout=timeout,
-                              session=self.session, json=json, proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning)
-
-    def downloadResult(self, result):
-        """
-        Save the result to disk.
-        """
-
-        # check for auth
-        if not self._doLogin():
-            return False
-
-        if self.providerType == GenericProvider.TORRENT:
-            try:
-                torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
-
-                if len(torrent_hash) == 32:
-                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()
-
-                if not torrent_hash:
-                    logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
-                    return False
-
-                urls = [
-                    'http://torcache.net/torrent/' + torrent_hash + '.torrent',
-                    'http://torrage.com/torrent/' + torrent_hash + '.torrent',
-                    'http://zoink.it/torrent/' + torrent_hash + '.torrent',
-                ]
-            except:
-                urls = [result.url]
-
-            filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
-                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
-        elif self.providerType == GenericProvider.NZB:
-            urls = [result.url]
-
-            filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
-                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
-        else:
-            return
-
-        for url in urls:
-            if helpers.download_file(url, filename, session=self.session):
-                logger.log(u"Downloading a result from " + self.name + " at " + url)
-
-                if self.providerType == GenericProvider.TORRENT:
-                    logger.log(u"Saved magnet link to " + filename, logger.INFO)
-                else:
-                    logger.log(u"Saved result to " + filename, logger.INFO)
-
-                if self._verify_download(filename):
-                    return True
-
-        logger.log(u"Failed to download result", logger.WARNING)
-        return False
-
-    def _verify_download(self, file_name=None):
-        """
-        Checks the saved file to see if it was actually valid, if not then consider the download a failure.
-        """
-
-        # primitive verification of torrents, just make sure we didn't get a text file or something
-        if self.providerType == GenericProvider.TORRENT:
-            try:
-                parser = createParser(file_name)
-                if parser:
-                    mime_type = parser._getMimeType()
-                    try:
-                        parser.stream._input.close()
-                    except:
-                        pass
-                    if mime_type == 'application/x-bittorrent':
-                        return True
-            except Exception as e:
-                logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
-
-            logger.log(u"Result is not a valid torrent file", logger.WARNING)
-            return False
-
-        return True
-
-    def searchRSS(self, episodes):
-        return self.cache.findNeededEpisodes(episodes)
-
-    def getQuality(self, item, anime=False):
-        """
-        Figures out the quality of the given RSS item node
-        
-        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
-        
-        Returns a Quality value obtained from the node's data 
-        """
-        (title, url) = self._get_title_and_url(item)
-        quality = Quality.sceneQuality(title, anime)
-        return quality
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-        return []
-
-    def _get_season_search_strings(self, episode):
-        return []
-
-    def _get_episode_search_strings(self, eb_obj, add_string=''):
-        return []
-
-    def _get_title_and_url(self, item):
-        """
-        Retrieves the title and URL data from the item XML node
-
-        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
-
-        Returns: A tuple containing two strings representing title and URL respectively
-        """
-
-        title = item.get('title')
-        if title:
-            title = u'' + title.replace(' ', '.')
-
-        url = item.get('link')
-        if url:
-            url = url.replace('&amp;', '&')
-
-        return title, url
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
-
-        self._checkAuth()
-        self.show = show
-
-        results = {}
-        itemList = []
-
-        searched_scene_season = None
-        for epObj in episodes:
-            # search cache for episode result
-            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
-            if cacheResult:
-                if epObj.episode not in results:
-                    results[epObj.episode] = cacheResult
-                else:
-                    results[epObj.episode].extend(cacheResult)
-
-                # found result, search next episode
-                continue
-
-            # skip if season already searched
-            if len(episodes) > 1 and searched_scene_season == epObj.scene_season:
-                continue
-
-            # mark season searched for season pack searches so we can skip later on
-            searched_scene_season = epObj.scene_season
-
-            if len(episodes) > 1:
-                # get season search results
-                for curString in self._get_season_search_strings(epObj):
-                    itemList += self._doSearch(curString, search_mode, len(episodes))
-            else:
-                # get single episode search results
-                for curString in self._get_episode_search_strings(epObj):
-                    itemList += self._doSearch(curString, 'eponly', len(episodes))
-
-        # if we found what we needed already from cache then return results and exit
-        if len(results) == len(episodes):
-            return results
-
-        # sort list by quality
-        if len(itemList):
-            items = {}
-            itemsUnknown = []
-            for item in itemList:
-                quality = self.getQuality(item, anime=show.is_anime)
-                if quality == Quality.UNKNOWN:
-                    itemsUnknown += [item]
-                else:
-                    if quality not in items:
-                        items[quality] = [item]
-                    else:
-                        items[quality].append(item)
-
-            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
-            itemList += itemsUnknown if itemsUnknown else []
-
-        # filter results
-        cl = []
-        for item in itemList:
-            (title, url) = self._get_title_and_url(item)
-
-            # parse the file name
-            try:
-                myParser = NameParser(False, convert=True)
-                parse_result = myParser.parse(title)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
-                continue
-
-            showObj = parse_result.show
-            quality = parse_result.quality
-            release_group = parse_result.release_group
-            version = parse_result.version
-
-            addCacheEntry = False
-            if not (showObj.air_by_date or showObj.sports):
-                if search_mode == 'sponly': 
-                    if len(parse_result.episode_numbers):
-                        logger.log(
-                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    if len(parse_result.episode_numbers) and (
-                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
-                                                                                 ep.scene_episode in parse_result.episode_numbers]):
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                else:
-                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
-                                                                                                     episodes if
-                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
-                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    # we just use the existing info for normal searches
-                    actual_season = parse_result.season_number
-                    actual_episodes = parse_result.episode_numbers
-            else:
-                if not (parse_result.is_air_by_date):
-                    logger.log(
-                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
-                        logger.DEBUG)
-                    addCacheEntry = True
-                else:
-                    airdate = parse_result.air_date.toordinal()
-                    myDB = db.DBConnection()
-                    sql_results = myDB.select(
-                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
-                        [showObj.indexerid, airdate])
-
-                    if len(sql_results) != 1:
-                        logger.log(
-                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
-                            logger.WARNING)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    actual_season = int(sql_results[0]["season"])
-                    actual_episodes = [int(sql_results[0]["episode"])]
-
-            # add parsed result to cache for usage later on
-            if addCacheEntry:
-                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
-                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
-                if ci is not None:
-                    cl.append(ci)
-                continue
-
-            # make sure we want the episode
-            wantEp = True
-            for epNo in actual_episodes:
-                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
-                    wantEp = False
-                    break
-
-            if not wantEp:
-                logger.log(
-                    u"Ignoring result " + title + " because we don't want an episode that is " +
-                    Quality.qualityStrings[
-                        quality], logger.DEBUG)
-
-                continue
-
-            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
-
-            # make a result object
-            epObj = []
-            for curEp in actual_episodes:
-                epObj.append(showObj.getEpisode(actual_season, curEp))
-
-            result = self.getResult(epObj)
-            result.show = showObj
-            result.url = url
-            result.name = title
-            result.quality = quality
-            result.release_group = release_group
-            result.version = version
-            result.content = None
-
-            if len(epObj) == 1:
-                epNum = epObj[0].episode
-                logger.log(u"Single episode result.", logger.DEBUG)
-            elif len(epObj) > 1:
-                epNum = MULTI_EP_RESULT
-                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
-                    parse_result.episode_numbers), logger.DEBUG)
-            elif len(epObj) == 0:
-                epNum = SEASON_RESULT
-                logger.log(u"Separating full season result to check for later", logger.DEBUG)
-
-            if epNum not in results:
-                results[epNum] = [result]
-            else:
-                results[epNum].append(result)
-
-        # check if we have items to add to cache
-        if len(cl) > 0:
-            myDB = self.cache._getDB()
-            myDB.mass_action(cl)
-
-        return results
-
-    def findPropers(self, search_date=None):
-
-        results = self.cache.listPropers(search_date)
-
-        return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
-                results]
-
-    def seedRatio(self):
-        '''
-        Provider should override this value if custom seed ratio enabled
-        It should return the value of the provider seed ratio
-        '''
-        return ''
-
-
-class NZBProvider(GenericProvider):
-    def __init__(self, name):
-        GenericProvider.__init__(self, name)
-
-        self.providerType = GenericProvider.NZB
-
-
-class TorrentProvider(GenericProvider):
-    def __init__(self, name):
-        GenericProvider.__init__(self, name)
-
-        self.providerType = GenericProvider.TORRENT
-
-class ProviderProxy:
-    def __init__(self):
-        self.Type = 'GlypeProxy'
-        self.param = 'browse.php?u='
-        self.option = '&b=32&f=norefer'
-        self.enabled = False
-        self.url = None
-
-        self.urls = {
-            'getprivate.eu (NL)': 'http://getprivate.eu/',
-            'hideme.nl (NL)': 'http://hideme.nl/',
-            'proxite.eu (DE)': 'http://proxite.eu/',
-            'interproxy.net (EU)': 'http://interproxy.net/',
-        }
-
-    def isEnabled(self):
-        """ Return True if we Choose to call TPB via Proxy """
-        return self.enabled
-
-    def getProxyURL(self):
-        """ Return the Proxy URL Choosen via Provider Setting """
-        return str(self.url)
-
-    def _buildURL(self, url):
-        """ Return the Proxyfied URL of the page """
-        if self.isEnabled():
-            url = self.getProxyURL() + self.param + urllib.quote_plus(url.encode('UTF-8')) + self.option
-            logger.log(u"Proxified URL: " + url, logger.DEBUG)
-
-        return url
-
-    def _buildRE(self, regx):
-        """ Return the Proxyfied RE string """
-        if self.isEnabled():
-            regx = re.sub('//1', self.option, regx).replace('&', '&amp;')
-            logger.log(u"Proxified REGEX: " + regx, logger.DEBUG)
-        else:
-            regx = re.sub('//1', '', regx)
-
-        return regx
+# coding=utf-8
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import datetime
+import os
+import re
+import itertools
+import urllib
+
+import sickbeard
+import requests
+
+from sickbeard import helpers, classes, logger, db
+from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
+from sickbeard import tvcache
+from sickbeard import encodingKludge as ek
+from sickbeard.exceptions import ex
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+from sickbeard.common import Quality
+
+from hachoir_parser import createParser
+from base64 import b16encode, b32decode
+
class GenericProvider:
    # Provider type constants; subclasses assign one of these to self.providerType.
    NZB = "nzb"
    TORRENT = "torrent"

    def __init__(self, name):
        """Initialize state shared by every NZB/torrent provider.

        name: human-readable provider name; also used to derive the config ID.
        """
        # these need to be set in the subclass
        self.providerType = None
        self.name = name

        # Optional Glype web proxy used to reach blocked provider sites.
        self.proxy = ProviderProxy()
        self.proxyGlypeProxySSLwarning = None
        self.urls = {}
        self.url = ''

        # Show currently being searched; set by findSearchResults().
        self.show = None

        # Capability flags; subclasses flip these as appropriate.
        self.supportsBacklog = False
        self.supportsAbsoluteNumbering = False
        self.anime_only = False

        # Per-provider search configuration (populated from user settings).
        self.search_mode = None
        self.search_fallback = False
        self.enable_daily = False
        self.enable_backlog = False

        # Result cache backing searchRSS() and the cache pass of findSearchResults().
        self.cache = tvcache.TVCache(self)

        # One persistent HTTP session per provider so cookies/logins are reused.
        self.session = requests.session()

        self.headers = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': USER_AGENT}

    def getID(self):
        """Return the sanitized config identifier for this provider."""
        return GenericProvider.makeID(self.name)

    @staticmethod
    def makeID(name):
        """Reduce a provider name to a lowercase, config-safe ID.

        Every character that is not alphanumeric or '_' becomes '_'.
        """
        return re.sub("[^\w\d_]", "_", name.strip().lower())

    def imageName(self):
        """Return the filename of this provider's icon image."""
        return self.getID() + '.png'

    def _checkAuth(self):
        # Base class assumes no auth required; credentialed subclasses override.
        return True

    def _doLogin(self):
        # Base class assumes no login required; form-login subclasses override.
        return True

    def isActive(self):
        """Return True when the provider's type is globally enabled AND the
        provider itself is enabled in settings."""
        if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
            return self.isEnabled()
        elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
            return self.isEnabled()
        else:
            return False

    def isEnabled(self):
        """
        This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
        """
        return False

    def getResult(self, episodes):
        """
        Returns a result of the correct type for this provider

        episodes: list of episode objects the result covers.
        """

        if self.providerType == GenericProvider.NZB:
            result = classes.NZBSearchResult(episodes)
        elif self.providerType == GenericProvider.TORRENT:
            result = classes.TorrentSearchResult(episodes)
        else:
            result = classes.SearchResult(episodes)

        result.provider = self

        return result

    def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)

        Returns the page content (or parsed JSON when json=True), or None when
        login fails or the fetch fails.
        """

        # check for auth; bail out (returning None) if the provider login fails
        if not self._doLogin():
            return

        if self.proxy.isEnabled():
            self.headers.update({'Referer': self.proxy.getProxyURL()})
            # GlypeProxy SSL warning message: URL used downstream to auto-accept
            # the proxy's "continue anyway" interstitial page.
            self.proxyGlypeProxySSLwarning = self.proxy.getProxyURL() + 'includes/process.php?action=sslagree&submit=Continue anyway...'

        return helpers.getURL(self.proxy._buildURL(url), post_data=post_data, params=params, headers=self.headers, timeout=timeout,
                              session=self.session, json=json, proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning)

    def downloadResult(self, result):
        """
        Save the result to disk.

        For torrents, tries public torrent caches derived from the magnet hash
        before falling back to the raw result URL. Returns True on a verified
        download, False otherwise.
        """

        # check for auth
        if not self._doLogin():
            return False

        if self.providerType == GenericProvider.TORRENT:
            try:
                # Extract the info-hash from a magnet link (32-char base32 or
                # 40-char hex forms).
                torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()

                if len(torrent_hash) == 32:
                    # base32 magnet hash -> hex, as the cache sites expect
                    torrent_hash = b16encode(b32decode(torrent_hash)).upper()

                if not torrent_hash:
                    # NOTE(review): ex() expects an exception object, not a URL
                    # string — the message still renders, but str() would be clearer.
                    logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
                    return False

                urls = [
                    'http://torcache.net/torrent/' + torrent_hash + '.torrent',
                    'http://zoink.ch/torrent/' + torrent_hash + '.torrent',
                    'http://torrage.com/torrent/' + torrent_hash.lower() + '.torrent',
                ]
            except:
                # No magnet hash found (e.g. a direct .torrent URL) — use it as-is.
                urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        elif self.providerType == GenericProvider.NZB:
            urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        else:
            # Unknown provider type: nothing to download.
            return

        # Try each candidate URL until one downloads AND verifies.
        for url in urls:
            if helpers.download_file(url, filename, session=self.session):
                logger.log(u"Downloading a result from " + self.name + " at " + url)

                if self.providerType == GenericProvider.TORRENT:
                    logger.log(u"Saved magnet link to " + filename, logger.INFO)
                else:
                    logger.log(u"Saved result to " + filename, logger.INFO)

                if self._verify_download(filename):
                    return True

        logger.log(u"Failed to download result", logger.WARNING)
        return False

    def _verify_download(self, file_name=None):
        """
        Checks the saved file to see if it was actually valid, if not then consider the download a failure.
        """

        # primitive verification of torrents, just make sure we didn't get a text file or something
        if self.providerType == GenericProvider.TORRENT:
            try:
                parser = createParser(file_name)
                if parser:
                    # hachoir's MIME sniffing; private API, but stable in the
                    # bundled version.
                    mime_type = parser._getMimeType()
                    try:
                        # Close the underlying stream so the file isn't locked.
                        parser.stream._input.close()
                    except:
                        pass
                    if mime_type == 'application/x-bittorrent':
                        return True
            except Exception as e:
                logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)

            logger.log(u"Result is not a valid torrent file", logger.WARNING)
            return False

        # Non-torrent results are accepted without verification.
        return True

    def searchRSS(self, episodes):
        """Return cached RSS results matching the wanted episodes."""
        return self.cache.findNeededEpisodes(episodes)

    def getQuality(self, item, anime=False):
        """
        Figures out the quality of the given RSS item node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns a Quality value obtained from the node's data (parsed from the
        release title by scene-naming rules).
        """
        (title, url) = self._get_title_and_url(item)
        quality = Quality.sceneQuality(title, anime)
        return quality

    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
        # Stub: subclasses implement the actual provider search and return a
        # list of raw result items.
        return []

    def _get_season_search_strings(self, episode):
        # Stub: subclasses return search strings for a whole-season search.
        return []

    def _get_episode_search_strings(self, eb_obj, add_string=''):
        # Stub: subclasses return search strings for a single-episode search.
        return []

    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns: A tuple containing two strings representing title and URL respectively
        """

        title = item.get('title')
        if title:
            # Scene convention: dots instead of spaces in release names.
            title = u'' + title.replace(' ', '.')

        url = item.get('link')
        if url:
            # Undo XML entity escaping so the URL is usable directly.
            url = url.replace('&amp;', '&')

        return title, url

    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
        """Search this provider for the given episodes and return wanted results.

        show: the show being searched.
        episodes: list of episode objects to find.
        search_mode: 'eponly' for per-episode searches, 'sponly' for season packs.
        manualSearch: True when triggered by the user rather than the scheduler.
        downCurQuality: allow re-downloading at the currently-snatched quality.

        Returns a dict keyed by episode number (or MULTI_EP_RESULT /
        SEASON_RESULT sentinels) whose values are lists of result objects.
        """

        self._checkAuth()
        self.show = show

        results = {}
        itemList = []

        searched_scene_season = None
        for epObj in episodes:
            # search cache for episode result
            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
            if cacheResult:
                if epObj.episode not in results:
                    results[epObj.episode] = cacheResult
                else:
                    results[epObj.episode].extend(cacheResult)

                # found result, search next episode
                continue

            # skip if season already searched
            if len(episodes) > 1 and searched_scene_season == epObj.scene_season:
                continue

            # mark season searched for season pack searches so we can skip later on
            searched_scene_season = epObj.scene_season

            if len(episodes) > 1:
                # get season search results
                for curString in self._get_season_search_strings(epObj):
                    itemList += self._doSearch(curString, search_mode, len(episodes))
            else:
                # get single episode search results
                for curString in self._get_episode_search_strings(epObj):
                    itemList += self._doSearch(curString, 'eponly', len(episodes))

        # if we found what we needed already from cache then return results and exit
        if len(results) == len(episodes):
            return results

        # sort list by quality: bucket items per quality, then flatten best-first
        if len(itemList):
            items = {}
            itemsUnknown = []
            for item in itemList:
                quality = self.getQuality(item, anime=show.is_anime)
                if quality == Quality.UNKNOWN:
                    itemsUnknown += [item]
                else:
                    if quality not in items:
                        items[quality] = [item]
                    else:
                        items[quality].append(item)

            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
            # unknown-quality items go last
            itemList += itemsUnknown if itemsUnknown else []

        # filter results
        cl = []
        for item in itemList:
            (title, url) = self._get_title_and_url(item)

            # parse the file name
            try:
                myParser = NameParser(False, convert=True)
                parse_result = myParser.parse(title)
            except InvalidNameException:
                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
                continue
            except InvalidShowException:
                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
                continue

            showObj = parse_result.show
            quality = parse_result.quality
            release_group = parse_result.release_group
            version = parse_result.version

            # Results that don't match what we searched for are still cached
            # for later use instead of being discarded outright.
            addCacheEntry = False
            if not (showObj.air_by_date or showObj.sports):
                if search_mode == 'sponly':
                    # Season-pack search: a result naming individual episodes
                    # is not a season pack.
                    # NOTE(review): both conditions below trigger on the same
                    # len(episode_numbers) test, so the second log line can fire
                    # right after the first — presumably intentional upstream;
                    # confirm before changing.
                    if len(parse_result.episode_numbers):
                        logger.log(
                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
                            logger.DEBUG)
                        addCacheEntry = True
                    if len(parse_result.episode_numbers) and (
                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
                                                                                 ep.scene_episode in parse_result.episode_numbers]):
                        logger.log(
                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
                            logger.DEBUG)
                        addCacheEntry = True
                else:
                    # Episode search: season-only result, or an episode result
                    # that matches none of the wanted season/episode pairs.
                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
                                                                                                     episodes if
                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
                        logger.log(
                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
                            logger.DEBUG)
                        addCacheEntry = True
                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
                        logger.log(
                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
                            logger.DEBUG)
                        addCacheEntry = True

                if not addCacheEntry:
                    # we just use the existing info for normal searches
                    actual_season = parse_result.season_number
                    actual_episodes = parse_result.episode_numbers
            else:
                # air-by-date / sports show: resolve season/episode via airdate
                if not (parse_result.is_air_by_date):
                    logger.log(
                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
                        logger.DEBUG)
                    addCacheEntry = True
                else:
                    airdate = parse_result.air_date.toordinal()
                    myDB = db.DBConnection()
                    sql_results = myDB.select(
                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
                        [showObj.indexerid, airdate])

                    # Exactly one episode must air on that date to be unambiguous.
                    if len(sql_results) != 1:
                        logger.log(
                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
                            logger.WARNING)
                        addCacheEntry = True

                if not addCacheEntry:
                    actual_season = int(sql_results[0]["season"])
                    actual_episodes = [int(sql_results[0]["episode"])]

            # add parsed result to cache for usage later on
            if addCacheEntry:
                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
                if ci is not None:
                    cl.append(ci)
                continue

            # make sure we want the episode
            wantEp = True
            for epNo in actual_episodes:
                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
                    wantEp = False
                    break

            if not wantEp:
                logger.log(
                    u"Ignoring result " + title + " because we don't want an episode that is " +
                    Quality.qualityStrings[
                        quality], logger.DEBUG)

                continue

            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)

            # make a result object
            epObj = []
            for curEp in actual_episodes:
                epObj.append(showObj.getEpisode(actual_season, curEp))

            result = self.getResult(epObj)
            result.show = showObj
            result.url = url
            result.name = title
            result.quality = quality
            result.release_group = release_group
            result.version = version
            result.content = None

            # Key the result by episode number, or a sentinel for multi-episode
            # and full-season results so the snatcher can split them later.
            if len(epObj) == 1:
                epNum = epObj[0].episode
                logger.log(u"Single episode result.", logger.DEBUG)
            elif len(epObj) > 1:
                epNum = MULTI_EP_RESULT
                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
                    parse_result.episode_numbers), logger.DEBUG)
            elif len(epObj) == 0:
                epNum = SEASON_RESULT
                logger.log(u"Separating full season result to check for later", logger.DEBUG)

            if epNum not in results:
                results[epNum] = [result]
            else:
                results[epNum].append(result)

        # check if we have items to add to cache (batched into one transaction)
        if len(cl) > 0:
            myDB = self.cache._getDB()
            myDB.mass_action(cl)

        return results

    def findPropers(self, search_date=None):
        """Return Proper/repack releases from the cache newer than search_date."""

        results = self.cache.listPropers(search_date)

        return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
                results]

    def seedRatio(self):
        '''
        Provider should override this value if custom seed ratio enabled
        It should return the value of the provider seed ratio
        '''
        return ''
+
+
class NZBProvider(GenericProvider):
    """Base class for Usenet providers; tags results as NZB type."""

    def __init__(self, name):
        # Common provider setup lives in the generic base; we only set the type.
        GenericProvider.__init__(self, name)
        self.providerType = GenericProvider.NZB
+
+
class TorrentProvider(GenericProvider):
    """Base class for torrent providers; tags results as torrent type."""

    def __init__(self, name):
        # Common provider setup lives in the generic base; we only set the type.
        GenericProvider.__init__(self, name)
        self.providerType = GenericProvider.TORRENT
+
class ProviderProxy:
    """Glype-style web proxy wrapper used to reach blocked provider sites.

    Disabled by default. When enabled, request URLs and scraped-page regexes
    are rewritten to pass through the configured proxy front-end.
    """

    def __init__(self):
        self.Type = 'GlypeProxy'
        self.param = 'browse.php?u='
        self.option = '&b=32&f=norefer'
        self.enabled = False
        self.url = None

        # Known public Glype front-ends the user may choose from.
        self.urls = {
            'getprivate.eu (NL)': 'http://getprivate.eu/',
            'hideme.nl (NL)': 'http://hideme.nl/',
            'proxite.eu (DE)': 'http://proxite.eu/',
            'interproxy.net (EU)': 'http://interproxy.net/',
        }

    def isEnabled(self):
        """ Return True if we Choose to call TPB via Proxy """
        return self.enabled

    def getProxyURL(self):
        """ Return the Proxy URL Choosen via Provider Setting """
        return str(self.url)

    def _buildURL(self, url):
        """ Return the Proxyfied URL of the page """
        if not self.isEnabled():
            return url

        proxified = self.getProxyURL() + self.param + urllib.quote_plus(url.encode('UTF-8')) + self.option
        logger.log(u"Proxified URL: " + proxified, logger.DEBUG)
        return proxified

    def _buildRE(self, regx):
        """ Return the Proxyfied RE string """
        if not self.isEnabled():
            # Strip the '//1' placeholder when no proxy is in use.
            return re.sub('//1', '', regx)

        proxified = re.sub('//1', self.option, regx).replace('&', '&amp;')
        logger.log(u"Proxified REGEX: " + proxified, logger.DEBUG)
        return proxified
diff --git a/sickbeard/providers/iptorrents.py b/sickbeard/providers/iptorrents.py
index e9e999bd066e530c1cfa56df2808975b987e07a9..113f81611957aec4e0a353bf4f1cebc88cd2c6f3 100644
--- a/sickbeard/providers/iptorrents.py
+++ b/sickbeard/providers/iptorrents.py
@@ -1,463 +1,463 @@
-# Author: seedboy
-# URL: https://github.com/seedboy
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import re
-import traceback
-import datetime
-import urlparse
-import itertools
-
-import sickbeard
-import generic
-from sickbeard.common import Quality
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import db
-from sickbeard import classes
-from sickbeard import helpers
-from sickbeard import show_name_helpers
-from sickbeard.exceptions import ex, AuthException
-from sickbeard import clients
-from lib import requests
-from lib.requests import exceptions
-from sickbeard.bs4_parser import BS4Parser
-from lib.unidecode import unidecode
-from sickbeard.helpers import sanitizeSceneName
-from sickbeard.show_name_helpers import allPossibleShowNames
-from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
-
-
-class IPTorrentsProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "IPTorrents")
-
-        self.supportsBacklog = True
-
-        self.enabled = False
-        self.username = None
-        self.password = None
-        self.ratio = None
-        self.freeleech = False
-
-        self.cache = IPTorrentsCache(self)
-
-        self.urls = {'base_url': 'https://iptorrents.eu',
-                'login': 'https://iptorrents.eu/torrents/',
-                'search': 'https://iptorrents.eu/torrents/?%s%s&q=%s&qf=ti',
-        }
-
-        self.url = self.urls['base_url']
-
-        self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1'
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'iptorrents.png'
-
-    def getQuality(self, item, anime=False):
-
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def _checkAuth(self):
-
-        if not self.username or not self.password:
-            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
-
-        return True
-
-    def _doLogin(self):
-
-        login_params = {'username': self.username,
-                        'password': self.password,
-                        'login': 'submit',
-        }
-
-        try:
-            response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
-        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
-            logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
-            return False
-
-        if re.search('tries left', response.text) \
-                or re.search('<title>IPT</title>', response.text) \
-                or response.status_code == 401:
-            logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
-            return False
-
-        return True
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        search_string = {'Episode': []}
-
-        if not ep_obj:
-            return []
-
-        if self.show.air_by_date:
-            for show_name in set(allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|')
-                search_string['Episode'].append(ep_string)
-        elif self.show.sports:
-            for show_name in set(allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|') + '|' + \
-                            ep_obj.airdate.strftime('%b')
-                search_string['Episode'].append(ep_string)
-        elif self.show.anime:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            "%i" % int(ep_obj.scene_absolute_number)
-                search_string['Episode'].append(ep_string)
-        else:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
-                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
-                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
-
-                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
-
-        return [search_string]
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
-
-        self._checkAuth()
-        self.show = show
-
-        results = {}
-        itemList = []
-
-        if search_mode == 'sponly':
-            logger.log(u"This provider doesn't support season pack. Consider setting Season search mode to episodes only and unchecked Season search fallback", logger.WARNING)
-            search_mode = 'eponly'
-
-        for epObj in episodes:
-            # search cache for episode result
-            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
-            if cacheResult:
-                if epObj.episode not in results:
-                    results[epObj.episode] = cacheResult
-                else:
-                    results[epObj.episode].extend(cacheResult)
-
-                # found result, search next episode
-                continue
-
-            for curString in self._get_episode_search_strings(epObj):
-                itemList += self._doSearch(curString, 'eponly', len(episodes))
-
-        # if we found what we needed already from cache then return results and exit
-        if len(results) == len(episodes):
-            return results
-
-        # sort list by quality
-        if len(itemList):
-            items = {}
-            itemsUnknown = []
-            for item in itemList:
-                quality = self.getQuality(item, anime=show.is_anime)
-                if quality == Quality.UNKNOWN:
-                    itemsUnknown += [item]
-                else:
-                    if quality not in items:
-                        items[quality] = [item]
-                    else:
-                        items[quality].append(item)
-
-            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
-            itemList += itemsUnknown if itemsUnknown else []
-
-        # filter results
-        cl = []
-        for item in itemList:
-            (title, url) = self._get_title_and_url(item)
-
-            # parse the file name
-            try:
-                myParser = NameParser(False, convert=True)
-                parse_result = myParser.parse(title)
-            except InvalidNameException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
-                continue
-            except InvalidShowException:
-                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
-                continue
-
-            showObj = parse_result.show
-            quality = parse_result.quality
-            release_group = parse_result.release_group
-            version = parse_result.version
-
-            addCacheEntry = False
-            if not (showObj.air_by_date or showObj.sports):
-                if search_mode == 'sponly': 
-                    if len(parse_result.episode_numbers):
-                        logger.log(
-                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    if len(parse_result.episode_numbers) and (
-                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
-                                                                                 ep.scene_episode in parse_result.episode_numbers]):
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                else:
-                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
-                                                                                                     episodes if
-                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
-                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
-                        logger.log(
-                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
-                            logger.DEBUG)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    # we just use the existing info for normal searches
-                    actual_season = parse_result.season_number
-                    actual_episodes = parse_result.episode_numbers
-            else:
-                if not (parse_result.is_air_by_date):
-                    logger.log(
-                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
-                        logger.DEBUG)
-                    addCacheEntry = True
-                else:
-                    airdate = parse_result.air_date.toordinal()
-                    myDB = db.DBConnection()
-                    sql_results = myDB.select(
-                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
-                        [showObj.indexerid, airdate])
-
-                    if len(sql_results) != 1:
-                        logger.log(
-                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
-                            logger.WARNING)
-                        addCacheEntry = True
-
-                if not addCacheEntry:
-                    actual_season = int(sql_results[0]["season"])
-                    actual_episodes = [int(sql_results[0]["episode"])]
-
-            # add parsed result to cache for usage later on
-            if addCacheEntry:
-                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
-                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
-                if ci is not None:
-                    cl.append(ci)
-                continue
-
-            # make sure we want the episode
-            wantEp = True
-            for epNo in actual_episodes:
-                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
-                    wantEp = False
-                    break
-
-            if not wantEp:
-                logger.log(
-                    u"Ignoring result " + title + " because we don't want an episode that is " +
-                    Quality.qualityStrings[
-                        quality], logger.DEBUG)
-
-                continue
-
-            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
-
-            # make a result object
-            epObj = []
-            for curEp in actual_episodes:
-                epObj.append(showObj.getEpisode(actual_season, curEp))
-
-            result = self.getResult(epObj)
-            result.show = showObj
-            result.url = url
-            result.name = title
-            result.quality = quality
-            result.release_group = release_group
-            result.version = version
-            result.content = None
-
-            if len(epObj) == 1:
-                epNum = epObj[0].episode
-                logger.log(u"Single episode result.", logger.DEBUG)
-            elif len(epObj) > 1:
-                epNum = MULTI_EP_RESULT
-                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
-                    parse_result.episode_numbers), logger.DEBUG)
-            elif len(epObj) == 0:
-                epNum = SEASON_RESULT
-                logger.log(u"Separating full season result to check for later", logger.DEBUG)
-
-            if epNum not in results:
-                results[epNum] = [result]
-            else:
-                results[epNum].append(result)
-
-        # check if we have items to add to cache
-        if len(cl) > 0:
-            myDB = self.cache._getDB()
-            myDB.mass_action(cl)
-
-        return results
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        results = []
-        items = {'Season': [], 'Episode': [], 'RSS': []}
-
-        freeleech = '&free=on' if self.freeleech else ''
-
-        if not self._doLogin():
-            return results
-
-        for mode in search_params.keys():
-            for search_string in search_params[mode]:
-                if isinstance(search_string, unicode):
-                    search_string = unidecode(search_string)
-
-                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
-                searchURL = self.urls['search'] % (self.categorie, freeleech, search_string)
-                searchURL += ';o=seeders' if mode != 'RSS' else ''
-
-                logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
-
-                data = self.getURL(searchURL)
-                if not data:
-                    continue
-
-                try:
-                    data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
-                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
-                        if not html:
-                            logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG)
-                            continue
-
-                        if html.find(text='No Torrents Found!'):
-                            logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG)
-                            continue
-
-                        torrent_table = html.find('table', attrs={'class': 'torrents'})
-                        torrents = torrent_table.find_all('tr') if torrent_table else []
-
-                        #Continue only if one Release is found
-                        if len(torrents) < 2:
-                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
-                                       logger.WARNING)
-                            continue
-
-                        for result in torrents[1:]:
-
-                            try:
-                                torrent = result.find_all('td')[1].find('a')
-                                torrent_name = torrent.string
-                                torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href']
-                                torrent_details_url = self.urls['base_url'] + torrent['href']
-                                torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).string)
-                                ## Not used, perhaps in the future ##
-                                #torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
-                                #torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
-                            except (AttributeError, TypeError):
-                                continue
-
-                            # Filter unseeded torrent and torrents with no name/url
-                            if mode != 'RSS' and torrent_seeders == 0:
-                                continue
-
-                            if not torrent_name or not torrent_download_url:
-                                continue
-
-                            item = torrent_name, torrent_download_url
-                            logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")", logger.DEBUG)
-                            items[mode].append(item)
-
-                except Exception, e:
-                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
-
-            results += items[mode]
-
-        return results
-
-    def _get_title_and_url(self, item):
-
-        title, url = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = str(url).replace('&amp;', '&')
-
-        return (title, url)
-
-    def findPropers(self, search_date=datetime.datetime.today()):
-
-        results = []
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select(
-            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
-            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
-            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
-            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
-            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
-        )
-
-        if not sqlResults:
-            return []
-
-        for sqlshow in sqlResults:
-            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
-            if self.show:
-                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
-                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
-
-                for item in self._doSearch(searchString[0]):
-                    title, url = self._get_title_and_url(item)
-                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-class IPTorrentsCache(tvcache.TVCache):
-    def __init__(self, provider):
-
-        tvcache.TVCache.__init__(self, provider)
-
-        # Only poll IPTorrents every 10 minutes max
-        self.minTime = 10
-
-    def _getRSSData(self):
-        search_params = {'RSS': ['']}
-        return {'entries': self.provider._doSearch(search_params)}
-
-
-provider = IPTorrentsProvider()
+# Author: seedboy
+# URL: https://github.com/seedboy
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import traceback
+import datetime
+import urlparse
+import itertools
+
+import sickbeard
+import generic
+from sickbeard.common import Quality
+from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import db
+from sickbeard import classes
+from sickbeard import helpers
+from sickbeard import show_name_helpers
+from sickbeard.exceptions import ex, AuthException
+from sickbeard import clients
+from lib import requests
+from lib.requests import exceptions
+from sickbeard.bs4_parser import BS4Parser
+from lib.unidecode import unidecode
+from sickbeard.helpers import sanitizeSceneName
+from sickbeard.show_name_helpers import allPossibleShowNames
+from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+
+
+class IPTorrentsProvider(generic.TorrentProvider):
+    def __init__(self):
+        """Configure the IPTorrents provider: URLs, auth fields and cache."""
+
+        generic.TorrentProvider.__init__(self, "IPTorrents")
+
+        # This provider supports backlog searches, not just RSS polling.
+        self.supportsBacklog = True
+
+        # Populated from user configuration by the settings UI.
+        self.enabled = False
+        self.username = None
+        self.password = None
+        self.ratio = None
+        self.freeleech = False
+
+        self.cache = IPTorrentsCache(self)
+
+        # search URL placeholders: categories, freeleech flag, query string.
+        self.urls = {'base_url': 'https://iptorrents.eu',
+                'login': 'https://iptorrents.eu/torrents/',
+                'search': 'https://iptorrents.eu/torrents/?%s%s&q=%s&qf=ti',
+        }
+
+        self.url = self.urls['base_url']
+
+        # Category filter fragment appended to searches — presumably the
+        # site's TV categories; TODO confirm against IPTorrents' category ids.
+        self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1'
+
+    def isEnabled(self):
+        """Return the user-configured enabled flag for this provider."""
+        return self.enabled
+
+    def imageName(self):
+        """Return the filename of the provider icon shown in the UI."""
+        return 'iptorrents.png'
+
+    def getQuality(self, item, anime=False):
+
+        quality = Quality.sceneQuality(item[0], anime)
+        return quality
+
+    def _checkAuth(self):
+
+        if not self.username or not self.password:
+            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
+
+        return True
+
+    def _doLogin(self):
+        """POST credentials to the login page; True on success, else False."""
+
+        login_params = {'username': self.username,
+                        'password': self.password,
+                        'login': 'submit',
+        }
+
+        try:
+            # NOTE(review): verify=False disables TLS certificate checking —
+            # confirm this is intentional (site cert issues?) and not a risk.
+            response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
+        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
+            logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
+            return False
+
+        # 'tries left' / bare '<title>IPT</title>' in the response body are
+        # presumably markers of a failed-login page; 401 is explicit.
+        if re.search('tries left', response.text) \
+                or re.search('<title>IPT</title>', response.text) \
+                or response.status_code == 401:
+            logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
+            return False
+
+        return True
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+        """Build per-episode search strings for every known show name.
+
+        Returns a one-element list holding a dict {'Episode': [strings]},
+        or [] when no episode object is given. ``add_string`` is appended
+        to regular-episode searches (e.g. 'PROPER|REPACK').
+        """
+
+        search_string = {'Episode': []}
+
+        if not ep_obj:
+            return []
+
+        if self.show.air_by_date:
+            # Date-based shows: search by airdate, '-' swapped for the
+            # tracker's '|' alternation separator.
+            for show_name in set(allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|')
+                search_string['Episode'].append(ep_string)
+        elif self.show.sports:
+            # Sports events: airdate alternation plus abbreviated month name.
+            for show_name in set(allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|') + '|' + \
+                            ep_obj.airdate.strftime('%b')
+                search_string['Episode'].append(ep_string)
+        elif self.show.anime:
+            # Anime: searched by scene absolute episode number.
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            "%i" % int(ep_obj.scene_absolute_number)
+                search_string['Episode'].append(ep_string)
+        else:
+            # Regular shows: SxxExx-style pattern from the naming config.
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
+                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
+                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
+
+                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
+
+        return [search_string]
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+        """Search the provider for the given episodes.
+
+        Returns a dict mapping an episode number (or MULTI_EP_RESULT /
+        SEASON_RESULT) to a list of result objects. Cache hits are used
+        first; remaining episodes are searched live, parsed, validated
+        against the wanted episodes, and filtered by wanted quality.
+
+        NOTE(review): MULTI_EP_RESULT and SEASON_RESULT are referenced
+        below but do not appear in this module's imports — verify they
+        are brought into scope (sickbeard.common defines them).
+        """
+
+        self._checkAuth()
+        self.show = show
+
+        results = {}
+        itemList = []
+
+        # Season-pack searches are unsupported here; force episode mode.
+        # NOTE(review): because of this, the `search_mode == 'sponly'`
+        # branch further down appears unreachable — confirm.
+        if search_mode == 'sponly':
+            logger.log(u"This provider doesn't support season pack. Consider setting Season search mode to episodes only and unchecked Season search fallback", logger.WARNING)
+            search_mode = 'eponly'
+
+        for epObj in episodes:
+            # search cache for episode result
+            cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
+            if cacheResult:
+                if epObj.episode not in results:
+                    results[epObj.episode] = cacheResult
+                else:
+                    results[epObj.episode].extend(cacheResult)
+
+                # found result, search next episode
+                continue
+
+            for curString in self._get_episode_search_strings(epObj):
+                itemList += self._doSearch(curString, 'eponly', len(episodes))
+
+        # if we found what we needed already from cache then return results and exit
+        if len(results) == len(episodes):
+            return results
+
+        # sort list by quality: known qualities first (best to worst),
+        # unknown-quality items appended at the end.
+        if len(itemList):
+            items = {}
+            itemsUnknown = []
+            for item in itemList:
+                quality = self.getQuality(item, anime=show.is_anime)
+                if quality == Quality.UNKNOWN:
+                    itemsUnknown += [item]
+                else:
+                    if quality not in items:
+                        items[quality] = [item]
+                    else:
+                        items[quality].append(item)
+
+            itemList = list(itertools.chain(*[v for (k, v) in sorted(items.items(), reverse=True)]))
+            itemList += itemsUnknown if itemsUnknown else []
+
+        # filter results
+        cl = []
+        for item in itemList:
+            (title, url) = self._get_title_and_url(item)
+
+            # parse the file name
+            try:
+                myParser = NameParser(False, convert=True)
+                parse_result = myParser.parse(title)
+            except InvalidNameException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
+                continue
+            except InvalidShowException:
+                logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
+                continue
+
+            showObj = parse_result.show
+            quality = parse_result.quality
+            release_group = parse_result.release_group
+            version = parse_result.version
+
+            # Validate the parsed result against the episodes we actually
+            # want; mismatches are cached for later instead of used now.
+            addCacheEntry = False
+            if not (showObj.air_by_date or showObj.sports):
+                if search_mode == 'sponly': 
+                    if len(parse_result.episode_numbers):
+                        logger.log(
+                            u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    if len(parse_result.episode_numbers) and (
+                                    parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
+                                                                                 ep.scene_episode in parse_result.episode_numbers]):
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                else:
+                    # NOTE(review): when episode_numbers is empty the list
+                    # comprehension below can never match, so any result
+                    # with only a season number is flagged — confirm this
+                    # is the intended behaviour.
+                    if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
+                                                                                                     episodes if
+                                                                                                     ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+                    elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
+                                                                    ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
+                        logger.log(
+                            u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
+                            logger.DEBUG)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    # we just use the existing info for normal searches
+                    actual_season = parse_result.season_number
+                    actual_episodes = parse_result.episode_numbers
+            else:
+                # Air-by-date / sports shows: map the parsed airdate back to
+                # a season/episode pair via the local episode database.
+                if not (parse_result.is_air_by_date):
+                    logger.log(
+                        u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
+                        logger.DEBUG)
+                    addCacheEntry = True
+                else:
+                    airdate = parse_result.air_date.toordinal()
+                    myDB = db.DBConnection()
+                    sql_results = myDB.select(
+                        "SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
+                        [showObj.indexerid, airdate])
+
+                    if len(sql_results) != 1:
+                        logger.log(
+                            u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
+                            logger.WARNING)
+                        addCacheEntry = True
+
+                if not addCacheEntry:
+                    actual_season = int(sql_results[0]["season"])
+                    actual_episodes = [int(sql_results[0]["episode"])]
+
+            # add parsed result to cache for usage later on
+            if addCacheEntry:
+                logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
+                ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
+                if ci is not None:
+                    cl.append(ci)
+                continue
+
+            # make sure we want the episode
+            wantEp = True
+            for epNo in actual_episodes:
+                if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
+                    wantEp = False
+                    break
+
+            if not wantEp:
+                logger.log(
+                    u"Ignoring result " + title + " because we don't want an episode that is " +
+                    Quality.qualityStrings[
+                        quality], logger.DEBUG)
+
+                continue
+
+            logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
+
+            # make a result object
+            epObj = []
+            for curEp in actual_episodes:
+                epObj.append(showObj.getEpisode(actual_season, curEp))
+
+            result = self.getResult(epObj)
+            result.show = showObj
+            result.url = url
+            result.name = title
+            result.quality = quality
+            result.release_group = release_group
+            result.version = version
+            result.content = None
+
+            # Key the result by episode number, or by the multi-episode /
+            # full-season sentinels for later splitting.
+            if len(epObj) == 1:
+                epNum = epObj[0].episode
+                logger.log(u"Single episode result.", logger.DEBUG)
+            elif len(epObj) > 1:
+                epNum = MULTI_EP_RESULT
+                logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
+                    parse_result.episode_numbers), logger.DEBUG)
+            elif len(epObj) == 0:
+                epNum = SEASON_RESULT
+                logger.log(u"Separating full season result to check for later", logger.DEBUG)
+
+            if epNum not in results:
+                results[epNum] = [result]
+            else:
+                results[epNum].append(result)
+
+        # check if we have items to add to cache
+        if len(cl) > 0:
+            myDB = self.cache._getDB()
+            myDB.mass_action(cl)
+
+        return results
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        results = []
+        items = {'Season': [], 'Episode': [], 'RSS': []}
+
+        freeleech = '&free=on' if self.freeleech else ''
+
+        if not self._doLogin():
+            return results
+
+        for mode in search_params.keys():
+            for search_string in search_params[mode]:
+                if isinstance(search_string, unicode):
+                    search_string = unidecode(search_string)
+
+                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
+                searchURL = self.urls['search'] % (self.categorie, freeleech, search_string)
+                searchURL += ';o=seeders' if mode != 'RSS' else ''
+
+                logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
+
+                data = self.getURL(searchURL)
+                if not data:
+                    continue
+
+                try:
+                    data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
+                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
+                        if not html:
+                            logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG)
+                            continue
+
+                        if html.find(text='No Torrents Found!'):
+                            logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG)
+                            continue
+
+                        torrent_table = html.find('table', attrs={'class': 'torrents'})
+                        torrents = torrent_table.find_all('tr') if torrent_table else []
+
+                        #Continue only if one Release is found
+                        if len(torrents) < 2:
+                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
+                                       logger.WARNING)
+                            continue
+
+                        for result in torrents[1:]:
+
+                            try:
+                                torrent = result.find_all('td')[1].find('a')
+                                torrent_name = torrent.string
+                                torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href']
+                                torrent_details_url = self.urls['base_url'] + torrent['href']
+                                torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).string)
+                                ## Not used, perhaps in the future ##
+                                #torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
+                                #torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
+                            except (AttributeError, TypeError):
+                                continue
+
+                            # Filter unseeded torrent and torrents with no name/url
+                            if mode != 'RSS' and torrent_seeders == 0:
+                                continue
+
+                            if not torrent_name or not torrent_download_url:
+                                continue
+
+                            item = torrent_name, torrent_download_url
+                            logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")", logger.DEBUG)
+                            items[mode].append(item)
+
+                except Exception, e:
+                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+
+            results += items[mode]
+
+        return results
+
+    def _get_title_and_url(self, item):
+
+        title, url = item
+
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        if url:
+            url = str(url).replace('&amp;', '&')
+
+        return (title, url)
+
+    def findPropers(self, search_date=datetime.datetime.today()):
+
+        results = []
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select(
+            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
+            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
+            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
+            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
+            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
+        )
+
+        if not sqlResults:
+            return []
+
+        for sqlshow in sqlResults:
+            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
+            if self.show:
+                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
+                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
+
+                for item in self._doSearch(searchString[0]):
+                    title, url = self._get_title_and_url(item)
+                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
+
+        return results
+
    def seedRatio(self):
        # Seed ratio configured for this provider; may be None (set in __init__).
        return self.ratio
+
class IPTorrentsCache(tvcache.TVCache):
    """RSS cache for the IPTorrents provider."""

    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # Poll IPTorrents at most once every 10 minutes.
        self.minTime = 10

    def _getRSSData(self):
        # An empty search string in RSS mode fetches the newest torrents.
        rss_results = self.provider._doSearch({'RSS': ['']})
        return {'entries': rss_results}
+
+
+provider = IPTorrentsProvider()
diff --git a/sickbeard/providers/morethantv.py b/sickbeard/providers/morethantv.py
index fa48f69efb427c900c27c171d62e220aace13d6f..13af2a8275941b391770abc1f3f68d08cee0fa48 100755
--- a/sickbeard/providers/morethantv.py
+++ b/sickbeard/providers/morethantv.py
@@ -1,321 +1,321 @@
-# Author: Seamus Wassman
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-# This file was adapted for MoreThanTV from the freshontv scraper by
-# Sparhawk76, this is my first foray into python, so there most likely
-# are some mistakes or things I could have done better.
-
-import re
-import traceback
-import datetime
-import urlparse
-import sickbeard
-import generic
-from sickbeard.common import Quality, cpu_presets
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import db
-from sickbeard import classes
-from sickbeard import helpers
-from sickbeard import show_name_helpers
-from sickbeard.exceptions import ex, AuthException
-from sickbeard import clients
-from lib import requests
-from lib.requests import exceptions
-from sickbeard.bs4_parser import BS4Parser
-from lib.unidecode import unidecode
-from sickbeard.helpers import sanitizeSceneName
-
-
-class MoreThanTVProvider(generic.TorrentProvider):
-
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "MoreThanTV")
-
-        self.supportsBacklog = True
-
-        self.enabled = False
-        self._uid = None
-        self._hash = None
-        self.username = None
-        self.password = None
-        self.ratio = None
-        self.minseed = None
-        self.minleech = None
-        self.freeleech = False
-
-        self.cache = MoreThanTVCache(self)
-
-        self.urls = {'base_url': 'http://www.morethan.tv/',
-                'login': 'http://www.morethan.tv/login.php',
-                'detail': 'http://www.morethan.tv/torrents.php?id=%s',
-                'search': 'http://www.morethan.tv/torrents.php?tags_type=1&order_by=time&order_way=desc&action=basic&searchsubmit=1&searchstr=%s',
-                'download': 'http://www.morethan.tv/torrents.php?action=download&id=%s',
-                }
-
-        self.url = self.urls['base_url']
-
-        self.cookies = None
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'morethantv.png'
-
-    def getQuality(self, item, anime=False):
-
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def _checkAuth(self):
-
-        if not self.username or not self.password:
-            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
-
-        return True
-
-    def _doLogin(self):
-        if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
-            return True
-
-        if self._uid and self._hash:
-            requests.utils.add_dict_to_cookiejar(self.session.cookies, self.cookies)
-        else:
-            login_params = {'username': self.username,
-                            'password': self.password,
-                            'login': 'submit'
-            }
-
-            if not self.session:
-                self.session = requests.Session()
-
-            try:
-                response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
-            except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
-                logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
-                return False
-
-            if re.search('Your username or password was incorrect.', response.text):
-                logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
-                return False
-
-            return True
-
-    def _get_season_search_strings(self, ep_obj):
-
-        search_string = {'Season': []}
-        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-            if ep_obj.show.air_by_date or ep_obj.show.sports:
-                ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
-            elif ep_obj.show.anime:
-                ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
-            else:
-                ep_string = show_name + '.S%02d*' % int(ep_obj.scene_season)  #1) showName SXX
-
-            search_string['Season'].append(re.sub('\.', '+', ep_string))
-
-        return [search_string]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        search_string = {'Episode': []}
-
-        if not ep_obj:
-            return []
-
-        if self.show.air_by_date:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|')
-                search_string['Episode'].append(ep_string)
-        elif self.show.sports:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|') + '|' + \
-                            ep_obj.airdate.strftime('%b')
-                search_string['Episode'].append(ep_string)
-        elif self.show.anime:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            "%i" % int(ep_obj.scene_absolute_number)
-                search_string['Episode'].append(ep_string)
-        else:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
-                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
-                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
-
-		search_string['Episode'].append(re.sub('\.', '+', ep_string))
-
-        return [search_string]
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        results = []
-        items = {'Season': [], 'Episode': [], 'RSS': []}
-
-        freeleech = '3' if self.freeleech else '0'
-
-        if not self._doLogin():
-            return results
-
-        for mode in search_params.keys():
-            for search_string in search_params[mode]:
-
-                if isinstance(search_string, unicode):
-                    search_string = unidecode(search_string)
-
-                searchURL = self.urls['search'] % (search_string)
-
-                logger.log(u"Search string: " + searchURL, logger.DEBUG)
-
-                # returns top 15 results by default, expandable in user profile to 100
-                data = self.getURL(searchURL)
-                if not data:
-                    continue
-
-                try:
-                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
-                        torrent_table = html.find('table', attrs={'class': 'torrent_table'})
-                        torrent_rows = torrent_table.findChildren('tr') if torrent_table else []
-
-                        #Continue only if one Release is found
-                        if len(torrent_rows) < 2:
-                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
-                                       logger.DEBUG)
-                            continue
-
-                        # skip colheader
-                        for result in torrent_rows[1:]:
-                            cells = result.findChildren('td')
-
-                            link = cells[1].find('a', attrs = {'title': 'Download'})
-
-                            link_str = str(link['href'])
-
-                            logger.log(u"link=" + link_str, logger.DEBUG)
-
-                            #skip if torrent has been nuked due to poor quality
-                            if cells[1].find('img', alt='Nuked') != None:
-                                continue
-                            torrent_id_long = link['href'].replace('torrents.php?action=download&id=', '')
-                            torrent_id = torrent_id_long.split('&', 1)[0]
-
-
-                            try:
-                                if link.has_key('title'):
-                                    title = cells[1].find('a', {'title': 'View torrent'}).contents[0].strip()
-                                else:
-                                    title = link.contents[0]
-                                download_url = self.urls['download'] % (torrent_id_long)
-
-                                seeders = cells[6].contents[0]
-
-                                leechers = cells[7].contents[0]
-
-                            except (AttributeError, TypeError):
-                                continue
-
- 
-                            #Filter unseeded torrent
-                            if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
-                                continue
-
-                            if not title or not download_url:
-                                continue
-
-# Debug
-#                            logger.log(u"title = " + title + ", download_url = " + download_url + ", torrent_id = " + torrent_id + ", seeders = " + seeders + ", leechers = " + leechers, logger.DEBUG)
-
-
-                            item = title, download_url, torrent_id, seeders, leechers
-                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)
-
-                            items[mode].append(item)
-
-                except Exception, e:
-                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
-
-            #For each search mode sort all the items by seeders
-            items[mode].sort(key=lambda tup: tup[3], reverse=True)
-
-            results += items[mode]
-
-        return results
-
-    def _get_title_and_url(self, item):
-
-        title, url, id, seeders, leechers = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = str(url).replace('&amp;', '&')
-
-        return (title, url)
-
-    def findPropers(self, search_date=datetime.datetime.today()):
-
-        results = []
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select(
-            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
-            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
-            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
-            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
-            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
-        )
-
-        if not sqlResults:
-            return []
-
-        for sqlshow in sqlResults:
-            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
-            if self.show:
-                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
-
-                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
-
-                for item in self._doSearch(searchString[0]):
-                    title, url = self._get_title_and_url(item)
-                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class MoreThanTVCache(tvcache.TVCache):
-    def __init__(self, provider):
-
-        tvcache.TVCache.__init__(self, provider)
-
-        # poll delay in minutes
-        self.minTime = 20
-
-    def _getRSSData(self):
-        search_params = {'RSS': ['']}
-        return {'entries': self.provider._doSearch(search_params)}
-
-provider = MoreThanTVProvider()
+# Author: Seamus Wassman
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+# This file was adapted for MoreThanTV from the freshontv scraper by
+# Sparhawk76, this is my first foray into python, so there most likely
+# are some mistakes or things I could have done better.
+
+import re
+import traceback
+import datetime
+import urlparse
+import sickbeard
+import generic
+from sickbeard.common import Quality, cpu_presets
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import db
+from sickbeard import classes
+from sickbeard import helpers
+from sickbeard import show_name_helpers
+from sickbeard.exceptions import ex, AuthException
+from sickbeard import clients
+from lib import requests
+from lib.requests import exceptions
+from sickbeard.bs4_parser import BS4Parser
+from lib.unidecode import unidecode
+from sickbeard.helpers import sanitizeSceneName
+
+
class MoreThanTVProvider(generic.TorrentProvider):
    """Torrent provider for the MoreThanTV private tracker.

    Adapted from the freshontv scraper; performs HTML-scraping searches
    against morethan.tv and returns results to sickbeard.
    """

    def __init__(self):

        generic.TorrentProvider.__init__(self, "MoreThanTV")

        self.supportsBacklog = True

        # User-configurable settings; populated externally after construction.
        self.enabled = False
        self._uid = None
        self._hash = None
        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None
        self.freeleech = False

        self.cache = MoreThanTVCache(self)

        self.urls = {'base_url': 'http://www.morethan.tv/',
                     'login': 'http://www.morethan.tv/login.php',
                     'detail': 'http://www.morethan.tv/torrents.php?id=%s',
                     'search': 'http://www.morethan.tv/torrents.php?tags_type=1&order_by=time&order_way=desc&action=basic&searchsubmit=1&searchstr=%s',
                     'download': 'http://www.morethan.tv/torrents.php?action=download&id=%s',
                     }

        self.url = self.urls['base_url']

        self.cookies = None

    def isEnabled(self):
        return self.enabled

    def imageName(self):
        return 'morethantv.png'

    def getQuality(self, item, anime=False):
        # item[0] is the release title; derive the quality from it.
        quality = Quality.sceneQuality(item[0], anime)
        return quality

    def _checkAuth(self):
        """Raise AuthException unless both username and password are set."""
        if not self.username or not self.password:
            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")

        return True

    def _doLogin(self):
        """Log in to MoreThanTV, reusing session cookies when possible.

        Returns True on success, False when the site is unreachable or the
        credentials are rejected.
        """
        if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
            return True

        if self._uid and self._hash:
            requests.utils.add_dict_to_cookiejar(self.session.cookies, self.cookies)
        else:
            login_params = {'username': self.username,
                            'password': self.password,
                            'login': 'submit'
                            }

            if not self.session:
                self.session = requests.Session()

            try:
                response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
            except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
                logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
                return False

            if re.search('Your username or password was incorrect.', response.text):
                logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
                return False

        # BUGFIX: this 'return True' used to sit inside the else-branch, so
        # the cookie-reuse path above fell through and returned None (falsy),
        # making every search abort after a cookie login.
        return True

    def _get_season_search_strings(self, ep_obj):
        """Build the 'Season' search strings for ep_obj's show and season."""
        search_string = {'Season': []}
        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
            if ep_obj.show.air_by_date or ep_obj.show.sports:
                ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
            elif ep_obj.show.anime:
                ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
            else:
                ep_string = show_name + '.S%02d*' % int(ep_obj.scene_season)  #1) showName SXX

            search_string['Season'].append(re.sub('\.', '+', ep_string))

        return [search_string]

    def _get_episode_search_strings(self, ep_obj, add_string=''):
        """Build the 'Episode' search strings for a single episode."""
        search_string = {'Episode': []}

        if not ep_obj:
            return []

        if self.show.air_by_date:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + ' ' + \
                            str(ep_obj.airdate).replace('-', '|')
                search_string['Episode'].append(ep_string)
        elif self.show.sports:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + ' ' + \
                            str(ep_obj.airdate).replace('-', '|') + '|' + \
                            ep_obj.airdate.strftime('%b')
                search_string['Episode'].append(ep_string)
        elif self.show.anime:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + ' ' + \
                            "%i" % int(ep_obj.scene_absolute_number)
                search_string['Episode'].append(ep_string)
        else:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string

                # BUGFIX: this append was tab-indented outside the loop (an
                # IndentationError on Python 3 and wrong nesting on Python 2),
                # so regular episode searches collected at most one string.
                search_string['Episode'].append(re.sub('\.', '+', ep_string))

        return [search_string]

    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
        """Run each search in search_params and return result tuples.

        Each result is (title, download_url, torrent_id, seeders, leechers);
        within each mode the results are sorted by seeder count, descending.
        """
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        # NOTE(review): computed but never used — the search URL has no
        # freeleech placeholder; confirm against the site's query parameters.
        freeleech = '3' if self.freeleech else '0'

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)

                searchURL = self.urls['search'] % (search_string)

                logger.log(u"Search string: " + searchURL, logger.DEBUG)

                # returns top 15 results by default, expandable in user profile to 100
                data = self.getURL(searchURL)
                if not data:
                    continue

                try:
                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
                        torrent_table = html.find('table', attrs={'class': 'torrent_table'})
                        torrent_rows = torrent_table.findChildren('tr') if torrent_table else []

                        #Continue only if one Release is found
                        if len(torrent_rows) < 2:
                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
                                       logger.DEBUG)
                            continue

                        # skip colheader
                        for result in torrent_rows[1:]:
                            cells = result.findChildren('td')

                            link = cells[1].find('a', attrs={'title': 'Download'})
                            # ROBUSTNESS: a row without a download link used to
                            # raise outside the per-row try and abort the page.
                            if link is None:
                                continue

                            link_str = str(link['href'])

                            logger.log(u"link=" + link_str, logger.DEBUG)

                            #skip if torrent has been nuked due to poor quality
                            if cells[1].find('img', alt='Nuked') is not None:
                                continue
                            torrent_id_long = link['href'].replace('torrents.php?action=download&id=', '')
                            torrent_id = torrent_id_long.split('&', 1)[0]

                            try:
                                # bs4's Tag.has_key is deprecated; has_attr is
                                # the supported spelling.
                                if link.has_attr('title'):
                                    title = cells[1].find('a', {'title': 'View torrent'}).contents[0].strip()
                                else:
                                    title = link.contents[0]
                                download_url = self.urls['download'] % (torrent_id_long)

                                # BUGFIX: seeders/leechers were kept as strings,
                                # so the min-seed/min-leech comparison below
                                # compared str to int and never filtered.
                                seeders = int(cells[6].contents[0].replace(',', ''))
                                leechers = int(cells[7].contents[0].replace(',', ''))

                            except (AttributeError, TypeError, ValueError):
                                continue

                            #Filter unseeded torrent
                            if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
                                continue

                            if not title or not download_url:
                                continue

                            item = title, download_url, torrent_id, seeders, leechers
                            logger.log(u"Found result: " + title + "(" + searchURL + ")", logger.DEBUG)

                            items[mode].append(item)

                # Py2-only "except Exception, e" replaced; the binding was unused.
                except Exception:
                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)

            #For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results

    def _get_title_and_url(self, item):
        """Return a normalised (title, url) pair from a result tuple."""
        title, url, id, seeders, leechers = item

        # scene releases use dots instead of spaces
        if title:
            title = u'' + title
            title = title.replace(' ', '.')

        # undo HTML entity escaping in the download link
        if url:
            url = str(url).replace('&amp;', '&')

        return (title, url)

    def findPropers(self, search_date=None):
        """Search for PROPER/REPACK releases of downloaded/snatched episodes
        that aired on or after search_date."""
        # BUGFIX: the previous default (datetime.datetime.today() in the
        # signature) was evaluated once at import time; compute it per call.
        if search_date is None:
            search_date = datetime.datetime.today()

        results = []

        myDB = db.DBConnection()
        sqlResults = myDB.select(
            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
        )

        if not sqlResults:
            return []

        for sqlshow in sqlResults:
            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
            if self.show:
                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))

                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')

                for item in self._doSearch(searchString[0]):
                    title, url = self._get_title_and_url(item)
                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))

        return results

    def seedRatio(self):
        # Seed ratio configured for this provider; may be None (set in __init__).
        return self.ratio
+
+
class MoreThanTVCache(tvcache.TVCache):
    """RSS cache for the MoreThanTV provider."""

    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # Poll the site at most once every 20 minutes.
        self.minTime = 20

    def _getRSSData(self):
        # An empty search string in RSS mode fetches the newest torrents.
        rss_results = self.provider._doSearch({'RSS': ['']})
        return {'entries': rss_results}
+
+provider = MoreThanTVProvider()
diff --git a/sickbeard/providers/nyaatorrents.py b/sickbeard/providers/nyaatorrents.py
index 5699abe607c3bd3dbbff053e13a93b3b4e0081c7..f09a698267c247080fd1d7bf62677ac54ea77137 100644
--- a/sickbeard/providers/nyaatorrents.py
+++ b/sickbeard/providers/nyaatorrents.py
@@ -1,132 +1,132 @@
-# Author: Mr_Orange
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import urllib
-import re
-
-import sickbeard
-import generic
-
-from sickbeard import show_name_helpers
-from sickbeard import logger
-from sickbeard.common import Quality
-from sickbeard import tvcache
-from sickbeard import show_name_helpers
-
-
-class NyaaProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "NyaaTorrents")
-
-        self.supportsBacklog = True
-        self.supportsAbsoluteNumbering = True
-        self.anime_only = True
-        self.enabled = False
-        self.ratio = None
-
-        self.cache = NyaaCache(self)
-
-        self.urls = {'base_url': 'http://www.nyaa.se/'}
-
-        self.url = self.urls['base_url']
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'nyaatorrents.png'
-
-    def getQuality(self, item, anime=False):
-        title = item.get('title')
-        quality = Quality.sceneQuality(title, anime)
-        return quality
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
-        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
-
-    def _get_season_search_strings(self, ep_obj):
-        return show_name_helpers.makeSceneShowSearchStrings(self.show, anime=True)
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-        return self._get_season_search_strings(ep_obj)
-
-    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
-        if self.show and not self.show.is_anime:
-            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
-            return []
-
-        params = {
-            "term": search_string.encode('utf-8'),
-            "cats": '1_37',  # Limit to English-translated Anime (for now)
-            "sort": '2',     # Sort Descending By Seeders
-        }
-
-        searchURL = self.url + '?page=rss&' + urllib.urlencode(params)
-
-        logger.log(u"Search string: " + searchURL, logger.DEBUG)
-
-        results = []
-        for curItem in self.cache.getRSSFeed(searchURL, items=['entries'])['entries'] or []:
-            (title, url) = self._get_title_and_url(curItem)
-
-            if title and url:
-                results.append(curItem)
-            else:
-                logger.log(
-                    u"The data returned from the " + self.name + " is incomplete, this result is unusable",
-                    logger.DEBUG)
-
-        return results
-
-    def _get_title_and_url(self, item):
-        return generic.TorrentProvider._get_title_and_url(self, item)
-
-    def _extract_name_from_filename(self, filename):
-        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
-        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
-        match = re.match(name_regex, filename, re.I)
-        if match:
-            return match.group(1)
-        return None
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class NyaaCache(tvcache.TVCache):
-    def __init__(self, provider):
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll NyaaTorrents every 15 minutes max
-        self.minTime = 15
-
-    def _getRSSData(self):
-        params = {
-            "page": 'rss',   # Use RSS page
-            "order": '1',    # Sort Descending By Date
-            "cats": '1_37',  # Limit to English-translated Anime (for now)
-        }
-
-        url = self.provider.url + '?' + urllib.urlencode(params)
-
-        logger.log(u"NyaaTorrents cache update URL: " + url, logger.DEBUG)
-
-        return self.getRSSFeed(url)
-
-provider = NyaaProvider()
+# Author: Mr_Orange
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+import re
+
+import sickbeard
+import generic
+
+from sickbeard import show_name_helpers
+from sickbeard import logger
+from sickbeard.common import Quality
+from sickbeard import tvcache
+from sickbeard import show_name_helpers
+
+
+class NyaaProvider(generic.TorrentProvider):
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "NyaaTorrents")
+
+        self.supportsBacklog = True
+        self.supportsAbsoluteNumbering = True
+        self.anime_only = True
+        self.enabled = False
+        self.ratio = None
+
+        self.cache = NyaaCache(self)
+
+        self.urls = {'base_url': 'http://www.nyaa.se/'}
+
+        self.url = self.urls['base_url']
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'nyaatorrents.png'
+
+    def getQuality(self, item, anime=False):
+        title = item.get('title')
+        quality = Quality.sceneQuality(title, anime)
+        return quality
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
+
+    def _get_season_search_strings(self, ep_obj):
+        return show_name_helpers.makeSceneShowSearchStrings(self.show, anime=True)
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+        return self._get_season_search_strings(ep_obj)
+
+    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
+        if self.show and not self.show.is_anime:
+            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
+            return []
+
+        params = {
+            "term": search_string.encode('utf-8'),
+            "cats": '1_37',  # Limit to English-translated Anime (for now)
+            "sort": '2',     # Sort Descending By Seeders
+        }
+
+        searchURL = self.url + '?page=rss&' + urllib.urlencode(params)
+
+        logger.log(u"Search string: " + searchURL, logger.DEBUG)
+
+        results = []
+        for curItem in self.cache.getRSSFeed(searchURL, items=['entries'])['entries'] or []:
+            (title, url) = self._get_title_and_url(curItem)
+
+            if title and url:
+                results.append(curItem)
+            else:
+                logger.log(
+                    u"The data returned from the " + self.name + " is incomplete, this result is unusable",
+                    logger.DEBUG)
+
+        return results
+
+    def _get_title_and_url(self, item):
+        return generic.TorrentProvider._get_title_and_url(self, item)
+
+    def _extract_name_from_filename(self, filename):
+        name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
+        logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
+        match = re.match(name_regex, filename, re.I)
+        if match:
+            return match.group(1)
+        return None
+
+    def seedRatio(self):
+        return self.ratio
+
+
+class NyaaCache(tvcache.TVCache):
+    def __init__(self, provider):
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll NyaaTorrents every 15 minutes max
+        self.minTime = 15
+
+    def _getRSSData(self):
+        params = {
+            "page": 'rss',   # Use RSS page
+            "order": '1',    # Sort Descending By Date
+            "cats": '1_37',  # Limit to English-translated Anime (for now)
+        }
+
+        url = self.provider.url + '?' + urllib.urlencode(params)
+
+        logger.log(u"NyaaTorrents cache update URL: " + url, logger.DEBUG)
+
+        return self.getRSSFeed(url)
+
+provider = NyaaProvider()  # module-level provider instance (presumably discovered by the providers package -- confirm)
diff --git a/sickbeard/providers/speedcd.py b/sickbeard/providers/speedcd.py
index cbe5726dd50dbd310847d84df504808616df1e99..0ee22e0813b12b6ed90e2ac854c3009ecbc4fe1c 100644
--- a/sickbeard/providers/speedcd.py
+++ b/sickbeard/providers/speedcd.py
@@ -1,261 +1,261 @@
-# Author: Mr_Orange
-# URL: https://github.com/mr-orange/Sick-Beard
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import re
-import datetime
-import urlparse
-import time
-import sickbeard
-import generic
-
-from sickbeard.common import Quality, cpu_presets
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import db
-from sickbeard import classes
-from sickbeard import helpers
-from sickbeard import show_name_helpers
-from sickbeard.common import Overview
-from sickbeard.exceptions import ex
-from sickbeard import clients
-from lib import requests
-from lib.requests import exceptions
-from sickbeard.helpers import sanitizeSceneName
-
-
-class SpeedCDProvider(generic.TorrentProvider):
-
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "Speedcd")
-
-        self.supportsBacklog = True
-
-        self.enabled = False
-        self.username = None
-        self.password = None
-        self.ratio = None
-        self.freeleech = False
-        self.minseed = None
-        self.minleech = None
-
-        self.cache = SpeedCDCache(self)
-
-        self.urls = {'base_url': 'http://speed.cd/',
-                'login': 'http://speed.cd/take_login.php',
-                'detail': 'http://speed.cd/t/%s',
-                'search': 'http://speed.cd/V3/API/API.php',
-                'download': 'http://speed.cd/download.php?torrent=%s',
-                }
-
-        self.url = self.urls['base_url']
-
-        self.categories = {'Season': {'c14': 1}, 'Episode': {'c2': 1, 'c49': 1}, 'RSS': {'c14': 1, 'c2': 1, 'c49': 1}}
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'speedcd.png'
-
-    def getQuality(self, item, anime=False):
-
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def _doLogin(self):
-
-        login_params = {'username': self.username,
-                        'password': self.password
-        }
-
-        try:
-            response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
-        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
-            logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
-            return False
-
-        if re.search('Incorrect username or Password. Please try again.', response.text) \
-                or response.status_code == 401:
-            logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
-            return False
-
-        return True
-
-    def _get_season_search_strings(self, ep_obj):
-
-        #If Every episode in Season is a wanted Episode then search for Season first
-        search_string = {'Season': []}
-        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-            if ep_obj.show.air_by_date or ep_obj.show.sports:
-                ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
-            elif ep_obj.show.anime:
-                ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
-            else:
-                ep_string = show_name + ' S%02d' % int(ep_obj.scene_season)  #1) showName SXX
-
-            search_string['Season'].append(ep_string)
-
-        return [search_string]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-
-        search_string = {'Episode': []}
-
-        if not ep_obj:
-            return []
-
-        if self.show.air_by_date:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|')
-                search_string['Episode'].append(ep_string)
-        elif self.show.sports:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            str(ep_obj.airdate).replace('-', '|') + '|' + \
-                            ep_obj.airdate.strftime('%b')
-                search_string['Episode'].append(ep_string)
-        elif self.show.anime:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + ' ' + \
-                            "%i" % int(ep_obj.scene_absolute_number)
-                search_string['Episode'].append(ep_string)
-        else:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
-                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
-                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
-
-                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
-
-        return [search_string]
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-
-        results = []
-        items = {'Season': [], 'Episode': [], 'RSS': []}
-
-        if not self._doLogin():
-            return results
-
-        for mode in search_params.keys():
-            for search_string in search_params[mode]:
-
-                logger.log(u"Search string: " + search_string, logger.DEBUG)
-
-                search_string = '+'.join(search_string.split())
-
-                post_data = dict({'/browse.php?': None, 'cata': 'yes', 'jxt': 4, 'jxw': 'b', 'search': search_string},
-                                 **self.categories[mode])
-
-                parsedJSON = self.getURL(self.urls['search'], post_data=post_data, json=True)
-                if not parsedJSON:
-                    continue
-
-                try:
-                    torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
-                except:
-                    continue
-
-                for torrent in torrents:
-
-                    if self.freeleech and not torrent['free']:
-                        continue
-
-                    title = re.sub('<[^>]*>', '', torrent['name'])
-                    url = self.urls['download'] % (torrent['id'])
-                    seeders = int(torrent['seed'])
-                    leechers = int(torrent['leech'])
-
-                    if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
-                        continue
-
-                    if not title or not url:
-                        continue
-
-                    item = title, url, seeders, leechers
-                    items[mode].append(item)
-
-            #For each search mode sort all the items by seeders
-            items[mode].sort(key=lambda tup: tup[2], reverse=True)
-
-            results += items[mode]
-
-        return results
-
-    def _get_title_and_url(self, item):
-
-        title, url, seeders, leechers = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = str(url).replace('&amp;', '&')
-
-        return (title, url)
-
-    def findPropers(self, search_date=datetime.datetime.today()):
-
-        results = []
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select(
-            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
-            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
-            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
-            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
-            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
-        )
-
-        if not sqlResults:
-            return []
-
-        for sqlshow in sqlResults:
-            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
-            if self.show:
-                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
-
-                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
-
-                for item in self._doSearch(searchString[0]):
-                    title, url = self._get_title_and_url(item)
-                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class SpeedCDCache(tvcache.TVCache):
-    def __init__(self, provider):
-
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll Speedcd every 20 minutes max
-        self.minTime = 20
-
-    def _getRSSData(self):
-        search_params = {'RSS': ['']}
-        return {'entries': self.provider._doSearch(search_params)}
-
-provider = SpeedCDProvider()
-
+# Author: Mr_Orange
+# URL: https://github.com/mr-orange/Sick-Beard
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import datetime
+import urlparse
+import time
+import sickbeard
+import generic
+
+from sickbeard.common import Quality, cpu_presets
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard import db
+from sickbeard import classes
+from sickbeard import helpers
+from sickbeard import show_name_helpers
+from sickbeard.common import Overview
+from sickbeard.exceptions import ex
+from sickbeard import clients
+from lib import requests
+from lib.requests import exceptions
+from sickbeard.helpers import sanitizeSceneName
+
+
+class SpeedCDProvider(generic.TorrentProvider):
+
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "Speedcd")
+
+        self.supportsBacklog = True
+
+        self.enabled = False
+        self.username = None
+        self.password = None
+        self.ratio = None
+        self.freeleech = False
+        self.minseed = None
+        self.minleech = None
+
+        self.cache = SpeedCDCache(self)
+
+        self.urls = {'base_url': 'http://speed.cd/',
+                'login': 'http://speed.cd/take_login.php',
+                'detail': 'http://speed.cd/t/%s',
+                'search': 'http://speed.cd/V3/API/API.php',
+                'download': 'http://speed.cd/download.php?torrent=%s',
+                }
+
+        self.url = self.urls['base_url']
+
+        self.categories = {'Season': {'c14': 1}, 'Episode': {'c2': 1, 'c49': 1}, 'RSS': {'c14': 1, 'c2': 1, 'c49': 1}}
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'speedcd.png'
+
+    def getQuality(self, item, anime=False):
+
+        quality = Quality.sceneQuality(item[0], anime)
+        return quality
+
+    def _doLogin(self):
+
+        login_params = {'username': self.username,
+                        'password': self.password
+        }
+
+        try:
+            response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
+        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
+            logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
+            return False
+
+        if re.search('Incorrect username or Password. Please try again.', response.text) \
+                or response.status_code == 401:
+            logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
+            return False
+
+        return True
+
+    def _get_season_search_strings(self, ep_obj):
+
+        #If Every episode in Season is a wanted Episode then search for Season first
+        search_string = {'Season': []}
+        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+            if ep_obj.show.air_by_date or ep_obj.show.sports:
+                ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
+            elif ep_obj.show.anime:
+                ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
+            else:
+                ep_string = show_name + ' S%02d' % int(ep_obj.scene_season)  #1) showName SXX
+
+            search_string['Season'].append(ep_string)
+
+        return [search_string]
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+
+        search_string = {'Episode': []}
+
+        if not ep_obj:
+            return []
+
+        if self.show.air_by_date:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|')
+                search_string['Episode'].append(ep_string)
+        elif self.show.sports:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            str(ep_obj.airdate).replace('-', '|') + '|' + \
+                            ep_obj.airdate.strftime('%b')
+                search_string['Episode'].append(ep_string)
+        elif self.show.anime:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = sanitizeSceneName(show_name) + ' ' + \
+                            "%i" % int(ep_obj.scene_absolute_number)
+                search_string['Episode'].append(ep_string)
+        else:
+            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
+                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
+                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
+
+                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
+
+        return [search_string]
+
+    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
+
+        results = []
+        items = {'Season': [], 'Episode': [], 'RSS': []}
+
+        if not self._doLogin():
+            return results
+
+        for mode in search_params.keys():
+            for search_string in search_params[mode]:
+
+                logger.log(u"Search string: " + search_string, logger.DEBUG)
+
+                search_string = '+'.join(search_string.split())
+
+                post_data = dict({'/browse.php?': None, 'cata': 'yes', 'jxt': 4, 'jxw': 'b', 'search': search_string},
+                                 **self.categories[mode])
+
+                parsedJSON = self.getURL(self.urls['search'], post_data=post_data, json=True)
+                if not parsedJSON:
+                    continue
+
+                try:
+                    torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
+                except:
+                    continue
+
+                for torrent in torrents:
+
+                    if self.freeleech and not torrent['free']:
+                        continue
+
+                    title = re.sub('<[^>]*>', '', torrent['name'])
+                    url = self.urls['download'] % (torrent['id'])
+                    seeders = int(torrent['seed'])
+                    leechers = int(torrent['leech'])
+
+                    if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
+                        continue
+
+                    if not title or not url:
+                        continue
+
+                    item = title, url, seeders, leechers
+                    items[mode].append(item)
+
+            #For each search mode sort all the items by seeders
+            items[mode].sort(key=lambda tup: tup[2], reverse=True)
+
+            results += items[mode]
+
+        return results
+
+    def _get_title_and_url(self, item):
+
+        title, url, seeders, leechers = item
+
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        if url:
+            url = str(url).replace('&amp;', '&')
+
+        return (title, url)
+
+    def findPropers(self, search_date=datetime.datetime.today()):
+
+        results = []
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select(
+            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
+            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
+            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
+            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
+            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
+        )
+
+        if not sqlResults:
+            return []
+
+        for sqlshow in sqlResults:
+            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
+            if self.show:
+                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
+
+                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
+
+                for item in self._doSearch(searchString[0]):
+                    title, url = self._get_title_and_url(item)
+                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
+
+        return results
+
+    def seedRatio(self):
+        return self.ratio
+
+
+class SpeedCDCache(tvcache.TVCache):
+    def __init__(self, provider):
+
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll Speedcd every 20 minutes max
+        self.minTime = 20
+
+    def _getRSSData(self):
+        search_params = {'RSS': ['']}
+        return {'entries': self.provider._doSearch(search_params)}
+
+provider = SpeedCDProvider()  # module-level provider instance (presumably discovered by the providers package -- confirm)
+
diff --git a/sickbeard/providers/t411.py b/sickbeard/providers/t411.py
index cce3bf944b207e79e51fd3159f884a7dbb138fa0..d0c2ee6c819cca2cefaf4c41257144c0ff575f9e 100644
--- a/sickbeard/providers/t411.py
+++ b/sickbeard/providers/t411.py
@@ -216,8 +216,7 @@ class T411Provider(generic.TorrentProvider):
 
                                     try:
                                         link = result.find('a', title=True)
-                                        torrentName = link['title']
-                                        torrent_name = str(torrentName)
+                                        torrent_name = link['title']
                                         torrentId = result.find_all('td')[2].find_all('a')[0]['href'][1:].replace(
                                             'torrents/nfo/?id=', '')
                                         torrent_download_url = (self.urls['download'] % torrentId).encode('utf8')
diff --git a/sickbeard/providers/tntvillage.py b/sickbeard/providers/tntvillage.py
index 387743c7ebabfc7c3365cf91396665412a86d4e2..21a881dd88b15eb748aecdd5b2813a3fa11b3ded 100644
--- a/sickbeard/providers/tntvillage.py
+++ b/sickbeard/providers/tntvillage.py
@@ -85,10 +85,13 @@ class TNTVillageProvider(generic.TorrentProvider):
         self.minleech = None
 
         self.hdtext = [
+                       ' - Versione 720p',
                        ' Versione 720p',
                        ' V 720p',
+                       ' V 720',
                        ' V HEVC',
                        ' V  HEVC',
+                       ' V 1080',
                        ' Versione 1080p',
                        ' 720p HEVC',
                        ' Ver 720',
@@ -396,7 +399,10 @@ class TNTVillageProvider(generic.TorrentProvider):
                                 logger.log(u"name: " + title + "", logger.DEBUG)
                                 filename_qt = self._reverseQuality(self._episodeQuality(result))
                                 for text in self.hdtext:
+                                    title1 = title
                                     title = title.replace(text,filename_qt)
+                                    if title != title1:
+                                        break
 
                                 if Quality.nameQuality(title) == Quality.UNKNOWN:
                                     title += filename_qt 
diff --git a/sickbeard/providers/tokyotoshokan.py b/sickbeard/providers/tokyotoshokan.py
index 3f3089457167fc0a43a248bd29f607d4fc011cdd..651c4586008594532b02404aae7f558d2203b1be 100644
--- a/sickbeard/providers/tokyotoshokan.py
+++ b/sickbeard/providers/tokyotoshokan.py
@@ -1,171 +1,171 @@
-# Author: Mr_Orange
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import urllib
-import re
-import traceback
-
-import sickbeard
-import generic
-
-from sickbeard import show_name_helpers
-from sickbeard import logger
-from sickbeard.common import Quality
-from sickbeard import tvcache
-from sickbeard import show_name_helpers, helpers
-from sickbeard.bs4_parser import BS4Parser
-
-
-class TokyoToshokanProvider(generic.TorrentProvider):
-    def __init__(self):
-
-        generic.TorrentProvider.__init__(self, "TokyoToshokan")
-
-        self.supportsBacklog = True
-        self.supportsAbsoluteNumbering = True
-        self.anime_only = True
-        self.enabled = False
-        self.ratio = None
-
-        self.cache = TokyoToshokanCache(self)
-
-        self.urls = {'base_url': 'http://tokyotosho.info/'}
-        self.url = self.urls['base_url']
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'tokyotoshokan.png'
-
-    def _get_title_and_url(self, item):
-
-        title, url = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = url.replace('&amp;', '&')
-
-        return (title, url)
-
-    def seedRatio(self):
-        return self.ratio
-
-    def getQuality(self, item, anime=False):
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
-        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
-
-    def _get_season_search_strings(self, ep_obj):
-        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
-
-    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
-        if self.show and not self.show.is_anime:
-            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
-            return []
-
-        params = {
-            "terms": search_string.encode('utf-8'),
-            "type": 1, # get anime types
-        }
-
-        searchURL = self.url + 'search.php?' + urllib.urlencode(params)
-
-        data = self.getURL(searchURL)
-
-        logger.log(u"Search string: " + searchURL, logger.DEBUG)
-
-        if not data:
-            return []
-
-        results = []
-        try:
-            with BS4Parser(data, features=["html5lib", "permissive"]) as soup:
-                torrent_table = soup.find('table', attrs={'class': 'listing'})
-                torrent_rows = torrent_table.find_all('tr') if torrent_table else []
-                if torrent_rows: 
-                    if torrent_rows[0].find('td', attrs={'class': 'centertext'}):
-                        a = 1
-                    else:
-                        a = 0
-    
-                    for top, bottom in zip(torrent_rows[a::2], torrent_rows[a::2]):
-                        title = top.find('td', attrs={'class': 'desc-top'}).text
-                        url = top.find('td', attrs={'class': 'desc-top'}).find('a')['href']
-    
-                        if not title or not url:
-                            continue
-    
-                        item = title.lstrip(), url
-                        results.append(item)
-
-        except Exception, e:
-            logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
-
-
-        return results
-
-
-class TokyoToshokanCache(tvcache.TVCache):
-    def __init__(self, provider):
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll NyaaTorrents every 15 minutes max
-        self.minTime = 15
-
-    def _get_title_and_url(self, item):
-        """
-        Retrieves the title and URL data from the item XML node
-
-        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
-
-        Returns: A tuple containing two strings representing title and URL respectively
-        """
-
-        title = item.title if item.title else None
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        url = item.link if item.link else None
-        if url:
-            url = url.replace('&amp;', '&')
-
-        return (title, url)
-
-    def _getRSSData(self):
-        params = {
-            "filter": '1',
-        }
-
-        url = self.provider.url + 'rss.php?' + urllib.urlencode(params)
-
-        logger.log(u"TokyoToshokan cache update URL: " + url, logger.DEBUG)
-
-        return self.getRSSFeed(url)
-
-
-provider = TokyoToshokanProvider()
+# Author: Mr_Orange
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+import re
+import traceback
+
+import sickbeard
+import generic
+
+from sickbeard import show_name_helpers
+from sickbeard import logger
+from sickbeard.common import Quality
+from sickbeard import tvcache
+from sickbeard import show_name_helpers, helpers
+from sickbeard.bs4_parser import BS4Parser
+
+
+class TokyoToshokanProvider(generic.TorrentProvider):
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "TokyoToshokan")
+
+        self.supportsBacklog = True
+        self.supportsAbsoluteNumbering = True
+        self.anime_only = True
+        self.enabled = False
+        self.ratio = None
+
+        self.cache = TokyoToshokanCache(self)
+
+        self.urls = {'base_url': 'http://tokyotosho.info/'}
+        self.url = self.urls['base_url']
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'tokyotoshokan.png'
+
+    def _get_title_and_url(self, item):
+
+        title, url = item
+
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        if url:
+            url = url.replace('&amp;', '&')
+
+        return (title, url)
+
+    def seedRatio(self):
+        return self.ratio
+
+    def getQuality(self, item, anime=False):
+        quality = Quality.sceneQuality(item[0], anime)
+        return quality
+
+    def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
+        return generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
+
+    def _get_season_search_strings(self, ep_obj):
+        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
+
+    def _get_episode_search_strings(self, ep_obj, add_string=''):
+        return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
+
+    def _doSearch(self, search_string, search_mode='eponly', epcount=0, age=0):
+        if self.show and not self.show.is_anime:
+            logger.log(u"" + str(self.show.name) + " is not an anime skiping " + str(self.name))
+            return []
+
+        params = {
+            "terms": search_string.encode('utf-8'),
+            "type": 1, # get anime types
+        }
+
+        searchURL = self.url + 'search.php?' + urllib.urlencode(params)
+
+        data = self.getURL(searchURL)
+
+        logger.log(u"Search string: " + searchURL, logger.DEBUG)
+
+        if not data:
+            return []
+
+        results = []
+        try:
+            with BS4Parser(data, features=["html5lib", "permissive"]) as soup:
+                torrent_table = soup.find('table', attrs={'class': 'listing'})
+                torrent_rows = torrent_table.find_all('tr') if torrent_table else []
+                if torrent_rows: 
+                    if torrent_rows[0].find('td', attrs={'class': 'centertext'}):
+                        a = 1
+                    else:
+                        a = 0
+    
+                    for top, bottom in zip(torrent_rows[a::2], torrent_rows[a::2]):
+                        title = top.find('td', attrs={'class': 'desc-top'}).text
+                        url = top.find('td', attrs={'class': 'desc-top'}).find('a')['href']
+    
+                        if not title or not url:
+                            continue
+    
+                        item = title.lstrip(), url
+                        results.append(item)
+
+        except Exception, e:
+            logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
+
+
+        return results
+
+
+class TokyoToshokanCache(tvcache.TVCache):
+    def __init__(self, provider):
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll NyaaTorrents every 15 minutes max
+        self.minTime = 15
+
+    def _get_title_and_url(self, item):
+        """
+        Retrieves the title and URL data from the item XML node
+
+        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
+
+        Returns: A tuple containing two strings representing title and URL respectively
+        """
+
+        title = item.title if item.title else None
+        if title:
+            title = u'' + title
+            title = title.replace(' ', '.')
+
+        url = item.link if item.link else None
+        if url:
+            url = url.replace('&amp;', '&')
+
+        return (title, url)
+
+    def _getRSSData(self):
+        params = {
+            "filter": '1',
+        }
+
+        url = self.provider.url + 'rss.php?' + urllib.urlencode(params)
+
+        logger.log(u"TokyoToshokan cache update URL: " + url, logger.DEBUG)
+
+        return self.getRSSFeed(url)
+
+
+provider = TokyoToshokanProvider()
diff --git a/sickbeard/search.py b/sickbeard/search.py
index ba379af7359bfc9e7abe87981c07b0573f0bbda3..58df654e115821e11ea129d77d53835f88fa58c2 100644
--- a/sickbeard/search.py
+++ b/sickbeard/search.py
@@ -1,714 +1,717 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import os
-import re
-import threading
-import datetime
-import traceback
-
-import sickbeard
-
-from common import SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, Quality, SEASON_RESULT, MULTI_EP_RESULT
-
-from sickbeard import logger, db, show_name_helpers, exceptions, helpers
-from sickbeard import sab
-from sickbeard import nzbget
-from sickbeard import clients
-from sickbeard import history
-from sickbeard import notifiers
-from sickbeard import nzbSplitter
-from sickbeard import ui
-from sickbeard import encodingKludge as ek
-from sickbeard import failed_history
-from sickbeard.exceptions import ex
-from sickbeard.providers.generic import GenericProvider
-from sickbeard.blackandwhitelist import BlackAndWhiteList
-from sickbeard import common
-
-def _downloadResult(result):
-    """
-    Downloads a result to the appropriate black hole folder.
-
-    Returns a bool representing success.
-
-    result: SearchResult instance to download.
-    """
-
-    resProvider = result.provider
-    if resProvider == None:
-        logger.log(u"Invalid provider name - this is a coding error, report it please", logger.ERROR)
-        return False
-
-    # nzbs with an URL can just be downloaded from the provider
-    if result.resultType == "nzb":
-        newResult = resProvider.downloadResult(result)
-    # if it's an nzb data result
-    elif result.resultType == "nzbdata":
-
-        # get the final file path to the nzb
-        fileName = ek.ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
-
-        logger.log(u"Saving NZB to " + fileName)
-
-        newResult = True
-
-        # save the data to disk
-        try:
-            with ek.ek(open, fileName, 'w') as fileOut:
-                fileOut.write(result.extraInfo[0])
-
-            helpers.chmodAsParent(fileName)
-
-        except EnvironmentError, e:
-            logger.log(u"Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
-            newResult = False
-    elif resProvider.providerType == "torrent":
-        newResult = resProvider.downloadResult(result)
-    else:
-        logger.log(u"Invalid provider type - this is a coding error, report it please", logger.ERROR)
-        newResult = False
-
-    return newResult
-
-def snatchEpisode(result, endStatus=SNATCHED):
-    """
-    Contains the internal logic necessary to actually "snatch" a result that
-    has been found.
-
-    Returns a bool representing success.
-
-    result: SearchResult instance to be snatched.
-    endStatus: the episode status that should be used for the episode object once it's snatched.
-    """
-
-    if result is None:
-        return False
-
-    result.priority = 0  # -1 = low, 0 = normal, 1 = high
-    if sickbeard.ALLOW_HIGH_PRIORITY:
-        # if it aired recently make it high priority
-        for curEp in result.episodes:
-            if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
-                result.priority = 1
-    if re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', result.name, re.I) != None:
-        endStatus = SNATCHED_PROPER
-
-    # NZBs can be sent straight to SAB or saved to disk
-    if result.resultType in ("nzb", "nzbdata"):
-        if sickbeard.NZB_METHOD == "blackhole":
-            dlResult = _downloadResult(result)
-        elif sickbeard.NZB_METHOD == "sabnzbd":
-            dlResult = sab.sendNZB(result)
-        elif sickbeard.NZB_METHOD == "nzbget":
-            is_proper = True if endStatus == SNATCHED_PROPER else False
-            dlResult = nzbget.sendNZB(result, is_proper)
-        else:
-            logger.log(u"Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
-            dlResult = False
-
-    # TORRENTs can be sent to clients or saved to disk
-    elif result.resultType == "torrent":
-        # torrents are saved to disk when blackhole mode
-        if sickbeard.TORRENT_METHOD == "blackhole":
-            dlResult = _downloadResult(result)
-        else:
-            if result.content or result.url.startswith('magnet'):
-                client = clients.getClientIstance(sickbeard.TORRENT_METHOD)()
-                dlResult = client.sendTORRENT(result)
-            else:
-                logger.log(u"Torrent file content is empty", logger.ERROR)
-                dlResult = False
-    else:
-        logger.log(u"Unknown result type, unable to download it", logger.ERROR)
-        dlResult = False
-
-    if not dlResult:
-        return False
-
-    if sickbeard.USE_FAILED_DOWNLOADS:
-        failed_history.logSnatch(result)
-
-    ui.notifications.message('Episode snatched', result.name)
-
-    history.logSnatch(result)
-
-    # don't notify when we re-download an episode
-    sql_l = []
-    trakt_data = []
-    for curEpObj in result.episodes:
-        with curEpObj.lock:
-            if isFirstBestMatch(result):
-                curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
-            else:
-                curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
-
-            sql_l.append(curEpObj.get_sql())
-
-        if curEpObj.status not in Quality.DOWNLOADED:
-            notifiers.notify_snatch(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
-
-            trakt_data.append((curEpObj.season, curEpObj.episode))
-
-    data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
-
-    if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
-        logger.log(u"Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
-        if data:
-            notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
-
-    if len(sql_l) > 0:
-        myDB = db.DBConnection()
-        myDB.mass_action(sql_l)
-
-    if sickbeard.UPDATE_SHOWS_ON_SNATCH and not sickbeard.showQueueScheduler.action.isBeingUpdated(result.show) and result.show.status == "Continuing":
-        sickbeard.showQueueScheduler.action.updateShow(result.show, True)
-
-    return True
-
-
-def pickBestResult(results, show, quality_list=None):
-    results = results if isinstance(results, list) else [results]
-
-    logger.log(u"Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
-
-    bwl = None
-    bestResult = None
-
-    # find the best result for the current episode
-    for cur_result in results:
-        if show and cur_result.show is not show:
-            continue
-
-        # filter out possible bad torrents from providers such as ezrss
-        if isinstance(cur_result, sickbeard.classes.SearchResult):
-            if cur_result.resultType == "torrent" and sickbeard.TORRENT_METHOD != "blackhole":
-                if not cur_result.url.startswith('magnet'):
-                    cur_result.content = cur_result.provider.getURL(cur_result.url)
-                    if not cur_result.content:
-                        continue
-        else:
-            if not cur_result.url.startswith('magnet'):
-                cur_result.content = cur_result.provider.getURL(cur_result.url)
-                if not cur_result.content:
-                    continue
-
-        # build the black And white list
-        if cur_result.show.is_anime:
-            if not bwl:
-                bwl = BlackAndWhiteList(cur_result.show.indexerid)
-            if not bwl.is_valid(cur_result):
-                logger.log(cur_result.name+" does not match the blacklist or the whitelist, rejecting it. Result: " + bwl.get_last_result_msg(), logger.INFO)
-                continue
-
-        logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
-
-        if quality_list and cur_result.quality not in quality_list:
-            logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
-            continue
-
-        if show.rls_ignore_words and show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_ignore_words):
-            logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words,
-                       logger.INFO)
-            continue
-
-        if show.rls_require_words and not show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_require_words):
-            logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words,
-                       logger.INFO)
-            continue
-
-        if not show_name_helpers.filterBadReleases(cur_result.name, parse=False):
-            logger.log(u"Ignoring " + cur_result.name + " because its not a valid scene release that we want, ignoring it",
-                       logger.INFO)
-            continue
-
-        if hasattr(cur_result, 'size'):
-            if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
-                                                                           cur_result.provider.name):
-                logger.log(cur_result.name + u" has previously failed, rejecting it")
-                continue
-
-        if not bestResult or bestResult.quality < cur_result.quality and cur_result.quality != Quality.UNKNOWN:
-            bestResult = cur_result
-
-        elif bestResult.quality == cur_result.quality:
-            if "proper" in cur_result.name.lower() or "repack" in cur_result.name.lower():
-                bestResult = cur_result
-            elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
-                bestResult = cur_result
-            elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
-                logger.log(u"Preferring " + cur_result.name + " (x264 over xvid)")
-                bestResult = cur_result
-
-    if bestResult:
-        logger.log(u"Picked " + bestResult.name + " as the best", logger.DEBUG)
-    else:
-        logger.log(u"No result picked.", logger.DEBUG)
-
-    return bestResult
-
-
-def isFinalResult(result):
-    """
-    Checks if the given result is good enough quality that we can stop searching for other ones.
-
-    If the result is the highest quality in both the any/best quality lists then this function
-    returns True, if not then it's False
-
-    """
-
-    logger.log(u"Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
-
-    show_obj = result.episodes[0].show
-
-    bwl = None
-    if show_obj.is_anime:
-        bwl = BlackAndWhiteList(show_obj.indexerid)
-
-    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
-
-    # if there is a redownload that's higher than this then we definitely need to keep looking
-    if best_qualities and result.quality < max(best_qualities):
-        return False
-
-    # if it does not match the shows black and white list its no good
-    elif bwl and not bwl.is_valid(result):
-        return False
-
-    # if there's no redownload that's higher (above) and this is the highest initial download then we're good
-    elif any_qualities and result.quality in any_qualities:
-        return True
-
-    elif best_qualities and result.quality == max(best_qualities):
-
-        # if this is the best redownload but we have a higher initial download then keep looking
-        if any_qualities and result.quality < max(any_qualities):
-            return False
-
-        # if this is the best redownload and we don't have a higher initial download then we're done
-        else:
-            return True
-
-    # if we got here than it's either not on the lists, they're empty, or it's lower than the highest required
-    else:
-        return False
-
-
-def isFirstBestMatch(result):
-    """
-    Checks if the given result is a best quality match and if we want to archive the episode on first match.
-    """
-
-    logger.log(u"Checking if we should archive our first best quality match for for episode " + result.name,
-               logger.DEBUG)
-
-    show_obj = result.episodes[0].show
-
-    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
-
-    # if there is a redownload that's a match to one of our best qualities and we want to archive the episode then we are done
-    if best_qualities and show_obj.archive_firstmatch and result.quality in best_qualities:
-        return True
-
-    return False
-
-def wantedEpisodes(show, fromDate):
-    anyQualities, bestQualities = common.Quality.splitQuality(show.quality) # @UnusedVariable
-    allQualities = list(set(anyQualities + bestQualities))
-
-    logger.log(u"Seeing if we need anything from " + show.name)
-    myDB = db.DBConnection()
-
-    if show.air_by_date:
-        sqlResults = myDB.select(
-            "SELECT ep.status, ep.season, ep.episode FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.indexer_id AND show.paused = 0 AND ep.airdate > ? AND ep.showid = ? AND show.air_by_date = 1",
-        [fromDate.toordinal(), show.indexerid])
-    else:
-        sqlResults = myDB.select(
-            "SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
-            [show.indexerid, fromDate.toordinal()])
-
-    # check through the list of statuses to see if we want any
-    wanted = []
-    for result in sqlResults:
-        curCompositeStatus = int(result["status"] or -1)
-        curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
-
-        if bestQualities:
-            highestBestQuality = max(allQualities)
-        else:
-            highestBestQuality = 0
-
-        # if we need a better one then say yes
-        if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER,
-            common.SNATCHED_BEST) and curQuality < highestBestQuality) or curStatus == common.WANTED:
-
-            epObj = show.getEpisode(int(result["season"]), int(result["episode"]))
-            epObj.wantedQuality = [i for i in allQualities if (i > curQuality and i != common.Quality.UNKNOWN)]
-            wanted.append(epObj)
-
-    return wanted
-
-def searchForNeededEpisodes():
-    foundResults = {}
-
-    didSearch = False
-
-    origThreadName = threading.currentThread().name
-    threads = []
-
-    show_list = sickbeard.showList
-    fromDate = datetime.date.fromordinal(1)
-    episodes = []
-
-    for curShow in show_list:
-        if not curShow.paused:
-            episodes.extend(wantedEpisodes(curShow, fromDate))
-
-    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_daily]
-    for curProvider in providers:
-        threads += [threading.Thread(target=curProvider.cache.updateCache, name=origThreadName + " :: [" + curProvider.name + "]")]
-
-    # start the thread we just created
-    for t in threads:
-        t.start()
-
-    # wait for all threads to finish
-    for t in threads:
-        t.join()
-
-    for curProvider in providers:
-        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
-        curFoundResults = curProvider.searchRSS(episodes)
-        didSearch = True
-
-        # pick a single result for each episode, respecting existing results
-        for curEp in curFoundResults:
-            bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
-
-            # if all results were rejected move on to the next episode
-            if not bestResult:
-                logger.log(u"All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
-                continue
-
-            # if it's already in the list (from another provider) and the newly found quality is no better then skip it
-            if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
-                continue
-
-            foundResults[curEp] = bestResult
-
-    threading.currentThread().name = origThreadName
-
-    if not didSearch:
-        logger.log(
-            u"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
-            logger.ERROR)
-
-    return foundResults.values()
-
-
-def searchProviders(show, episodes, manualSearch=False, downCurQuality=False):
-    foundResults = {}
-    finalResults = []
-
-    didSearch = False
-    threads = []
-
-    # build name cache for show
-    sickbeard.name_cache.buildNameCache(show)
-
-    origThreadName = threading.currentThread().name
-
-    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_backlog]
-    for curProvider in providers:
-        threads += [threading.Thread(target=curProvider.cache.updateCache,
-                                     name=origThreadName + " :: [" + curProvider.name + "]")]
-
-    # start the thread we just created
-    for t in threads:
-        t.start()
-
-    # wait for all threads to finish
-    for t in threads:
-        t.join()
-
-    for providerNum, curProvider in enumerate(providers):
-        if curProvider.anime_only and not show.is_anime:
-            logger.log(u"" + str(show.name) + " is not an anime, skiping", logger.DEBUG)
-            continue
-
-        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
-
-        foundResults[curProvider.name] = {}
-
-        searchCount = 0
-        search_mode = curProvider.search_mode
-
-        # Always search for episode when manually searching when in sponly and fallback false
-        if search_mode == 'sponly' and manualSearch == True and curProvider.search_fallback == False:
-            search_mode = 'eponly'
-
-        while(True):
-            searchCount += 1
-
-            if search_mode == 'eponly':
-                logger.log(u"Performing episode search for " + show.name)
-            else:
-                logger.log(u"Performing season pack search for " + show.name)
-
-            try:
-                searchResults = curProvider.findSearchResults(show, episodes, search_mode, manualSearch, downCurQuality)
-            except exceptions.AuthException, e:
-                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
-                break
-            except Exception, e:
-                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
-                logger.log(traceback.format_exc(), logger.DEBUG)
-                break
-            finally:
-                threading.currentThread().name = origThreadName
-
-            didSearch = True
-
-            if len(searchResults):
-                # make a list of all the results for this provider
-                for curEp in searchResults:
-                    if curEp in foundResults:
-                        foundResults[curProvider.name][curEp] += searchResults[curEp]
-                    else:
-                        foundResults[curProvider.name][curEp] = searchResults[curEp]
-
-                break
-            elif not curProvider.search_fallback or searchCount == 2:
-                break
-
-            if search_mode == 'sponly':
-                logger.log(u"FALLBACK EPISODE SEARCH INITIATED ...")
-                search_mode = 'eponly'
-            else:
-                logger.log(u"FALLBACK SEASON PACK SEARCH INITIATED ...")
-                search_mode = 'sponly'
-
-        # skip to next provider if we have no results to process
-        if not len(foundResults[curProvider.name]):
-            continue
-
-        anyQualities, bestQualities = Quality.splitQuality(show.quality)
-
-        # pick the best season NZB
-        bestSeasonResult = None
-        if SEASON_RESULT in foundResults[curProvider.name]:
-            bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show,
-                                           anyQualities + bestQualities)
-
-        highest_quality_overall = 0
-        for cur_episode in foundResults[curProvider.name]:
-            for cur_result in foundResults[curProvider.name][cur_episode]:
-                if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
-                    highest_quality_overall = cur_result.quality
-        logger.log(u"The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
-                   logger.DEBUG)
-
-        # see if every episode is wanted
-        if bestSeasonResult:
-            searchedSeasons = [str(x.season) for x in episodes]
-
-            # get the quality of the season nzb
-            seasonQual = bestSeasonResult.quality
-            logger.log(
-                u"The quality of the season " + bestSeasonResult.provider.providerType + " is " + Quality.qualityStrings[
-                    seasonQual], logger.DEBUG)
-
-            myDB = db.DBConnection()
-            allEps = [int(x["episode"])
-                      for x in myDB.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
-                                           [show.indexerid])]
-
-            logger.log(u"Executed query: [SELECT episode FROM tv_episodes WHERE showid = %s AND season in  %s]" % (show.indexerid, ','.join(searchedSeasons)))
-            logger.log(u"Episode list: " + str(allEps), logger.DEBUG)
-
-            allWanted = True
-            anyWanted = False
-            for curEpNum in allEps:
-                for season in set([x.season for x in episodes]):
-                    if not show.wantEpisode(season, curEpNum, seasonQual, downCurQuality):
-                        allWanted = False
-                    else:
-                        anyWanted = True
-
-            # if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
-            if allWanted and bestSeasonResult.quality == highest_quality_overall:
-                logger.log(
-                    u"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.providerType + " " + bestSeasonResult.name)
-                epObjs = []
-                for curEpNum in allEps:
-                    for season in set([x.season for x in episodes]):
-                        epObjs.append(show.getEpisode(season, curEpNum))
-                bestSeasonResult.episodes = epObjs
-
-                return [bestSeasonResult]
-
-            elif not anyWanted:
-                logger.log(
-                    u"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
-                    logger.DEBUG)
-
-            else:
-
-                if bestSeasonResult.provider.providerType == GenericProvider.NZB:
-                    logger.log(u"Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
-
-                    # if not, break it apart and add them as the lowest priority results
-                    individualResults = nzbSplitter.splitResult(bestSeasonResult)
-                    for curResult in individualResults:
-                        if len(curResult.episodes) == 1:
-                            epNum = curResult.episodes[0].episode
-                        elif len(curResult.episodes) > 1:
-                            epNum = MULTI_EP_RESULT
-
-                        if epNum in foundResults[curProvider.name]:
-                            foundResults[curProvider.name][epNum].append(curResult)
-                        else:
-                            foundResults[curProvider.name][epNum] = [curResult]
-
-                # If this is a torrent all we can do is leech the entire torrent, user will have to select which eps not do download in his torrent client
-                else:
-
-                    # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
-                    logger.log(
-                        u"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
-                    epObjs = []
-                    for curEpNum in allEps:
-                        for season in set([x.season for x in episodes]):
-                            epObjs.append(show.getEpisode(season, curEpNum))
-                    bestSeasonResult.episodes = epObjs
-
-                    epNum = MULTI_EP_RESULT
-                    if epNum in foundResults[curProvider.name]:
-                        foundResults[curProvider.name][epNum].append(bestSeasonResult)
-                    else:
-                        foundResults[curProvider.name][epNum] = [bestSeasonResult]
-
-        # go through multi-ep results and see if we really want them or not, get rid of the rest
-        multiResults = {}
-        if MULTI_EP_RESULT in foundResults[curProvider.name]:
-            for multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
-
-                logger.log(u"Seeing if we want to bother with multi-episode result " + multiResult.name, logger.DEBUG)
-
-                if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(multiResult.name, multiResult.size,
-                                                                               multiResult.provider.name):
-                    logger.log(multiResult.name + u" has previously failed, rejecting this multi-ep result")
-                    continue
-
-                # see how many of the eps that this result covers aren't covered by single results
-                neededEps = []
-                notNeededEps = []
-                for epObj in multiResult.episodes:
-                    epNum = epObj.episode
-                    # if we have results for the episode
-                    if epNum in foundResults[curProvider.name] and len(foundResults[curProvider.name][epNum]) > 0:
-                        neededEps.append(epNum)
-                    else:
-                        notNeededEps.append(epNum)
-
-                logger.log(
-                    u"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
-                    logger.DEBUG)
-
-                if not notNeededEps:
-                    logger.log(u"All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
-                    continue
-
-                # check if these eps are already covered by another multi-result
-                multiNeededEps = []
-                multiNotNeededEps = []
-                for epObj in multiResult.episodes:
-                    epNum = epObj.episode
-                    if epNum in multiResults:
-                        multiNotNeededEps.append(epNum)
-                    else:
-                        multiNeededEps.append(epNum)
-
-                logger.log(
-                    u"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
-                        multiNotNeededEps), logger.DEBUG)
-
-                if not multiNeededEps:
-                    logger.log(
-                        u"All of these episodes were covered by another multi-episode nzbs, ignoring this multi-ep result",
-                        logger.DEBUG)
-                    continue
-
-                # if we're keeping this multi-result then remember it
-                for epObj in multiResult.episodes:
-                    multiResults[epObj.episode] = multiResult
-
-                # don't bother with the single result if we're going to get it with a multi result
-                for epObj in multiResult.episodes:
-                    epNum = epObj.episode
-                    if epNum in foundResults[curProvider.name]:
-                        logger.log(
-                            u"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
-                                epNum) + ", removing the single-episode results from the list", logger.DEBUG)
-                        del foundResults[curProvider.name][epNum]
-
-        # of all the single ep results narrow it down to the best one for each episode
-        finalResults += set(multiResults.values())
-        for curEp in foundResults[curProvider.name]:
-            if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
-                continue
-
-            if not len(foundResults[curProvider.name][curEp]) > 0:
-                continue
-
-            # if all results were rejected move on to the next episode
-            bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
-            if not bestResult:
-                continue
-
-            # add result if its not a duplicate and
-            found = False
-            for i, result in enumerate(finalResults):
-                for bestResultEp in bestResult.episodes:
-                    if bestResultEp in result.episodes:
-                        if result.quality < bestResult.quality:
-                            finalResults.pop(i)
-                        else:
-                            found = True
-            if not found:
-                finalResults += [bestResult]
-
-        # check that we got all the episodes we wanted first before doing a match and snatch
-        wantedEpCount = 0
-        for wantedEp in episodes:
-            for result in finalResults:
-                if wantedEp in result.episodes and isFinalResult(result):
-                    wantedEpCount += 1
-
-        # make sure we search every provider for results unless we found everything we wanted
-        if wantedEpCount == len(episodes):
-            break
-
-    if not didSearch:
-        logger.log(u"No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
-                   logger.ERROR)
-
-    return finalResults
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import os
+import re
+import threading
+import datetime
+import traceback
+
+import sickbeard
+
+from common import SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, Quality, SEASON_RESULT, MULTI_EP_RESULT
+
+from sickbeard import logger, db, show_name_helpers, exceptions, helpers
+from sickbeard import sab
+from sickbeard import nzbget
+from sickbeard import clients
+from sickbeard import history
+from sickbeard import notifiers
+from sickbeard import nzbSplitter
+from sickbeard import ui
+from sickbeard import encodingKludge as ek
+from sickbeard import failed_history
+from sickbeard.exceptions import ex
+from sickbeard.providers.generic import GenericProvider
+from sickbeard.blackandwhitelist import BlackAndWhiteList
+from sickbeard import common
+
+def _downloadResult(result):
+    """
+    Downloads a result to the appropriate black hole folder.
+
+    Returns a bool representing success.
+
+    result: SearchResult instance to download.
+    """
+
+    resProvider = result.provider
+    if resProvider == None:
+        logger.log(u"Invalid provider name - this is a coding error, report it please", logger.ERROR)
+        return False
+
+    # nzbs with a URL can just be downloaded from the provider
+    if result.resultType == "nzb":
+        newResult = resProvider.downloadResult(result)
+    # if it's an nzb data result
+    elif result.resultType == "nzbdata":
+
+        # get the final file path to the nzb
+        fileName = ek.ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
+
+        logger.log(u"Saving NZB to " + fileName)
+
+        newResult = True
+
+        # save the data to disk
+        try:
+            with ek.ek(open, fileName, 'w') as fileOut:
+                fileOut.write(result.extraInfo[0])
+
+            helpers.chmodAsParent(fileName)
+
+        except EnvironmentError, e:
+            logger.log(u"Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
+            newResult = False
+    elif resProvider.providerType == "torrent":
+        newResult = resProvider.downloadResult(result)
+    else:
+        logger.log(u"Invalid provider type - this is a coding error, report it please", logger.ERROR)
+        newResult = False
+
+    return newResult
+
+def snatchEpisode(result, endStatus=SNATCHED):
+    """
+    Contains the internal logic necessary to actually "snatch" a result that
+    has been found.
+
+    Returns a bool representing success.
+
+    result: SearchResult instance to be snatched.
+    endStatus: the episode status that should be used for the episode object once it's snatched.
+    """
+
+    if result is None:
+        return False
+
+    result.priority = 0  # -1 = low, 0 = normal, 1 = high
+    if sickbeard.ALLOW_HIGH_PRIORITY:
+        # if it aired recently make it high priority
+        for curEp in result.episodes:
+            if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
+                result.priority = 1
+    if re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', result.name, re.I) != None:
+        endStatus = SNATCHED_PROPER
+
+    # NZBs can be sent straight to SAB or saved to disk
+    if result.resultType in ("nzb", "nzbdata"):
+        if sickbeard.NZB_METHOD == "blackhole":
+            dlResult = _downloadResult(result)
+        elif sickbeard.NZB_METHOD == "sabnzbd":
+            dlResult = sab.sendNZB(result)
+        elif sickbeard.NZB_METHOD == "nzbget":
+            is_proper = True if endStatus == SNATCHED_PROPER else False
+            dlResult = nzbget.sendNZB(result, is_proper)
+        else:
+            logger.log(u"Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
+            dlResult = False
+
+    # TORRENTs can be sent to clients or saved to disk
+    elif result.resultType == "torrent":
+        # torrents are saved to disk when blackhole mode
+        if sickbeard.TORRENT_METHOD == "blackhole":
+            dlResult = _downloadResult(result)
+        else:
+            if result.content or result.url.startswith('magnet'):
+                client = clients.getClientIstance(sickbeard.TORRENT_METHOD)()
+                dlResult = client.sendTORRENT(result)
+            else:
+                logger.log(u"Torrent file content is empty", logger.ERROR)
+                dlResult = False
+    else:
+        logger.log(u"Unknown result type, unable to download it", logger.ERROR)
+        dlResult = False
+
+    if not dlResult:
+        return False
+
+    if sickbeard.USE_FAILED_DOWNLOADS:
+        failed_history.logSnatch(result)
+
+    ui.notifications.message('Episode snatched', result.name)
+
+    history.logSnatch(result)
+
+    # don't notify when we re-download an episode
+    sql_l = []
+    trakt_data = []
+    for curEpObj in result.episodes:
+        with curEpObj.lock:
+            if isFirstBestMatch(result):
+                curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
+            else:
+                curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
+
+            sql_l.append(curEpObj.get_sql())
+
+        if curEpObj.status not in Quality.DOWNLOADED:
+            notifiers.notify_snatch(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
+
+            trakt_data.append((curEpObj.season, curEpObj.episode))
+
+    data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
+
+    if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
+        logger.log(u"Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
+        if data:
+            notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
+
+    if len(sql_l) > 0:
+        myDB = db.DBConnection()
+        myDB.mass_action(sql_l)
+
+    if sickbeard.UPDATE_SHOWS_ON_SNATCH and not sickbeard.showQueueScheduler.action.isBeingUpdated(result.show) and result.show.status == "Continuing":
+        try:
+            sickbeard.showQueueScheduler.action.updateShow(result.show, True)
+        except exceptions.CantUpdateException as e:
+            logger.log("Unable to update show: {0}".format(str(e)),logger.DEBUG)
+
+    return True
+
+
+def pickBestResult(results, show, quality_list=None):
+    results = results if isinstance(results, list) else [results]
+
+    logger.log(u"Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
+
+    bwl = None
+    bestResult = None
+
+    # find the best result for the current episode
+    for cur_result in results:
+        if show and cur_result.show is not show:
+            continue
+
+        # filter out possible bad torrents from providers such as ezrss
+        if isinstance(cur_result, sickbeard.classes.SearchResult):
+            if cur_result.resultType == "torrent" and sickbeard.TORRENT_METHOD != "blackhole":
+                if not cur_result.url.startswith('magnet'):
+                    cur_result.content = cur_result.provider.getURL(cur_result.url)
+                    if not cur_result.content:
+                        continue
+        else:
+            if not cur_result.url.startswith('magnet'):
+                cur_result.content = cur_result.provider.getURL(cur_result.url)
+                if not cur_result.content:
+                    continue
+
+        # build the black and white list
+        if cur_result.show.is_anime:
+            if not bwl:
+                bwl = BlackAndWhiteList(cur_result.show.indexerid)
+            if not bwl.is_valid(cur_result):
+                logger.log(cur_result.name+" does not match the blacklist or the whitelist, rejecting it. Result: " + bwl.get_last_result_msg(), logger.INFO)
+                continue
+
+        logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
+
+        if quality_list and cur_result.quality not in quality_list:
+            logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
+            continue
+
+        if show.rls_ignore_words and show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_ignore_words):
+            logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words,
+                       logger.INFO)
+            continue
+
+        if show.rls_require_words and not show_name_helpers.containsAtLeastOneWord(cur_result.name, cur_result.show.rls_require_words):
+            logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words,
+                       logger.INFO)
+            continue
+
+        if not show_name_helpers.filterBadReleases(cur_result.name, parse=False):
+            logger.log(u"Ignoring " + cur_result.name + " because its not a valid scene release that we want, ignoring it",
+                       logger.INFO)
+            continue
+
+        if hasattr(cur_result, 'size'):
+            if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
+                                                                           cur_result.provider.name):
+                logger.log(cur_result.name + u" has previously failed, rejecting it")
+                continue
+
+        if not bestResult or bestResult.quality < cur_result.quality and cur_result.quality != Quality.UNKNOWN:
+            bestResult = cur_result
+
+        elif bestResult.quality == cur_result.quality:
+            if "proper" in cur_result.name.lower() or "repack" in cur_result.name.lower():
+                bestResult = cur_result
+            elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
+                bestResult = cur_result
+            elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
+                logger.log(u"Preferring " + cur_result.name + " (x264 over xvid)")
+                bestResult = cur_result
+
+    if bestResult:
+        logger.log(u"Picked " + bestResult.name + " as the best", logger.DEBUG)
+    else:
+        logger.log(u"No result picked.", logger.DEBUG)
+
+    return bestResult
+
+
+def isFinalResult(result):
+    """
+    Checks if the given result is good enough quality that we can stop searching for other ones.
+
+    If the result is the highest quality in both the any/best quality lists then this function
+    returns True, if not then it's False
+
+    """
+
+    logger.log(u"Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
+
+    show_obj = result.episodes[0].show
+
+    bwl = None
+    if show_obj.is_anime:
+        bwl = BlackAndWhiteList(show_obj.indexerid)
+
+    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
+
+    # if there is a redownload that's higher than this then we definitely need to keep looking
+    if best_qualities and result.quality < max(best_qualities):
+        return False
+
+    # if it does not match the shows black and white list its no good
+    elif bwl and not bwl.is_valid(result):
+        return False
+
+    # if there's no redownload that's higher (above) and this is the highest initial download then we're good
+    elif any_qualities and result.quality in any_qualities:
+        return True
+
+    elif best_qualities and result.quality == max(best_qualities):
+
+        # if this is the best redownload but we have a higher initial download then keep looking
+        if any_qualities and result.quality < max(any_qualities):
+            return False
+
+        # if this is the best redownload and we don't have a higher initial download then we're done
+        else:
+            return True
+
+    # if we got here than it's either not on the lists, they're empty, or it's lower than the highest required
+    else:
+        return False
+
+
+def isFirstBestMatch(result):
+    """
+    Checks if the given result is a best quality match and if we want to archive the episode on first match.
+    """
+
+    logger.log(u"Checking if we should archive our first best quality match for for episode " + result.name,
+               logger.DEBUG)
+
+    show_obj = result.episodes[0].show
+
+    any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
+
+    # if there is a redownload that's a match to one of our best qualities and we want to archive the episode then we are done
+    if best_qualities and show_obj.archive_firstmatch and result.quality in best_qualities:
+        return True
+
+    return False
+
+def wantedEpisodes(show, fromDate):
+    anyQualities, bestQualities = common.Quality.splitQuality(show.quality) # @UnusedVariable
+    allQualities = list(set(anyQualities + bestQualities))
+
+    logger.log(u"Seeing if we need anything from " + show.name)
+    myDB = db.DBConnection()
+
+    if show.air_by_date:
+        sqlResults = myDB.select(
+            "SELECT ep.status, ep.season, ep.episode FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.indexer_id AND show.paused = 0 AND ep.airdate > ? AND ep.showid = ? AND show.air_by_date = 1",
+        [fromDate.toordinal(), show.indexerid])
+    else:
+        sqlResults = myDB.select(
+            "SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
+            [show.indexerid, fromDate.toordinal()])
+
+    # check through the list of statuses to see if we want any
+    wanted = []
+    for result in sqlResults:
+        curCompositeStatus = int(result["status"] or -1)
+        curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
+
+        if bestQualities:
+            highestBestQuality = max(allQualities)
+        else:
+            highestBestQuality = 0
+
+        # if we need a better one then say yes
+        if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER,
+            common.SNATCHED_BEST) and curQuality < highestBestQuality) or curStatus == common.WANTED:
+
+            epObj = show.getEpisode(int(result["season"]), int(result["episode"]))
+            epObj.wantedQuality = [i for i in allQualities if (i > curQuality and i != common.Quality.UNKNOWN)]
+            wanted.append(epObj)
+
+    return wanted
+
+def searchForNeededEpisodes():
+    foundResults = {}
+
+    didSearch = False
+
+    origThreadName = threading.currentThread().name
+    threads = []
+
+    show_list = sickbeard.showList
+    fromDate = datetime.date.fromordinal(1)
+    episodes = []
+
+    for curShow in show_list:
+        if not curShow.paused:
+            episodes.extend(wantedEpisodes(curShow, fromDate))
+
+    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_daily]
+    for curProvider in providers:
+        threads += [threading.Thread(target=curProvider.cache.updateCache, name=origThreadName + " :: [" + curProvider.name + "]")]
+
+    # start the threads we just created
+    for t in threads:
+        t.start()
+
+    # wait for all threads to finish
+    for t in threads:
+        t.join()
+
+    for curProvider in providers:
+        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
+        curFoundResults = curProvider.searchRSS(episodes)
+        didSearch = True
+
+        # pick a single result for each episode, respecting existing results
+        for curEp in curFoundResults:
+            bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
+
+            # if all results were rejected move on to the next episode
+            if not bestResult:
+                logger.log(u"All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
+                continue
+
+            # if it's already in the list (from another provider) and the newly found quality is no better then skip it
+            if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
+                continue
+
+            foundResults[curEp] = bestResult
+
+    threading.currentThread().name = origThreadName
+
+    if not didSearch:
+        logger.log(
+            u"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
+            logger.ERROR)
+
+    return foundResults.values()
+
+
+def searchProviders(show, episodes, manualSearch=False, downCurQuality=False):
+    foundResults = {}
+    finalResults = []
+
+    didSearch = False
+    threads = []
+
+    # build name cache for show
+    sickbeard.name_cache.buildNameCache(show)
+
+    origThreadName = threading.currentThread().name
+
+    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive() and x.enable_backlog]
+    for curProvider in providers:
+        threads += [threading.Thread(target=curProvider.cache.updateCache,
+                                     name=origThreadName + " :: [" + curProvider.name + "]")]
+
+    # start the threads we just created
+    for t in threads:
+        t.start()
+
+    # wait for all threads to finish
+    for t in threads:
+        t.join()
+
+    for providerNum, curProvider in enumerate(providers):
+        if curProvider.anime_only and not show.is_anime:
+            logger.log(u"" + str(show.name) + " is not an anime, skiping", logger.DEBUG)
+            continue
+
+        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
+
+        foundResults[curProvider.name] = {}
+
+        searchCount = 0
+        search_mode = curProvider.search_mode
+
+        # Always search for episode when manually searching when in sponly and fallback false
+        if search_mode == 'sponly' and manualSearch == True and curProvider.search_fallback == False:
+            search_mode = 'eponly'
+
+        while(True):
+            searchCount += 1
+
+            if search_mode == 'eponly':
+                logger.log(u"Performing episode search for " + show.name)
+            else:
+                logger.log(u"Performing season pack search for " + show.name)
+
+            try:
+                searchResults = curProvider.findSearchResults(show, episodes, search_mode, manualSearch, downCurQuality)
+            except exceptions.AuthException, e:
+                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
+                break
+            except Exception, e:
+                logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
+                logger.log(traceback.format_exc(), logger.DEBUG)
+                break
+            finally:
+                threading.currentThread().name = origThreadName
+
+            didSearch = True
+
+            if len(searchResults):
+                # make a list of all the results for this provider
+                for curEp in searchResults:
+                    if curEp in foundResults:
+                        foundResults[curProvider.name][curEp] += searchResults[curEp]
+                    else:
+                        foundResults[curProvider.name][curEp] = searchResults[curEp]
+
+                break
+            elif not curProvider.search_fallback or searchCount == 2:
+                break
+
+            if search_mode == 'sponly':
+                logger.log(u"FALLBACK EPISODE SEARCH INITIATED ...")
+                search_mode = 'eponly'
+            else:
+                logger.log(u"FALLBACK SEASON PACK SEARCH INITIATED ...")
+                search_mode = 'sponly'
+
+        # skip to next provider if we have no results to process
+        if not len(foundResults[curProvider.name]):
+            continue
+
+        anyQualities, bestQualities = Quality.splitQuality(show.quality)
+
+        # pick the best season NZB
+        bestSeasonResult = None
+        if SEASON_RESULT in foundResults[curProvider.name]:
+            bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show,
+                                           anyQualities + bestQualities)
+
+        highest_quality_overall = 0
+        for cur_episode in foundResults[curProvider.name]:
+            for cur_result in foundResults[curProvider.name][cur_episode]:
+                if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
+                    highest_quality_overall = cur_result.quality
+        logger.log(u"The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
+                   logger.DEBUG)
+
+        # see if every episode is wanted
+        if bestSeasonResult:
+            searchedSeasons = [str(x.season) for x in episodes]
+
+            # get the quality of the season nzb
+            seasonQual = bestSeasonResult.quality
+            logger.log(
+                u"The quality of the season " + bestSeasonResult.provider.providerType + " is " + Quality.qualityStrings[
+                    seasonQual], logger.DEBUG)
+
+            myDB = db.DBConnection()
+            allEps = [int(x["episode"])
+                      for x in myDB.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
+                                           [show.indexerid])]
+
+            logger.log(u"Executed query: [SELECT episode FROM tv_episodes WHERE showid = %s AND season in  %s]" % (show.indexerid, ','.join(searchedSeasons)))
+            logger.log(u"Episode list: " + str(allEps), logger.DEBUG)
+
+            allWanted = True
+            anyWanted = False
+            for curEpNum in allEps:
+                for season in set([x.season for x in episodes]):
+                    if not show.wantEpisode(season, curEpNum, seasonQual, downCurQuality):
+                        allWanted = False
+                    else:
+                        anyWanted = True
+
+            # if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
+            if allWanted and bestSeasonResult.quality == highest_quality_overall:
+                logger.log(
+                    u"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.providerType + " " + bestSeasonResult.name)
+                epObjs = []
+                for curEpNum in allEps:
+                    for season in set([x.season for x in episodes]):
+                        epObjs.append(show.getEpisode(season, curEpNum))
+                bestSeasonResult.episodes = epObjs
+
+                return [bestSeasonResult]
+
+            elif not anyWanted:
+                logger.log(
+                    u"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
+                    logger.DEBUG)
+
+            else:
+
+                if bestSeasonResult.provider.providerType == GenericProvider.NZB:
+                    logger.log(u"Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
+
+                    # if not, break it apart and add them as the lowest priority results
+                    individualResults = nzbSplitter.splitResult(bestSeasonResult)
+                    for curResult in individualResults:
+                        if len(curResult.episodes) == 1:
+                            epNum = curResult.episodes[0].episode
+                        elif len(curResult.episodes) > 1:
+                            epNum = MULTI_EP_RESULT
+
+                        if epNum in foundResults[curProvider.name]:
+                            foundResults[curProvider.name][epNum].append(curResult)
+                        else:
+                            foundResults[curProvider.name][epNum] = [curResult]
+
+                # If this is a torrent all we can do is leech the entire torrent, user will have to select which eps not do download in his torrent client
+                else:
+
+                    # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
+                    logger.log(
+                        u"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
+                    epObjs = []
+                    for curEpNum in allEps:
+                        for season in set([x.season for x in episodes]):
+                            epObjs.append(show.getEpisode(season, curEpNum))
+                    bestSeasonResult.episodes = epObjs
+
+                    epNum = MULTI_EP_RESULT
+                    if epNum in foundResults[curProvider.name]:
+                        foundResults[curProvider.name][epNum].append(bestSeasonResult)
+                    else:
+                        foundResults[curProvider.name][epNum] = [bestSeasonResult]
+
+        # go through multi-ep results and see if we really want them or not, get rid of the rest
+        multiResults = {}
+        if MULTI_EP_RESULT in foundResults[curProvider.name]:
+            for multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
+
+                logger.log(u"Seeing if we want to bother with multi-episode result " + multiResult.name, logger.DEBUG)
+
+                if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(multiResult.name, multiResult.size,
+                                                                               multiResult.provider.name):
+                    logger.log(multiResult.name + u" has previously failed, rejecting this multi-ep result")
+                    continue
+
+                # see how many of the eps that this result covers aren't covered by single results
+                neededEps = []
+                notNeededEps = []
+                for epObj in multiResult.episodes:
+                    epNum = epObj.episode
+                    # if we have results for the episode
+                    if epNum in foundResults[curProvider.name] and len(foundResults[curProvider.name][epNum]) > 0:
+                        neededEps.append(epNum)
+                    else:
+                        notNeededEps.append(epNum)
+
+                logger.log(
+                    u"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
+                    logger.DEBUG)
+
+                if not notNeededEps:
+                    logger.log(u"All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
+                    continue
+
+                # check if these eps are already covered by another multi-result
+                multiNeededEps = []
+                multiNotNeededEps = []
+                for epObj in multiResult.episodes:
+                    epNum = epObj.episode
+                    if epNum in multiResults:
+                        multiNotNeededEps.append(epNum)
+                    else:
+                        multiNeededEps.append(epNum)
+
+                logger.log(
+                    u"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
+                        multiNotNeededEps), logger.DEBUG)
+
+                if not multiNeededEps:
+                    logger.log(
+                        u"All of these episodes were covered by another multi-episode nzbs, ignoring this multi-ep result",
+                        logger.DEBUG)
+                    continue
+
+                # if we're keeping this multi-result then remember it
+                for epObj in multiResult.episodes:
+                    multiResults[epObj.episode] = multiResult
+
+                # don't bother with the single result if we're going to get it with a multi result
+                for epObj in multiResult.episodes:
+                    epNum = epObj.episode
+                    if epNum in foundResults[curProvider.name]:
+                        logger.log(
+                            u"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
+                                epNum) + ", removing the single-episode results from the list", logger.DEBUG)
+                        del foundResults[curProvider.name][epNum]
+
+        # of all the single ep results narrow it down to the best one for each episode
+        finalResults += set(multiResults.values())
+        for curEp in foundResults[curProvider.name]:
+            if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
+                continue
+
+            if not len(foundResults[curProvider.name][curEp]) > 0:
+                continue
+
+            # if all results were rejected move on to the next episode
+            bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
+            if not bestResult:
+                continue
+
+            # add result if its not a duplicate and
+            found = False
+            for i, result in enumerate(finalResults):
+                for bestResultEp in bestResult.episodes:
+                    if bestResultEp in result.episodes:
+                        if result.quality < bestResult.quality:
+                            finalResults.pop(i)
+                        else:
+                            found = True
+            if not found:
+                finalResults += [bestResult]
+
+        # check that we got all the episodes we wanted first before doing a match and snatch
+        wantedEpCount = 0
+        for wantedEp in episodes:
+            for result in finalResults:
+                if wantedEp in result.episodes and isFinalResult(result):
+                    wantedEpCount += 1
+
+        # make sure we search every provider for results unless we found everything we wanted
+        if wantedEpCount == len(episodes):
+            break
+
+    if not didSearch:
+        logger.log(u"No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
+                   logger.ERROR)
+
+    return finalResults
diff --git a/sickbeard/search_queue.py b/sickbeard/search_queue.py
index a2c87602e5ffc5ff3cd5149e1eaa95c0044aeb06..b99984e46bedad4a1f206613c874874627a3da7d 100644
--- a/sickbeard/search_queue.py
+++ b/sickbeard/search_queue.py
@@ -1,296 +1,296 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
-import time
-import traceback
-import threading
-
-import sickbeard
-from sickbeard import db, logger, common, exceptions, helpers
-from sickbeard import generic_queue, scheduler
-from sickbeard import search, failed_history, history
-from sickbeard import ui
-from sickbeard.exceptions import ex
-from sickbeard.search import pickBestResult
-
-search_queue_lock = threading.Lock()
-
-BACKLOG_SEARCH = 10
-DAILY_SEARCH = 20
-FAILED_SEARCH = 30
-MANUAL_SEARCH = 40
-
-MANUAL_SEARCH_HISTORY = []
-MANUAL_SEARCH_HISTORY_SIZE = 100
-
-class SearchQueue(generic_queue.GenericQueue):
-    def __init__(self):
-        generic_queue.GenericQueue.__init__(self)
-        self.queue_name = "SEARCHQUEUE"
-
-    def is_in_queue(self, show, segment):
-        for cur_item in self.queue:
-            if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
-                return True
-        return False
-
-    def is_ep_in_queue(self, segment):
-        for cur_item in self.queue:
-            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.segment == segment:
-                return True
-        return False
-    
-    def is_show_in_queue(self, show):
-        for cur_item in self.queue:
-            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.show.indexerid == show:
-                return True
-        return False
-    
-    def get_all_ep_from_queue(self, show):
-        ep_obj_list = []
-        for cur_item in self.queue:
-            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and str(cur_item.show.indexerid) == show:
-                ep_obj_list.append(cur_item)
-        return ep_obj_list
-    
-    def pause_backlog(self):
-        self.min_priority = generic_queue.QueuePriorities.HIGH
-
-    def unpause_backlog(self):
-        self.min_priority = 0
-
-    def is_backlog_paused(self):
-        # backlog priorities are NORMAL, this should be done properly somewhere
-        return self.min_priority >= generic_queue.QueuePriorities.NORMAL
-
-    def is_manualsearch_in_progress(self):
-        # Only referenced in webserve.py, only current running manualsearch or failedsearch is needed!!
-        if isinstance(self.currentItem, (ManualSearchQueueItem, FailedQueueItem)):
-            return True
-        return False
-    
-    def is_backlog_in_progress(self):
-        for cur_item in self.queue + [self.currentItem]:
-            if isinstance(cur_item, BacklogQueueItem):
-                return True
-        return False
-
-    def is_dailysearch_in_progress(self):
-        for cur_item in self.queue + [self.currentItem]:
-            if isinstance(cur_item, DailySearchQueueItem):
-                return True
-        return False
-
-    def queue_length(self):
-        length = {'backlog': 0, 'daily': 0, 'manual': 0, 'failed': 0}
-        for cur_item in self.queue:
-            if isinstance(cur_item, DailySearchQueueItem):
-                length['daily'] += 1
-            elif isinstance(cur_item, BacklogQueueItem):
-                length['backlog'] += 1
-            elif isinstance(cur_item, ManualSearchQueueItem):
-                length['manual'] += 1
-            elif isinstance(cur_item, FailedQueueItem):
-                length['failed'] += 1
-        return length
-
-
-    def add_item(self, item):
-        if isinstance(item, DailySearchQueueItem):
-            # daily searches
-            generic_queue.GenericQueue.add_item(self, item)
-        elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
-            # backlog searches
-            generic_queue.GenericQueue.add_item(self, item)
-        elif isinstance(item, (ManualSearchQueueItem, FailedQueueItem)) and not self.is_ep_in_queue(item.segment):
-            # manual and failed searches
-            generic_queue.GenericQueue.add_item(self, item)
-        else:
-            logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
-
-class DailySearchQueueItem(generic_queue.QueueItem):
-    def __init__(self):
-        self.success = None
-        generic_queue.QueueItem.__init__(self, 'Daily Search', DAILY_SEARCH)
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-
-        try:
-            logger.log("Beginning daily search for new episodes")
-            foundResults = search.searchForNeededEpisodes()
-
-            if not len(foundResults):
-                logger.log(u"No needed episodes found")
-            else:
-                for result in foundResults:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    self.success = search.snatchEpisode(result)
-
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-
-            generic_queue.QueueItem.finish(self)
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-
-        if self.success is None:
-            self.success = False
-
-        self.finish()
-
-
-class ManualSearchQueueItem(generic_queue.QueueItem):
-    def __init__(self, show, segment, downCurQuality=False):
-        generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
-        self.priority = generic_queue.QueuePriorities.HIGH
-        self.name = 'MANUAL-' + str(show.indexerid)
-        self.success = None
-        self.show = show
-        self.segment = segment
-        self.started = None
-        self.downCurQuality = downCurQuality
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-
-        try:
-            logger.log("Beginning manual search for: [" + self.segment.prettyName() + "]")
-            self.started = True
-            
-            searchResult = search.searchProviders(self.show, [self.segment], True, self.downCurQuality)
-
-            if searchResult:
-                # just use the first result for now
-                logger.log(u"Downloading " + searchResult[0].name + " from " + searchResult[0].provider.name)
-                self.success = search.snatchEpisode(searchResult[0])
-
-                # give the CPU a break
-                time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-
-            else:
-                ui.notifications.message('No downloads were found',
-                                         "Couldn't find a download for <i>%s</i>" % self.segment.prettyName())
-
-                logger.log(u"Unable to find a download for: [" + self.segment.prettyName() + "]")
-
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-        
-        ### Keep a list with the 100 last executed searches
-        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
-        
-        if self.success is None:
-            self.success = False
-
-        self.finish()
-
-
-class BacklogQueueItem(generic_queue.QueueItem):
-    def __init__(self, show, segment):
-        generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
-        self.priority = generic_queue.QueuePriorities.LOW
-        self.name = 'BACKLOG-' + str(show.indexerid)
-        self.success = None
-        self.show = show
-        self.segment = segment
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-
-        try:
-            logger.log("Beginning backlog search for: [" + self.show.name + "]")
-            searchResult = search.searchProviders(self.show, self.segment, False)
-
-            if searchResult:
-                for result in searchResult:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    search.snatchEpisode(result)
-
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-            else:
-                logger.log(u"No needed episodes found during backlog search for: [" + self.show.name + "]")
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-
-        self.finish()
-
-
-class FailedQueueItem(generic_queue.QueueItem):
-    def __init__(self, show, segment, downCurQuality=False):
-        generic_queue.QueueItem.__init__(self, 'Retry', FAILED_SEARCH)
-        self.priority = generic_queue.QueuePriorities.HIGH
-        self.name = 'RETRY-' + str(show.indexerid)
-        self.show = show
-        self.segment = segment
-        self.success = None
-        self.started = None
-        self.downCurQuality = downCurQuality
-
-    def run(self):
-        generic_queue.QueueItem.run(self)
-        self.started = True
-        
-        try:
-            for epObj in self.segment:
-            
-                logger.log(u"Marking episode as bad: [" + epObj.prettyName() + "]")
-                
-                failed_history.markFailed(epObj)
-    
-                (release, provider) = failed_history.findRelease(epObj)
-                if release:
-                    failed_history.logFailed(release)
-                    history.logFailed(epObj, release, provider)
-    
-                failed_history.revertEpisode(epObj)
-                logger.log("Beginning failed download search for: [" + epObj.prettyName() + "]")
-
-            searchResult = search.searchProviders(self.show, self.segment, True, self.downCurQuality)
-
-            if searchResult:
-                for result in searchResult:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    search.snatchEpisode(result)
-
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-            else:
-                pass
-                #logger.log(u"No valid episode found to retry for: [" + self.segment.prettyName() + "]")
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-            
-        ### Keep a list with the 100 last executed searches
-        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
-
-        if self.success is None:
-            self.success = False
-
-        self.finish()
-        
-def fifo(myList, item, maxSize = 100):
-    if len(myList) >= maxSize:
-        myList.pop(0)
-    myList.append(item)
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import with_statement
+
+import time
+import traceback
+import threading
+
+import sickbeard
+from sickbeard import db, logger, common, exceptions, helpers
+from sickbeard import generic_queue, scheduler
+from sickbeard import search, failed_history, history
+from sickbeard import ui
+from sickbeard.exceptions import ex
+from sickbeard.search import pickBestResult
+
+search_queue_lock = threading.Lock()
+
+BACKLOG_SEARCH = 10
+DAILY_SEARCH = 20
+FAILED_SEARCH = 30
+MANUAL_SEARCH = 40
+
+MANUAL_SEARCH_HISTORY = []
+MANUAL_SEARCH_HISTORY_SIZE = 100
+
+class SearchQueue(generic_queue.GenericQueue):
+    """Queue that serializes all search work (daily, backlog, manual and
+    failed-download retries) so only one search runs at a time."""
+
+    def __init__(self):
+        generic_queue.GenericQueue.__init__(self)
+        self.queue_name = "SEARCHQUEUE"
+
+    def is_in_queue(self, show, segment):
+        # True if a backlog search for this exact show/segment pair is already waiting.
+        for cur_item in self.queue:
+            if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
+                return True
+        return False
+
+    def is_ep_in_queue(self, segment):
+        # True if a manual or failed-retry search for this episode segment is already waiting.
+        for cur_item in self.queue:
+            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.segment == segment:
+                return True
+        return False
+    
+    def is_show_in_queue(self, show):
+        # `show` is compared directly against indexerid here (an int id, not a show object).
+        for cur_item in self.queue:
+            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.show.indexerid == show:
+                return True
+        return False
+    
+    def get_all_ep_from_queue(self, show):
+        # NOTE(review): unlike is_show_in_queue above, this compares
+        # str(indexerid) == show, so callers apparently pass the id as a
+        # string here -- confirm against the callers before unifying the two.
+        ep_obj_list = []
+        for cur_item in self.queue:
+            if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and str(cur_item.show.indexerid) == show:
+                ep_obj_list.append(cur_item)
+        return ep_obj_list
+    
+    def pause_backlog(self):
+        # Raising min_priority above backlog priority keeps backlog items from being picked up.
+        self.min_priority = generic_queue.QueuePriorities.HIGH
+
+    def unpause_backlog(self):
+        self.min_priority = 0
+
+    def is_backlog_paused(self):
+        # backlog priorities are NORMAL, this should be done properly somewhere
+        return self.min_priority >= generic_queue.QueuePriorities.NORMAL
+
+    def is_manualsearch_in_progress(self):
+        # Only referenced in webserve.py, only current running manualsearch or failedsearch is needed!!
+        if isinstance(self.currentItem, (ManualSearchQueueItem, FailedQueueItem)):
+            return True
+        return False
+    
+    def is_backlog_in_progress(self):
+        # Checks both the pending queue and the item currently being processed.
+        for cur_item in self.queue + [self.currentItem]:
+            if isinstance(cur_item, BacklogQueueItem):
+                return True
+        return False
+
+    def is_dailysearch_in_progress(self):
+        # Checks both the pending queue and the item currently being processed.
+        for cur_item in self.queue + [self.currentItem]:
+            if isinstance(cur_item, DailySearchQueueItem):
+                return True
+        return False
+
+    def queue_length(self):
+        # Per-category count of the items currently waiting (excludes the running item).
+        length = {'backlog': 0, 'daily': 0, 'manual': 0, 'failed': 0}
+        for cur_item in self.queue:
+            if isinstance(cur_item, DailySearchQueueItem):
+                length['daily'] += 1
+            elif isinstance(cur_item, BacklogQueueItem):
+                length['backlog'] += 1
+            elif isinstance(cur_item, ManualSearchQueueItem):
+                length['manual'] += 1
+            elif isinstance(cur_item, FailedQueueItem):
+                length['failed'] += 1
+        return length
+
+
+    def add_item(self, item):
+        # Deduplicating enqueue: daily searches are always added; backlog and
+        # manual/failed items are only added when an equivalent item is not
+        # already waiting in the queue.
+        if isinstance(item, DailySearchQueueItem):
+            # daily searches
+            generic_queue.GenericQueue.add_item(self, item)
+        elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
+            # backlog searches
+            generic_queue.GenericQueue.add_item(self, item)
+        elif isinstance(item, (ManualSearchQueueItem, FailedQueueItem)) and not self.is_ep_in_queue(item.segment):
+            # manual and failed searches
+            generic_queue.GenericQueue.add_item(self, item)
+        else:
+            logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
+
+class DailySearchQueueItem(generic_queue.QueueItem):
+    """Queue item that searches providers for newly-needed episodes and
+    snatches every result found."""
+
+    def __init__(self):
+        # success stays None until run() completes, then becomes a bool.
+        self.success = None
+        generic_queue.QueueItem.__init__(self, 'Daily Search', DAILY_SEARCH)
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+
+        try:
+            logger.log("Beginning daily search for new episodes")
+            foundResults = search.searchForNeededEpisodes()
+
+            if not len(foundResults):
+                logger.log(u"No needed episodes found")
+            else:
+                for result in foundResults:
+                    # just use the first result for now
+                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
+                    # NOTE: success reflects the outcome of the *last* snatch only
+                    self.success = search.snatchEpisode(result)
+
+                    # give the CPU a break
+                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+
+            generic_queue.QueueItem.finish(self)
+        except Exception:
+            # never let a search error kill the queue thread; log and move on
+            logger.log(traceback.format_exc(), logger.DEBUG)
+
+        if self.success is None:
+            self.success = False
+
+        self.finish()
+
+
+class ManualSearchQueueItem(generic_queue.QueueItem):
+    """High-priority queue item for a user-initiated search of a single
+    episode (`segment`); snatches the first result returned."""
+
+    def __init__(self, show, segment, downCurQuality=False):
+        generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
+        self.priority = generic_queue.QueuePriorities.HIGH
+        self.name = 'MANUAL-' + str(show.indexerid)
+        self.success = None
+        self.show = show
+        # segment is a single episode object (wrapped in a list for searchProviders)
+        self.segment = segment
+        self.started = None
+        # downCurQuality: allow re-downloading at the episode's current quality
+        self.downCurQuality = downCurQuality
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+
+        try:
+            logger.log("Beginning manual search for: [" + self.segment.prettyName() + "]")
+            self.started = True
+            
+            searchResult = search.searchProviders(self.show, [self.segment], True, self.downCurQuality)
+
+            if searchResult:
+                # just use the first result for now
+                logger.log(u"Downloading " + searchResult[0].name + " from " + searchResult[0].provider.name)
+                self.success = search.snatchEpisode(searchResult[0])
+
+                # give the CPU a break
+                time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+
+            else:
+                # surface the miss in the web UI as well as the log
+                ui.notifications.message('No downloads were found',
+                                         "Couldn't find a download for <i>%s</i>" % self.segment.prettyName())
+
+                logger.log(u"Unable to find a download for: [" + self.segment.prettyName() + "]")
+
+        except Exception:
+            # never let a search error kill the queue thread; log and move on
+            logger.log(traceback.format_exc(), logger.DEBUG)
+        
+        ### Keep a list with the 100 last executed searches
+        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
+        
+        if self.success is None:
+            self.success = False
+
+        self.finish()
+
+
+class BacklogQueueItem(generic_queue.QueueItem):
+    """Low-priority queue item that backlog-searches a segment (list of
+    episodes) for a show and snatches everything found."""
+
+    def __init__(self, show, segment):
+        generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
+        self.priority = generic_queue.QueuePriorities.LOW
+        self.name = 'BACKLOG-' + str(show.indexerid)
+        # NOTE(review): success is declared but never assigned in run() -- confirm
+        # whether callers inspect it for backlog items.
+        self.success = None
+        self.show = show
+        self.segment = segment
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+
+        try:
+            logger.log("Beginning backlog search for: [" + self.show.name + "]")
+            searchResult = search.searchProviders(self.show, self.segment, False)
+
+            if searchResult:
+                for result in searchResult:
+                    # just use the first result for now
+                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
+                    search.snatchEpisode(result)
+
+                    # give the CPU a break
+                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+            else:
+                logger.log(u"No needed episodes found during backlog search for: [" + self.show.name + "]")
+        except Exception:
+            # never let a search error kill the queue thread; log and move on
+            logger.log(traceback.format_exc(), logger.DEBUG)
+
+        self.finish()
+
+
+class FailedQueueItem(generic_queue.QueueItem):
+    """High-priority queue item that marks a segment's episodes as failed
+    downloads, reverts them, then re-searches for replacements."""
+
+    def __init__(self, show, segment, downCurQuality=False):
+        generic_queue.QueueItem.__init__(self, 'Retry', FAILED_SEARCH)
+        self.priority = generic_queue.QueuePriorities.HIGH
+        self.name = 'RETRY-' + str(show.indexerid)
+        self.show = show
+        # segment is a list of episode objects to retry
+        self.segment = segment
+        self.success = None
+        self.started = None
+        self.downCurQuality = downCurQuality
+
+    def run(self):
+        generic_queue.QueueItem.run(self)
+        self.started = True
+        
+        try:
+            # record the failure and revert each episode before re-searching
+            for epObj in self.segment:
+            
+                logger.log(u"Marking episode as bad: [" + epObj.prettyName() + "]")
+                
+                failed_history.markFailed(epObj)
+    
+                (release, provider) = failed_history.findRelease(epObj)
+                if release:
+                    failed_history.logFailed(release)
+                    history.logFailed(epObj, release, provider)
+    
+                failed_history.revertEpisode(epObj)
+                logger.log("Beginning failed download search for: [" + epObj.prettyName() + "]")
+
+            searchResult = search.searchProviders(self.show, self.segment, True, self.downCurQuality)
+
+            if searchResult:
+                for result in searchResult:
+                    # just use the first result for now
+                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
+                    # NOTE(review): snatch result is discarded, so self.success
+                    # always ends up False below -- confirm whether that is intended.
+                    search.snatchEpisode(result)
+
+                    # give the CPU a break
+                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+            else:
+                pass
+                #logger.log(u"No valid episode found to retry for: [" + self.segment.prettyName() + "]")
+        except Exception:
+            # never let a search error kill the queue thread; log and move on
+            logger.log(traceback.format_exc(), logger.DEBUG)
+            
+        ### Keep a list with the 100 last executed searches
+        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
+
+        if self.success is None:
+            self.success = False
+
+        self.finish()
+        
+def fifo(myList, item, maxSize = 100):
+    """Append `item` to `myList` in place, evicting the oldest entry first
+    when the list has reached `maxSize` (bounded FIFO history)."""
+    if len(myList) >= maxSize:
+        myList.pop(0)
+    myList.append(item)
diff --git a/sickbeard/showUpdater.py b/sickbeard/showUpdater.py
index 669974e4d381a44d7b198f624c51802a859a2f03..a5dd91550afc7e3e46dcbc20acd76b68c0813ea4 100644
--- a/sickbeard/showUpdater.py
+++ b/sickbeard/showUpdater.py
@@ -77,7 +77,10 @@ class ShowUpdater():
 
                 # if should_update returns True (not 'Ended') or show is selected stale 'Ended' then update, otherwise just refresh
                 if curShow.should_update(update_date=update_date) or curShow.indexerid in stale_should_update:
-                    curQueueItem = sickbeard.showQueueScheduler.action.updateShow(curShow, True)  # @UndefinedVariable
+                    try:
+                        curQueueItem = sickbeard.showQueueScheduler.action.updateShow(curShow, True)  # @UndefinedVariable
+                    except exceptions.CantUpdateException as e:
+                        logger.log("Unable to update show: {0}".format(str(e)),logger.DEBUG)
                 else:
                     logger.log(
                         u"Not updating episodes for show " + curShow.name + " because it's marked as ended and last/next episode is not within the grace period.",
diff --git a/sickbeard/show_queue.py b/sickbeard/show_queue.py
index ac3fb3315e4fee1ec5f1a7b2f980f389b3586070..1c094925c91f637a137a9ed40af9711918f1b9f0 100644
--- a/sickbeard/show_queue.py
+++ b/sickbeard/show_queue.py
@@ -78,13 +78,16 @@ class ShowQueue(generic_queue.GenericQueue):
     def updateShow(self, show, force=False):
 
         if self.isBeingAdded(show):
-            logger.log(str(show.name) + u" is still being added, wait until it is finished before you update.",logger.DEBUG)
+            raise exceptions.CantUpdateException(
+                str(show.name) + u" is still being added, wait until it is finished before you update.")
 
         if self.isBeingUpdated(show):
-            logger.log(str(show.name) + u" is already being updated by Post-processor or manually started, can't update again until it's done.",logger.DEBUG)
+            raise exceptions.CantUpdateException(
+                str(show.name) + u" is already being updated by Post-processor or manually started, can't update again until it's done.")
 
         if self.isInUpdateQueue(show):
-            logger.log(str(show.name) + u" is in process of being updated by Post-processor or manually started, can't update again until it's done.",logger.DEBUG)
+            raise exceptions.CantUpdateException(
+                str(show.name) + u" is in process of being updated by Post-processor or manually started, can't update again until it's done.")
 
         if not force:
             queueItemObj = QueueItemUpdate(show)
@@ -102,7 +105,7 @@ class ShowQueue(generic_queue.GenericQueue):
 
         if (self.isBeingUpdated(show) or self.isInUpdateQueue(show)) and not force:
             logger.log(
-                u"A refresh was attempted but there is already an update queued or in progress. Since updates do a refres at the end anyway I'm skipping this request.",
+                u"A refresh was attempted but there is already an update queued or in progress. Since updates do a refresh at the end anyway I'm skipping this request.",
                 logger.DEBUG)
             return
 
@@ -129,7 +132,11 @@ class ShowQueue(generic_queue.GenericQueue):
         return queueItemObj
 
     def addShow(self, indexer, indexer_id, showDir, default_status=None, quality=None, flatten_folders=None,
-                lang="en", subtitles=None, anime=None, scene=None, paused=None):
+                lang=None, subtitles=None, anime=None, scene=None, paused=None):
+
+        if lang is None:
+            lang = sickbeard.INDEXER_DEFAULT_LANGUAGE
+
         queueItemObj = QueueItemAdd(indexer, indexer_id, showDir, default_status, quality, flatten_folders, lang,
                                     subtitles, anime, scene, paused)
 
@@ -202,6 +209,9 @@ class QueueItemAdd(ShowQueueItem):
         self.scene = scene
         self.paused = paused
 
+        if sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT:
+            self.paused = sickbeard.TRAKT_ROLLING_ADD_PAUSED
+
         self.show = None
 
         # this will initialize self.show to None
@@ -364,6 +374,8 @@ class QueueItemAdd(ShowQueueItem):
             logger.log(u"Error searching dir for episodes: " + ex(e), logger.ERROR)
             logger.log(traceback.format_exc(), logger.DEBUG)
 
+        sickbeard.traktRollingScheduler.action.updateWantedList(self.show.indexerid)
+
         # if they set default ep status to WANTED then run the backlog to search for episodes
         if self.show.default_ep_status == WANTED:
             logger.log(u"Launching backlog for this show since its episodes are WANTED")
diff --git a/sickbeard/traktChecker.py b/sickbeard/traktChecker.py
index 59761e5449df2b1e127f41e5b6b13992112e61b0..bab65b89781fe600c2b7f926f17acb57c3818568 100644
--- a/sickbeard/traktChecker.py
+++ b/sickbeard/traktChecker.py
@@ -32,7 +32,32 @@ from sickbeard import notifiers
 from sickbeard.common import SNATCHED, SNATCHED_PROPER, DOWNLOADED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED, UNKNOWN, FAILED
 from common import Quality, qualityPresetStrings, statusStrings
 from lib.trakt import *
-from trakt.exceptions import traktException, traktAuthException, traktServerBusy
+from trakt.exceptions import traktException
+
+
+def setEpisodeToWanted(show, s, e):
+    """
+    Sets an episode to wanted, only if it is currently skipped
+    """
+    epObj = show.getEpisode(int(s), int(e))
+    if epObj:
+
+        with epObj.lock:
+            if epObj.status != SKIPPED or epObj.airdate == datetime.date.fromordinal(1):
+                return
+
+            logger.log(u"Setting episode s" + str(s) + "e" + str(e) + " of show " + show.name + " to wanted")
+            # figure out what segment the episode is in and remember it so we can backlog it
+
+            epObj.status = WANTED
+            epObj.saveToDB()
+
+        cur_backlog_queue_item = search_queue.BacklogQueueItem(show, [epObj])
+        sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item)
+
+        logger.log(u"Starting backlog for " + show.name + " season " + str(
+                s) + " episode " + str(e) + " because some eps were set to wanted")
+
 
 class TraktChecker():
 
@@ -45,7 +70,7 @@ class TraktChecker():
 
     def run(self, force=False):
         if not sickbeard.USE_TRAKT:
-            logger.log(u"Trakt integrazione disabled, quit", logger.DEBUG)
+            logger.log(u"Trakt integration disabled, quit", logger.DEBUG)
             return
 
         # add shows from trakt.tv watchlist
@@ -82,7 +107,7 @@ class TraktChecker():
                 return
 
             traktShow = filter(lambda x: int(indexerid) in [int(x['show']['ids']['tvdb'] or 0), int(x['show']['ids']['tvrage'] or 0)], library)
-        except (traktException, traktAuthException, traktServerBusy) as e:
+        except traktException as e:
             logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
 
         return traktShow
@@ -115,7 +140,7 @@ class TraktChecker():
             logger.log(u"Removing " + show_obj.name + " from trakt.tv library", logger.DEBUG)
             try:
                 self.trakt_api.traktRequest("sync/collection/remove", data, method='POST')
-            except (traktException, traktAuthException, traktServerBusy) as e:
+            except traktException as e:
                 logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
                 pass
 
@@ -150,7 +175,7 @@ class TraktChecker():
 
             try:
                 self.trakt_api.traktRequest("sync/collection", data, method='POST')
-            except (traktException, traktAuthException, traktServerBusy) as e:
+            except traktException as e:
                 logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
                 return
 
@@ -266,7 +291,7 @@ class TraktChecker():
             if int(sickbeard.TRAKT_METHOD_ADD) == 1:
                 newShow = helpers.findCertainShow(sickbeard.showList, indexer_id)
                 if newShow is not None:
-                    self.setEpisodeToWanted(newShow, 1, 1)
+                    setEpisodeToWanted(newShow, 1, 1)
                 else:
                     self.todoWanted.append((indexer_id, 1, 1))
 
@@ -297,7 +322,7 @@ class TraktChecker():
                     self.todoWanted.append((indexer_id, show['episode']['season'], show['episode']['number']))
                 else:
                     if newShow.indexer == indexer:
-                        self.setEpisodeToWanted(newShow, show['episode']['season'], show['episode']['number'])
+                        setEpisodeToWanted(newShow, show['episode']['season'], show['episode']['number'])
             except TypeError:
                 logger.log(u"Could not parse the output from trakt for " + show["show"]["title"], logger.DEBUG)
 
@@ -331,35 +356,32 @@ class TraktChecker():
                 logger.log(u"There was an error creating the show, no root directory setting found", logger.ERROR)
                 return
 
-    def setEpisodeToWanted(self, show, s, e):
-        """
-        Sets an episode to wanted, only is it is currently skipped
-        """
-        epObj = show.getEpisode(int(s), int(e))
-        if epObj:
+    def manageNewShow(self, show):
+        logger.log(u"Checking if trakt watch list wants to search for episodes from new show " + show.name, logger.DEBUG)
+        episodes = [i for i in self.todoWanted if i[0] == show.indexerid]
+        for episode in episodes:
+            self.todoWanted.remove(episode)
+            setEpisodeToWanted(show, episode[1], episode[2])
 
-            with epObj.lock:
-                if epObj.status != SKIPPED or epObj.airdate == datetime.date.fromordinal(1):
-                    return
+    def _getShowWatchlist(self):
 
-                logger.log(u"Setting episode s" + str(s) + "e" + str(e) + " of show " + show.name + " to wanted")
-                # figure out what segment the episode is in and remember it so we can backlog it
+        try:
+            self.ShowWatchlist = self.trakt_api.traktRequest("sync/watchlist/shows")
+        except traktException as e:
+            logger.log(u"Could not connect to trakt service, cannot download Show Watchlist: %s" % ex(e), logger.ERROR)
+            return False
 
-                epObj.status = WANTED
-                epObj.saveToDB()
+        return True
 
-            cur_backlog_queue_item = search_queue.BacklogQueueItem(show, [epObj])
-            sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item)
+    def _getEpisodeWatchlist(self):
 
-            logger.log(u"Starting backlog for " + show.name + " season " + str(
-                    s) + " episode " + str(e) + " because some eps were set to wanted")
+        try:
+            self.EpisodeWatchlist = self.trakt_api.traktRequest("sync/watchlist/episodes")
+        except traktException as e:
+            logger.log(u"Could not connect to trakt service, cannot download Episode Watchlist: %s" % ex(e), logger.WARNING)
+            return False
 
-    def manageNewShow(self, show):
-        logger.log(u"Checking if trakt watch list wants to search for episodes from new show " + show.name, logger.DEBUG)
-        episodes = [i for i in self.todoWanted if i[0] == show.indexerid]
-        for episode in episodes:
-            self.todoWanted.remove(episode)
-            self.setEpisodeToWanted(show, episode[1], episode[2])
+        return True
 
     def check_watchlist (self, show_obj, season=None, episode=None):
 
@@ -386,22 +408,177 @@ class TraktChecker():
 
         return found
 
-    def _getShowWatchlist(self):
+class TraktRolling():
+
+    def __init__(self):
+        self.trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
+        self.EpisodeWatched = []
+
+    def run(self, force=False):
+        if not (sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT):
+            return
+
+        logger.log(u"Start getting list from Traktv", logger.DEBUG)
+
+        logger.log(u"Getting EpisodeWatched", logger.DEBUG)
+        if not self._getEpisodeWatched():
+            return
+
+        self.updateWantedList()
+
+    def _getEpisodeWatched(self):
 
         try:
-            self.ShowWatchlist = self.trakt_api.traktRequest("sync/watchlist/shows")
-        except (traktException, traktAuthException, traktServerBusy) as e:
-            logger.log(u"Could not connect to trakt service, cannot download Show Watchlist: %s" % ex(e), logger.ERROR)
+            self.EpisodeWatched = self.trakt_api.traktRequest("sync/watched/shows")
+        except traktException as e:
+            logger.log(u"Could not connect to trakt service, cannot download show from library: %s" % ex(e), logger.ERROR)
             return False
 
         return True
 
-    def _getEpisodeWatchlist(self):
+    def refreshEpisodeWatched(self):
 
-        try:
-            self.EpisodeWatchlist = self.trakt_api.traktRequest("sync/watchlist/episodes")
-        except (traktException, traktAuthException, traktServerBusy) as e:
-            logger.log(u"Could not connect to trakt service, cannot download Episode Watchlist: %s" % ex(e), logger.WARNING)
+       if not (sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT):
+           return False
+
+       if not self._getEpisodeWatched():
+           return False
+
+       return True
+
+    def updateWantedList(self, indexer_id = None):
+
+        if not (sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT):
             return False
 
+        if not self.refreshEpisodeWatched():
+            return False
+
+        num_of_download = sickbeard.TRAKT_ROLLING_NUM_EP
+
+        if not len(self.EpisodeWatched) or num_of_download == 0:
+            return True
+
+        logger.log(u"Start looking if having " + str(num_of_download) + " episode(s) not watched", logger.DEBUG)
+
+        myDB = db.DBConnection()
+
+        sql_selection="SELECT indexer, indexer_id, imdb_id, show_name, season, episode, paused FROM (SELECT * FROM tv_shows s,tv_episodes e WHERE s.indexer_id = e.showid) T1 WHERE T1.episode_id IN (SELECT T2.episode_id FROM tv_episodes T2 WHERE T2.showid = T1.indexer_id and T2.status in (?) and T2.season!=0 and airdate is not null ORDER BY T2.season,T2.episode LIMIT 1)"
+
+        if indexer_id is not None:
+            sql_selection=sql_selection + " and indexer_id = " + str(indexer_id)
+        else:
+            sql_selection=sql_selection + " and T1.paused = 0"
+
+        sql_selection=sql_selection + " ORDER BY T1.show_name,season,episode"
+
+        results = myDB.select(sql_selection,[SKIPPED])
+
+        for cur_result in results:
+
+            indexer_id = str(cur_result["indexer_id"])
+            show_name = (cur_result["show_name"])
+            sn_sb = cur_result["season"]
+            ep_sb = cur_result["episode"]
+
+            newShow = helpers.findCertainShow(sickbeard.showList, int(indexer_id))
+            imdb_id = cur_result["imdb_id"]
+
+            num_of_ep=0
+            season = 1
+            episode = 0
+
+            last_per_season = self.trakt_api.traktRequest("shows/" + str(imdb_id) + "/seasons?extended=full")
+            if not last_per_season:
+                logger.log(u"Could not connect to trakt service, cannot download last season for show", logger.ERROR)
+                return False
+
+            logger.log(u"indexer_id: " + str(indexer_id) + ", Show: " + show_name + " - First skipped Episode: Season " + str(sn_sb) + ", Episode " + str(ep_sb), logger.DEBUG)
+
+            if imdb_id not in (show['show']['ids']['imdb'] for show in self.EpisodeWatched):
+                logger.log(u"Show not found in Watched list", logger.DEBUG)
+                if (sn_sb*100+ep_sb) > 100+num_of_download:
+                    logger.log(u"First " + str(num_of_download) + " episode already downloaded", logger.DEBUG)
+                    continue
+                else:
+                    sn_sb = 1
+                    ep_sb = 1
+                    num_of_ep = num_of_download
+            else:
+                logger.log(u"Show found in Watched list", logger.DEBUG)
+
+                show_watched = [show for show in self.EpisodeWatched if show['show']['ids']['imdb'] == imdb_id]
+
+                season = show_watched[0]['seasons'][-1]['number']
+                episode = show_watched[0]['seasons'][-1]['episodes'][-1]['number']
+                logger.log(u"Last watched, Season: " + str(season) + " - Episode: " + str(episode), logger.DEBUG)
+
+                num_of_ep = num_of_download - (self._num_ep_for_season(last_per_season, sn_sb, ep_sb) - self._num_ep_for_season(last_per_season, season, episode)) + 1
+
+            logger.log(u"Number of Episode to Download: " + str(num_of_ep), logger.DEBUG)
+
+            s = sn_sb
+            e = ep_sb
+
+            for x in range(0,num_of_ep):
+
+                last_s = [last_x_s for last_x_s in last_per_season if last_x_s['number'] == s]
+                if not last_s:
+                    break
+                if episode == 0 or (s*100+e) <= (int(last_s[0]['number'])*100+int(last_s[0]['episode_count'])): 
+
+                    if (s*100+e) > (season*100+episode):
+                        if not cur_result["paused"]:
+                            if newShow is not None:
+                                setEpisodeToWanted(newShow, s, e)
+                            else:
+                                self.todoWanted.append((int(indexer_id), s, e))
+                    else:
+                        self.setEpisodeToDefaultWatched(newShow, s, e)
+
+                    if (s*100+e) == (int(last_s[0]['number'])*100+int(last_s[0]['episode_count'])):
+                        s = s + 1
+                        e = 1
+                    else:
+                        e = e + 1
+
+        logger.log(u"Stop looking if having " + str(num_of_download) + " episode not watched", logger.DEBUG)
         return True
+
+    def setEpisodeToDefaultWatched(self, show, s, e):
+        """
+        Sets an episode to the default watched status, only if it is currently skipped
+        """
+        epObj = show.getEpisode(int(s), int(e))
+        if epObj:
+
+            with epObj.lock:
+                if epObj.status != SKIPPED:
+                    return
+
+                logger.log(u"Setting episode s" + str(s) + "e" + str(e) + " of show " + show.name + " to default watched status")
+                # figure out what segment the episode is in and remember it so we can backlog it
+
+                epObj.status = sickbeard.TRAKT_ROLLING_DEFAULT_WATCHED_STATUS
+                epObj.saveToDB()
+
+    def _num_ep_for_season(self, show, season, episode):
+
+        num_ep = 0
+
+        for curSeason in show:
+
+            sn = int(curSeason["number"])
+            ep = int(curSeason["episode_count"])
+
+            if (sn < season):
+                num_ep = num_ep + (ep)
+            elif (sn == season):
+                num_ep = num_ep + episode
+            elif (sn == 0):
+                continue
+            else:
+                continue
+
+        return num_ep
+
diff --git a/sickbeard/tv.py b/sickbeard/tv.py
index 4b138b8648a30ad7efd58cd0add9c583557a9359..58a6bdbd534427ab8eb0f22e707df41347a503db 100644
--- a/sickbeard/tv.py
+++ b/sickbeard/tv.py
@@ -1082,12 +1082,12 @@ class TVShow(object):
                 # check if downloaded files still exist, update our data if this has changed
                 if not sickbeard.SKIP_REMOVED_FILES:
                     with curEp.lock:
-                        # if it used to have a file associated with it and it doesn't anymore then set it to IGNORED
+                        # if it used to have a file associated with it and it doesn't anymore then set it to ARCHIVED
                         if curEp.location and curEp.status in Quality.DOWNLOADED:
                             logger.log(str(self.indexerid) + u": Location for " + str(season) + "x" + str(
-                                episode) + " doesn't exist, removing it and changing our status to IGNORED",
+                                episode) + " doesn't exist, removing it and changing our status to ARCHIVED",
                                        logger.DEBUG)
-                            curEp.status = IGNORED
+                            curEp.status = ARCHIVED
                             curEp.subtitles = list()
                             curEp.subtitles_searchcount = 0
                             curEp.subtitles_lastsearch = str(datetime.datetime.min)
diff --git a/sickbeard/versionChecker.py b/sickbeard/versionChecker.py
index d32c778203b454415841e6b7ccfd62c8e7f41960..4ce4fdb1b48431d5cc6fb87988103cf41ee22e11 100644
--- a/sickbeard/versionChecker.py
+++ b/sickbeard/versionChecker.py
@@ -1,786 +1,786 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of SickRage.
-#
-# SickRage is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# SickRage is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import platform
-import subprocess
-import re
-import urllib
-import tarfile
-import stat
-import traceback
-import db
-import time
-
-import sickbeard
-from sickbeard import notifiers
-from sickbeard import ui
-from sickbeard import logger, helpers
-from sickbeard.exceptions import ex
-from sickbeard import encodingKludge as ek
-from lib import requests
-from lib.requests.exceptions import RequestException
-
-import shutil
-import lib.shutil_custom
-
-shutil.copyfile = lib.shutil_custom.copyfile_custom
-
-
-class CheckVersion():
-    """
-    Version check class meant to run as a thread object with the sr scheduler.
-    """
-
-    def __init__(self):
-        self.updater = None
-        self.install_type = None        
-
-        if sickbeard.gh:
-            self.install_type = self.find_install_type()
-            if self.install_type == 'git':
-                self.updater = GitUpdateManager()
-            elif self.install_type == 'source':
-                self.updater = SourceUpdateManager()
-
-    def run(self, force=False):
-
-        if self.updater:
-            # set current branch version
-            sickbeard.BRANCH = self.get_branch()
-
-            if self.check_for_new_version(force):
-                if sickbeard.AUTO_UPDATE:
-                    logger.log(u"New update found for SickRage, starting auto-updater ...")
-                    ui.notifications.message('New update found for SickRage, starting auto-updater')
-                    if self.safe_to_update() == True and self._runbackup() == True:
-                        if sickbeard.versionCheckScheduler.action.update():
-                            logger.log(u"Update was successful!")
-                            ui.notifications.message('Update was successful')
-                            sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
-                        else:
-                            logger.log(u"Update failed!")
-                            ui.notifications.message('Update failed!')
-
-    def _runbackup(self):
-        # Do a system backup before update
-        logger.log(u"Config backup in progress...")
-        ui.notifications.message('Backup', 'Config backup in progress...')
-        try:
-            backupDir = os.path.join(sickbeard.DATA_DIR, 'backup')
-            if not os.path.isdir(backupDir):
-                os.mkdir(backupDir)
-    
-            if self._keeplatestbackup(backupDir) == True and self._backup(backupDir) == True:
-                logger.log(u"Config backup successful, updating...")
-                ui.notifications.message('Backup', 'Config backup successful, updating...')
-                return True
-            else:
-                logger.log(u"Config backup failed, aborting update",logger.ERROR)
-                ui.notifications.message('Backup', 'Config backup failed, aborting update')
-                return False
-        except Exception as e:
-            logger.log('Update: Config backup failed. Error: {0}'.format(ex(e)),logger.ERROR)
-            ui.notifications.message('Backup', 'Config backup failed, aborting update')
-            return False
-
-    def _keeplatestbackup(self,backupDir=None):
-        if backupDir:
-            import glob
-            files = glob.glob(os.path.join(backupDir,'*.zip'))
-            if not files:
-                return True
-            now = time.time()
-            newest = files[0], now - os.path.getctime(files[0])
-            for file in files[1:]:
-                age = now - os.path.getctime(file)
-                if age < newest[1]:
-                    newest = file, age
-            files.remove(newest[0])
-            
-            for file in files:
-                os.remove(file)
-            return True
-        else:
-            return False
-    
-    # TODO: Merge with backup in helpers
-    def _backup(self,backupDir=None):
-        if backupDir:
-            source = [os.path.join(sickbeard.DATA_DIR, 'sickbeard.db'), sickbeard.CONFIG_FILE]
-            source.append(os.path.join(sickbeard.DATA_DIR, 'failed.db'))
-            source.append(os.path.join(sickbeard.DATA_DIR, 'cache.db'))
-            target = os.path.join(backupDir, 'sickrage-' + time.strftime('%Y%m%d%H%M%S') + '.zip')
-
-            for (path, dirs, files) in os.walk(sickbeard.CACHE_DIR, topdown=True):
-                for dirname in dirs:
-                    if path == sickbeard.CACHE_DIR and dirname not in ['images']:
-                        dirs.remove(dirname)
-                for filename in files:
-                    source.append(os.path.join(path, filename))
-
-            if helpers.backupConfigZip(source, target, sickbeard.DATA_DIR):
-                return True
-            else:
-                return False
-        else:
-            return False
-
-    def safe_to_update(self):
-
-        def db_safe(self):
-            try:
-                result = self.getDBcompare(sickbeard.BRANCH)
-                if result == 'equal':
-                    logger.log(u"We can proceed with the update. New update has same DB version", logger.DEBUG)
-                    return True
-                elif result == 'upgrade':
-                    logger.log(u"We can't proceed with the update. New update has a new DB version. Please manually update", logger.WARNING)
-                    return False
-                elif result == 'downgrade':
-                    logger.log(u"We can't proceed with the update. New update has a old DB version. It's not possible to downgrade", logger.ERROR)
-                    return False
-                else:
-                    logger.log(u"We can't proceed with the update. Unable to check remote DB version", logger.ERROR)
-                    return False
-            except:
-                logger.log(u"We can't proceed with the update. Unable to compare DB version", logger.ERROR)
-                return False
-        
-        def postprocessor_safe(self):
-            if not sickbeard.autoPostProcesserScheduler.action.amActive:
-                logger.log(u"We can proceed with the update. Post-Processor is not running", logger.DEBUG)
-                return True
-            else:
-                logger.log(u"We can't proceed with the update. Post-Processor is running", logger.DEBUG)
-                return False
-        
-        def showupdate_safe(self):
-            if not sickbeard.showUpdateScheduler.action.amActive:
-                logger.log(u"We can proceed with the update. Shows are not being updated", logger.DEBUG)
-                return True
-            else:
-                logger.log(u"We can't proceed with the update. Shows are being updated", logger.DEBUG)
-                return False
-
-        db_safe = db_safe(self)
-        postprocessor_safe = postprocessor_safe(self)
-        showupdate_safe = showupdate_safe(self)
-
-        if db_safe == True and postprocessor_safe == True and showupdate_safe == True:
-            logger.log(u"Proceeding with auto update", logger.DEBUG)
-            return True
-        else:
-            logger.log(u"Auto update aborted", logger.DEBUG)
-            return False
-
-    def getDBcompare(self, branchDest):
-        try:
-            response = requests.get("https://raw.githubusercontent.com/SICKRAGETV/SickRage/" + str(branchDest) +"/sickbeard/databases/mainDB.py", verify=False)
-            response.raise_for_status()
-            match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})",response.text)
-            branchDestDBversion = int(match.group('version'))
-            myDB = db.DBConnection()
-            branchCurrDBversion = myDB.checkDBVersion()
-            if branchDestDBversion > branchCurrDBversion:
-                return 'upgrade'
-            elif branchDestDBversion == branchCurrDBversion:
-                return 'equal'
-            else:
-                return 'downgrade'
-        except RequestException as e:
-            return 'error'
-        except Exception as e:
-            return 'error'
-
-    def find_install_type(self):
-        """
-        Determines how this copy of sr was installed.
-
-        returns: type of installation. Possible values are:
-            'win': any compiled windows build
-            'git': running from source using git
-            'source': running from source without git
-        """
-
-        # check if we're a windows build
-        if sickbeard.BRANCH.startswith('build '):
-            install_type = 'win'
-        elif os.path.isdir(ek.ek(os.path.join, sickbeard.PROG_DIR, u'.git')):
-            install_type = 'git'
-        else:
-            install_type = 'source'
-
-        return install_type
-
-    def check_for_new_version(self, force=False):
-        """
-        Checks the internet for a newer version.
-
-        returns: bool, True for new version or False for no new version.
-
-        force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
-        """
-
-        if not self.updater or not sickbeard.VERSION_NOTIFY and not sickbeard.AUTO_UPDATE and not force:
-            logger.log(u"Version checking is disabled, not checking for the newest version")
-            return False
-
-        # checking for updates
-        if not sickbeard.AUTO_UPDATE:
-            logger.log(u"Checking for updates using " + self.install_type.upper())
-
-        if not self.updater.need_update():
-            sickbeard.NEWEST_VERSION_STRING = None
-
-            if force:
-                ui.notifications.message('No update needed')
-                logger.log(u"No update needed")
-
-            # no updates needed
-            return False
-
-        # found updates
-        self.updater.set_newest_text()
-        return True
-
-    def update(self):
-        if self.updater:
-            # update branch with current config branch value
-            self.updater.branch = sickbeard.BRANCH
-
-            # check for updates
-            if self.updater.need_update():
-                return self.updater.update()
-
-    def list_remote_branches(self):
-        if self.updater:
-            return self.updater.list_remote_branches()
-
-    def get_branch(self):
-        if self.updater:
-            return self.updater.branch
-
-
-class UpdateManager():
-    def get_github_org(self):
-        return sickbeard.GIT_ORG
-
-    def get_github_repo(self):
-        return sickbeard.GIT_REPO
-
-    def get_update_url(self):
-        return sickbeard.WEB_ROOT + "/home/update/?pid=" + str(sickbeard.PID)
-
-class GitUpdateManager(UpdateManager):
-    def __init__(self):
-        self._git_path = self._find_working_git()
-        self.github_org = self.get_github_org()
-        self.github_repo = self.get_github_repo()
-
-        self.branch = sickbeard.BRANCH
-        if sickbeard.BRANCH == '':
-            self.branch = self._find_installed_branch()
-
-        self._cur_commit_hash = None
-        self._newest_commit_hash = None
-        self._num_commits_behind = 0
-        self._num_commits_ahead = 0
-
-    def _git_error(self):
-        error_message = 'Unable to find your git executable - Shutdown SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
-        sickbeard.NEWEST_VERSION_STRING = error_message
-
-    def _find_working_git(self):
-        test_cmd = 'version'
-
-        if sickbeard.GIT_PATH:
-            main_git = '"' + sickbeard.GIT_PATH + '"'
-        else:
-            main_git = 'git'
-
-        logger.log(u"Checking if we can use git commands: " + main_git + ' ' + test_cmd, logger.DEBUG)
-        output, err, exit_status = self._run_git(main_git, test_cmd)
-
-        if exit_status == 0:
-            logger.log(u"Using: " + main_git, logger.DEBUG)
-            return main_git
-        else:
-            logger.log(u"Not using: " + main_git, logger.DEBUG)
-
-        # trying alternatives
-
-
-        alternative_git = []
-
-        # osx people who start sr from launchd have a broken path, so try a hail-mary attempt for them
-        if platform.system().lower() == 'darwin':
-            alternative_git.append('/usr/local/git/bin/git')
-
-        if platform.system().lower() == 'windows':
-            if main_git != main_git.lower():
-                alternative_git.append(main_git.lower())
-
-        if alternative_git:
-            logger.log(u"Trying known alternative git locations", logger.DEBUG)
-
-            for cur_git in alternative_git:
-                logger.log(u"Checking if we can use git commands: " + cur_git + ' ' + test_cmd, logger.DEBUG)
-                output, err, exit_status = self._run_git(cur_git, test_cmd)
-
-                if exit_status == 0:
-                    logger.log(u"Using: " + cur_git, logger.DEBUG)
-                    return cur_git
-                else:
-                    logger.log(u"Not using: " + cur_git, logger.DEBUG)
-
-        # Still haven't found a working git
-        error_message = 'Unable to find your git executable - Shutdown SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
-        sickbeard.NEWEST_VERSION_STRING = error_message
-
-        return None
-
-    def _run_git(self, git_path, args):
-
-        output = err = exit_status = None
-
-        if not git_path:
-            logger.log(u"No git specified, can't use git commands", logger.ERROR)
-            exit_status = 1
-            return (output, err, exit_status)
-
-        cmd = git_path + ' ' + args
-
-        try:
-            logger.log(u"Executing " + cmd + " with your shell in " + sickbeard.PROG_DIR, logger.DEBUG)
-            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
-                                 shell=True, cwd=sickbeard.PROG_DIR)
-            output, err = p.communicate()
-            exit_status = p.returncode
-
-            if output:
-                output = output.strip()
-
-
-        except OSError:
-            logger.log(u"Command " + cmd + " didn't work")
-            exit_status = 1
-
-        if exit_status == 0:
-            logger.log(cmd + u" : returned successful", logger.DEBUG)
-            exit_status = 0
-
-        elif exit_status == 1:
-            if 'stash' in output:
-                logger.log(u"Please enable 'git reset' in settings or stash your changes in local files",logger.WARNING)
-            else:
-                logger.log(cmd + u" returned : " + str(output), logger.ERROR)
-            exit_status = 1
-
-        elif exit_status == 128 or 'fatal:' in output or err:
-            logger.log(cmd + u" returned : " + str(output), logger.ERROR)
-            exit_status = 128
-
-        else:
-            logger.log(cmd + u" returned : " + str(output) + u", treat as error for now", logger.ERROR)
-            exit_status = 1
-
-        return (output, err, exit_status)
-
-    def _find_installed_version(self):
-        """
-        Attempts to find the currently installed version of SickRage.
-
-        Uses git show to get commit version.
-
-        Returns: True for success or False for failure
-        """
-
-        output, err, exit_status = self._run_git(self._git_path, 'rev-parse HEAD')  # @UnusedVariable
-
-        if exit_status == 0 and output:
-            cur_commit_hash = output.strip()
-            if not re.match('^[a-z0-9]+$', cur_commit_hash):
-                logger.log(u"Output doesn't look like a hash, not using it", logger.ERROR)
-                return False
-            self._cur_commit_hash = cur_commit_hash
-            sickbeard.CUR_COMMIT_HASH = str(cur_commit_hash)
-            return True
-        else:
-            return False
-
-    def _find_installed_branch(self):
-        branch_info, err, exit_status = self._run_git(self._git_path, 'symbolic-ref -q HEAD')  # @UnusedVariable
-        if exit_status == 0 and branch_info:
-            branch = branch_info.strip().replace('refs/heads/', '', 1)
-            if branch:
-                return branch
-                
-        return ""
-        
-    def _check_github_for_update(self):
-        """
-        Uses git commands to check if there is a newer version that the provided
-        commit hash. If there is a newer version it sets _num_commits_behind.
-        """
-
-        self._num_commits_behind = 0
-        self._num_commits_ahead = 0
-
-        # update remote origin url
-        self.update_remote_origin()
-
-        # get all new info from github
-        output, err, exit_status = self._run_git(self._git_path, 'fetch %s' % sickbeard.GIT_REMOTE)
-
-        if not exit_status == 0:
-            logger.log(u"Unable to contact github, can't check for update", logger.ERROR)
-            return
-
-        # get latest commit_hash from remote
-        output, err, exit_status = self._run_git(self._git_path, 'rev-parse --verify --quiet "@{upstream}"')
-
-        if exit_status == 0 and output:
-            cur_commit_hash = output.strip()
-
-            if not re.match('^[a-z0-9]+$', cur_commit_hash):
-                logger.log(u"Output doesn't look like a hash, not using it", logger.DEBUG)
-                return
-
-            else:
-                self._newest_commit_hash = cur_commit_hash
-        else:
-            logger.log(u"git didn't return newest commit hash", logger.DEBUG)
-            return
-
-        # get number of commits behind and ahead (option --count not supported git < 1.7.2)
-        output, err, exit_status = self._run_git(self._git_path, 'rev-list --left-right "@{upstream}"...HEAD')
-
-        if exit_status == 0 and output:
-
-            try:
-                self._num_commits_behind = int(output.count("<"))
-                self._num_commits_ahead = int(output.count(">"))
-
-            except:
-                logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG)
-                return
-
-        logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash)
-                   + u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + str(
-            self._num_commits_ahead), logger.DEBUG)
-
-    def set_newest_text(self):
-
-        # if we're up to date then don't set this
-        sickbeard.NEWEST_VERSION_STRING = None
-
-        if self._num_commits_ahead:
-            logger.log(u"Local branch is ahead of " + self.branch + ". Automatic update not possible.", logger.ERROR)
-            newest_text = "Local branch is ahead of " + self.branch + ". Automatic update not possible."
-
-        elif self._num_commits_behind > 0:
-
-            base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
-            if self._newest_commit_hash:
-                url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
-            else:
-                url = base_url + '/commits/'
-
-            newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a> '
-            newest_text += " (you're " + str(self._num_commits_behind) + " commit"
-            if self._num_commits_behind > 1:
-                newest_text += 's'
-            newest_text += ' behind)' + "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>"
-
-        else:
-            return
-
-        sickbeard.NEWEST_VERSION_STRING = newest_text
-
-    def need_update(self):
-
-        if self.branch != self._find_installed_branch():
-            logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
-            return True
-
-        self._find_installed_version()
-        if not self._cur_commit_hash:
-            return True
-        else:
-            try:
-                self._check_github_for_update()
-            except Exception, e:
-                logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR)
-                return False
-
-            if self._num_commits_behind > 0:
-                return True
-
-        return False
-
-    def update(self):
-        """
-        Calls git pull origin <branch> in order to update SickRage. Returns a bool depending
-        on the call's success.
-        """
-
-        # update remote origin url
-        self.update_remote_origin()
-
-        # remove untracked files and performs a hard reset on git branch to avoid update issues
-        if sickbeard.GIT_RESET:
-            self.clean()
-            self.reset()
-
-        if self.branch == self._find_installed_branch():
-            output, err, exit_status = self._run_git(self._git_path, 'pull -f %s %s' % (sickbeard.GIT_REMOTE, self.branch))  # @UnusedVariable
-        else:
-            output, err, exit_status = self._run_git(self._git_path, 'checkout -f ' + self.branch)  # @UnusedVariable
-
-        if exit_status == 0:
-            self._find_installed_version()
-
-            # Notify update successful
-            if sickbeard.NOTIFY_ON_UPDATE:
-                notifiers.notify_git_update(sickbeard.CUR_COMMIT_HASH if sickbeard.CUR_COMMIT_HASH else "")
-
-            return True
-        else:
-            return False
-
-    def clean(self):
-        """
-        Calls git clean to remove all untracked files. Returns a bool depending
-        on the call's success.
-        """
-        output, err, exit_status = self._run_git(self._git_path, 'clean -df ""')  # @UnusedVariable
-        if exit_status == 0:
-            return True
-
-    def reset(self):
-        """
-        Calls git reset --hard to perform a hard reset. Returns a bool depending
-        on the call's success.
-        """
-        output, err, exit_status = self._run_git(self._git_path, 'reset --hard')  # @UnusedVariable
-        if exit_status == 0:
-            return True
-
-    def list_remote_branches(self):
-        # update remote origin url
-        self.update_remote_origin()
-
-        branches, err, exit_status = self._run_git(self._git_path, 'ls-remote --heads %s' % sickbeard.GIT_REMOTE)  # @UnusedVariable
-        if exit_status == 0 and branches:
-            if branches:
-                return re.findall('\S+\Wrefs/heads/(.*)', branches)
-        return []
-
-    def update_remote_origin(self):
-        self._run_git(self._git_path, 'config remote.origin.url %s' % sickbeard.GIT_REMOTE_URL)
-
-class SourceUpdateManager(UpdateManager):
-    def __init__(self):
-        self.github_org = self.get_github_org()
-        self.github_repo = self.get_github_repo()
-
-        self.branch = sickbeard.BRANCH
-        if sickbeard.BRANCH == '':
-            self.branch = self._find_installed_branch()
-
-        self._cur_commit_hash = sickbeard.CUR_COMMIT_HASH
-        self._newest_commit_hash = None
-        self._num_commits_behind = 0
-
-    def _find_installed_branch(self):
-        if sickbeard.CUR_COMMIT_BRANCH == "":
-            return "master"
-        else:
-            return sickbeard.CUR_COMMIT_BRANCH
-        
-    def need_update(self):
-        # need this to run first to set self._newest_commit_hash
-        try:
-            self._check_github_for_update()
-        except Exception, e:
-            logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR)
-            return False
-
-        if self.branch != self._find_installed_branch():
-            logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
-            return True
-
-        if not self._cur_commit_hash or self._num_commits_behind > 0:
-            return True
-
-        return False
-
-    def _check_github_for_update(self):
-        """
-        Uses pygithub to ask github if there is a newer version that the provided
-        commit hash. If there is a newer version it sets SickRage's version text.
-
-        commit_hash: hash that we're checking against
-        """
-
-        self._num_commits_behind = 0
-        self._newest_commit_hash = None
-
-        # try to get newest commit hash and commits behind directly by comparing branch and current commit
-        if self._cur_commit_hash:
-            branch_compared = sickbeard.gh.compare(base=self.branch, head=self._cur_commit_hash)
-            self._newest_commit_hash = branch_compared.base_commit.sha
-            self._num_commits_behind = branch_compared.behind_by
-
-        # fall back and iterate over last 100 (items per page in gh_api) commits
-        if not self._newest_commit_hash:
-
-            for curCommit in sickbeard.gh.get_commits():
-                if not self._newest_commit_hash:
-                    self._newest_commit_hash = curCommit.sha
-                    if not self._cur_commit_hash:
-                        break
-
-                if curCommit.sha == self._cur_commit_hash:
-                    break
-
-                # when _cur_commit_hash doesn't match anything _num_commits_behind == 100
-                self._num_commits_behind += 1
-
-        logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash)
-                   + u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG)
-
-    def set_newest_text(self):
-
-        # if we're up to date then don't set this
-        sickbeard.NEWEST_VERSION_STRING = None
-
-        if not self._cur_commit_hash:
-            logger.log(u"Unknown current version number, don't know if we should update or not", logger.DEBUG)
-
-            newest_text = "Unknown current version number: If you've never used the SickRage upgrade system before then current version is not set."
-            newest_text += "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>"
-
-        elif self._num_commits_behind > 0:
-            base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
-            if self._newest_commit_hash:
-                url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
-            else:
-                url = base_url + '/commits/'
-
-            newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a>'
-            newest_text += " (you're " + str(self._num_commits_behind) + " commit"
-            if self._num_commits_behind > 1:
-                newest_text += "s"
-            newest_text += " behind)" + "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>"
-        else:
-            return
-
-        sickbeard.NEWEST_VERSION_STRING = newest_text
-
-    def update(self):
-        """
-        Downloads the latest source tarball from github and installs it over the existing version.
-        """
-
-        base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
-        tar_download_url = base_url + '/tarball/' + self.branch
-
-        try:
-            # prepare the update dir
-            sr_update_dir = ek.ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')
-
-            if os.path.isdir(sr_update_dir):
-                logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
-                shutil.rmtree(sr_update_dir)
-
-            logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
-            os.makedirs(sr_update_dir)
-
-            # retrieve file
-            logger.log(u"Downloading update from " + repr(tar_download_url))
-            tar_download_path = os.path.join(sr_update_dir, u'sr-update.tar')
-            urllib.urlretrieve(tar_download_url, tar_download_path)
-
-            if not ek.ek(os.path.isfile, tar_download_path):
-                logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.ERROR)
-                return False
-
-            if not ek.ek(tarfile.is_tarfile, tar_download_path):
-                logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR)
-                return False
-
-            # extract to sr-update dir
-            logger.log(u"Extracting file " + tar_download_path)
-            tar = tarfile.open(tar_download_path)
-            tar.extractall(sr_update_dir)
-            tar.close()
-
-            # delete .tar.gz
-            logger.log(u"Deleting file " + tar_download_path)
-            os.remove(tar_download_path)
-
-            # find update dir name
-            update_dir_contents = [x for x in os.listdir(sr_update_dir) if
-                                   os.path.isdir(os.path.join(sr_update_dir, x))]
-            if len(update_dir_contents) != 1:
-                logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR)
-                return False
-            content_dir = os.path.join(sr_update_dir, update_dir_contents[0])
-
-            # walk temp folder and move files to main folder
-            logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
-            for dirname, dirnames, filenames in os.walk(content_dir):  # @UnusedVariable
-                dirname = dirname[len(content_dir) + 1:]
-                for curfile in filenames:
-                    old_path = os.path.join(content_dir, dirname, curfile)
-                    new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile)
-
-                    # Avoid DLL access problem on WIN32/64
-                    # These files needing to be updated manually
-                    #or find a way to kill the access from memory
-                    if curfile in ('unrar.dll', 'unrar64.dll'):
-                        try:
-                            os.chmod(new_path, stat.S_IWRITE)
-                            os.remove(new_path)
-                            os.renames(old_path, new_path)
-                        except Exception, e:
-                            logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
-                            os.remove(old_path)  # Trash the updated file without moving in new path
-                        continue
-
-                    if os.path.isfile(new_path):
-                        os.remove(new_path)
-                    os.renames(old_path, new_path)
-
-            sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
-            sickbeard.CUR_COMMIT_BRANCH = self.branch
-            
-        except Exception, e:
-            logger.log(u"Error while trying to update: " + ex(e), logger.ERROR)
-            logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG)
-            return False
-
-        # Notify update successful
-        notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING)
-
-        return True
-
-    def list_remote_branches(self):
-        return [x.name for x in sickbeard.gh.get_branches() if x]
+# Author: Nic Wolfe <nic@wolfeden.ca>
+# URL: http://code.google.com/p/sickbeard/
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import platform
+import subprocess
+import re
+import urllib
+import tarfile
+import stat
+import traceback
+import db
+import time
+
+import sickbeard
+from sickbeard import notifiers
+from sickbeard import ui
+from sickbeard import logger, helpers
+from sickbeard.exceptions import ex
+from sickbeard import encodingKludge as ek
+from lib import requests
+from lib.requests.exceptions import RequestException
+
+import shutil
+import lib.shutil_custom
+
+shutil.copyfile = lib.shutil_custom.copyfile_custom
+
+
+class CheckVersion():
+    """
+    Version check class meant to run as a thread object with the sr scheduler.
+    """
+
+    def __init__(self):
+        self.updater = None
+        self.install_type = None        
+
+        if sickbeard.gh:
+            self.install_type = self.find_install_type()
+            if self.install_type == 'git':
+                self.updater = GitUpdateManager()
+            elif self.install_type == 'source':
+                self.updater = SourceUpdateManager()
+
+    def run(self, force=False):
+
+        if self.updater:
+            # set current branch version
+            sickbeard.BRANCH = self.get_branch()
+
+            if self.check_for_new_version(force):
+                if sickbeard.AUTO_UPDATE:
+                    logger.log(u"New update found for SickRage, starting auto-updater ...")
+                    ui.notifications.message('New update found for SickRage, starting auto-updater')
+                    if self.safe_to_update() and self._runbackup():
+                        if sickbeard.versionCheckScheduler.action.update():
+                            logger.log(u"Update was successful!")
+                            ui.notifications.message('Update was successful')
+                            sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
+                        else:
+                            logger.log(u"Update failed!")
+                            ui.notifications.message('Update failed!')
+
+    def _runbackup(self):
+        # Do a system backup before update
+        logger.log(u"Config backup in progress...")
+        ui.notifications.message('Backup', 'Config backup in progress...')
+        try:
+            backupDir = os.path.join(sickbeard.DATA_DIR, 'backup')
+            if not os.path.isdir(backupDir):
+                os.mkdir(backupDir)
+    
+            if self._keeplatestbackup(backupDir) and self._backup(backupDir):
+                logger.log(u"Config backup successful, updating...")
+                ui.notifications.message('Backup', 'Config backup successful, updating...')
+                return True
+            else:
+                logger.log(u"Config backup failed, aborting update",logger.ERROR)
+                ui.notifications.message('Backup', 'Config backup failed, aborting update')
+                return False
+        except Exception as e:
+            logger.log('Update: Config backup failed. Error: {0}'.format(ex(e)),logger.ERROR)
+            ui.notifications.message('Backup', 'Config backup failed, aborting update')
+            return False
+
+    def _keeplatestbackup(self,backupDir=None):
+        if backupDir:
+            import glob
+            files = glob.glob(os.path.join(backupDir,'*.zip'))
+            if not files:
+                return True
+            now = time.time()
+            newest = files[0], now - os.path.getctime(files[0])
+            for fname in files[1:]:
+                age = now - os.path.getctime(fname)
+                if age < newest[1]:
+                    newest = fname, age
+            files.remove(newest[0])
+
+            for fname in files:
+                os.remove(fname)
+            return True
+        else:
+            return False
+    
+    # TODO: Merge with backup in helpers
+    def _backup(self,backupDir=None):
+        if backupDir:
+            source = [os.path.join(sickbeard.DATA_DIR, 'sickbeard.db'), sickbeard.CONFIG_FILE]
+            source.append(os.path.join(sickbeard.DATA_DIR, 'failed.db'))
+            source.append(os.path.join(sickbeard.DATA_DIR, 'cache.db'))
+            target = os.path.join(backupDir, 'sickrage-' + time.strftime('%Y%m%d%H%M%S') + '.zip')
+
+            for (path, dirs, files) in os.walk(sickbeard.CACHE_DIR, topdown=True):
+                for dirname in dirs:
+                    if path == sickbeard.CACHE_DIR and dirname not in ['images']:
+                        dirs.remove(dirname)
+                for filename in files:
+                    source.append(os.path.join(path, filename))
+
+            if helpers.backupConfigZip(source, target, sickbeard.DATA_DIR):
+                return True
+            else:
+                return False
+        else:
+            return False
+
+    def safe_to_update(self):
+
+        def db_safe(self):
+            try:
+                result = self.getDBcompare(sickbeard.BRANCH)
+                if result == 'equal':
+                    logger.log(u"We can proceed with the update. New update has same DB version", logger.DEBUG)
+                    return True
+                elif result == 'upgrade':
+                    logger.log(u"We can't proceed with the update. New update has a new DB version. Please manually update", logger.WARNING)
+                    return False
+                elif result == 'downgrade':
+                    logger.log(u"We can't proceed with the update. New update has a old DB version. It's not possible to downgrade", logger.ERROR)
+                    return False
+                else:
+                    logger.log(u"We can't proceed with the update. Unable to check remote DB version", logger.ERROR)
+                    return False
+            except Exception:
+                logger.log(u"We can't proceed with the update. Unable to compare DB version", logger.ERROR)
+                return False
+        
+        def postprocessor_safe(self):
+            if not sickbeard.autoPostProcesserScheduler.action.amActive:
+                logger.log(u"We can proceed with the update. Post-Processor is not running", logger.DEBUG)
+                return True
+            else:
+                logger.log(u"We can't proceed with the update. Post-Processor is running", logger.DEBUG)
+                return False
+        
+        def showupdate_safe(self):
+            if not sickbeard.showUpdateScheduler.action.amActive:
+                logger.log(u"We can proceed with the update. Shows are not being updated", logger.DEBUG)
+                return True
+            else:
+                logger.log(u"We can't proceed with the update. Shows are being updated", logger.DEBUG)
+                return False
+
+        db_safe = db_safe(self)
+        postprocessor_safe = postprocessor_safe(self)
+        showupdate_safe = showupdate_safe(self)
+
+        if db_safe and postprocessor_safe and showupdate_safe:
+            logger.log(u"Proceeding with auto update", logger.DEBUG)
+            return True
+        else:
+            logger.log(u"Auto update aborted", logger.DEBUG)
+            return False
+
+    def getDBcompare(self, branchDest):
+        try:
+            response = requests.get("https://raw.githubusercontent.com/SICKRAGETV/SickRage/" + str(branchDest) +"/sickbeard/databases/mainDB.py", verify=False)
+            response.raise_for_status()
+            match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})",response.text)
+            branchDestDBversion = int(match.group('version'))
+            myDB = db.DBConnection()
+            branchCurrDBversion = myDB.checkDBVersion()
+            if branchDestDBversion > branchCurrDBversion:
+                return 'upgrade'
+            elif branchDestDBversion == branchCurrDBversion:
+                return 'equal'
+            else:
+                return 'downgrade'
+        except RequestException:
+            return 'error'
+        except Exception:
+            return 'error'
+
+    def find_install_type(self):
+        """
+        Determines how this copy of sr was installed.
+
+        returns: type of installation. Possible values are:
+            'win': any compiled windows build
+            'git': running from source using git
+            'source': running from source without git
+        """
+
+        # check if we're a windows build
+        if sickbeard.BRANCH.startswith('build '):
+            install_type = 'win'
+        elif os.path.isdir(ek.ek(os.path.join, sickbeard.PROG_DIR, u'.git')):
+            install_type = 'git'
+        else:
+            install_type = 'source'
+
+        return install_type
+
+    def check_for_new_version(self, force=False):
+        """
+        Checks the internet for a newer version.
+
+        returns: bool, True for new version or False for no new version.
+
+        force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
+        """
+
+        if not self.updater or not sickbeard.VERSION_NOTIFY and not sickbeard.AUTO_UPDATE and not force:
+            logger.log(u"Version checking is disabled, not checking for the newest version")
+            return False
+
+        # checking for updates
+        if not sickbeard.AUTO_UPDATE:
+            logger.log(u"Checking for updates using " + self.install_type.upper())
+
+        if not self.updater.need_update():
+            sickbeard.NEWEST_VERSION_STRING = None
+
+            if force:
+                ui.notifications.message('No update needed')
+                logger.log(u"No update needed")
+
+            # no updates needed
+            return False
+
+        # found updates
+        self.updater.set_newest_text()
+        return True
+
+    def update(self):
+        if self.updater:
+            # update branch with current config branch value
+            self.updater.branch = sickbeard.BRANCH
+
+            # check for updates
+            if self.updater.need_update():
+                return self.updater.update()
+
+    def list_remote_branches(self):
+        if self.updater:
+            return self.updater.list_remote_branches()
+
+    def get_branch(self):
+        if self.updater:
+            return self.updater.branch
+
+
+class UpdateManager():
+    def get_github_org(self):
+        return sickbeard.GIT_ORG
+
+    def get_github_repo(self):
+        return sickbeard.GIT_REPO
+
+    def get_update_url(self):
+        return sickbeard.WEB_ROOT + "/home/update/?pid=" + str(sickbeard.PID)
+
+class GitUpdateManager(UpdateManager):
+    def __init__(self):
+        self._git_path = self._find_working_git()
+        self.github_org = self.get_github_org()
+        self.github_repo = self.get_github_repo()
+
+        self.branch = sickbeard.BRANCH
+        if sickbeard.BRANCH == '':
+            self.branch = self._find_installed_branch()
+
+        self._cur_commit_hash = None
+        self._newest_commit_hash = None
+        self._num_commits_behind = 0
+        self._num_commits_ahead = 0
+
+    def _git_error(self):
+        error_message = 'Unable to find your git executable - Shutdown SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
+        sickbeard.NEWEST_VERSION_STRING = error_message
+
+    def _find_working_git(self):
+        test_cmd = 'version'
+
+        if sickbeard.GIT_PATH:
+            main_git = '"' + sickbeard.GIT_PATH + '"'
+        else:
+            main_git = 'git'
+
+        logger.log(u"Checking if we can use git commands: " + main_git + ' ' + test_cmd, logger.DEBUG)
+        output, err, exit_status = self._run_git(main_git, test_cmd)
+
+        if exit_status == 0:
+            logger.log(u"Using: " + main_git, logger.DEBUG)
+            return main_git
+        else:
+            logger.log(u"Not using: " + main_git, logger.DEBUG)
+
+        # trying alternatives
+
+
+        alternative_git = []
+
+        # osx people who start sr from launchd have a broken path, so try a hail-mary attempt for them
+        if platform.system().lower() == 'darwin':
+            alternative_git.append('/usr/local/git/bin/git')
+
+        if platform.system().lower() == 'windows':
+            if main_git != main_git.lower():
+                alternative_git.append(main_git.lower())
+
+        if alternative_git:
+            logger.log(u"Trying known alternative git locations", logger.DEBUG)
+
+            for cur_git in alternative_git:
+                logger.log(u"Checking if we can use git commands: " + cur_git + ' ' + test_cmd, logger.DEBUG)
+                output, err, exit_status = self._run_git(cur_git, test_cmd)
+
+                if exit_status == 0:
+                    logger.log(u"Using: " + cur_git, logger.DEBUG)
+                    return cur_git
+                else:
+                    logger.log(u"Not using: " + cur_git, logger.DEBUG)
+
+        # Still haven't found a working git
+        error_message = 'Unable to find your git executable - Shutdown SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
+        sickbeard.NEWEST_VERSION_STRING = error_message
+
+        return None
+
+    def _run_git(self, git_path, args):
+
+        output = err = exit_status = None
+
+        if not git_path:
+            logger.log(u"No git specified, can't use git commands", logger.ERROR)
+            exit_status = 1
+            return (output, err, exit_status)
+
+        cmd = git_path + ' ' + args
+
+        try:
+            logger.log(u"Executing " + cmd + " with your shell in " + sickbeard.PROG_DIR, logger.DEBUG)
+            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+                                 shell=True, cwd=sickbeard.PROG_DIR)
+            output, err = p.communicate()
+            exit_status = p.returncode
+
+            if output:
+                output = output.strip()
+
+
+        except OSError:
+            logger.log(u"Command " + cmd + " didn't work")
+            exit_status = 1
+
+        if exit_status == 0:
+            logger.log(cmd + u" : returned successful", logger.DEBUG)
+            exit_status = 0
+
+        elif exit_status == 1:
+            if 'stash' in output:
+                logger.log(u"Please enable 'git reset' in settings or stash your changes in local files",logger.WARNING)
+            else:
+                logger.log(cmd + u" returned : " + str(output), logger.ERROR)
+            exit_status = 1
+
+        elif exit_status == 128 or 'fatal:' in output or err:
+            logger.log(cmd + u" returned : " + str(output), logger.ERROR)
+            exit_status = 128
+
+        else:
+            logger.log(cmd + u" returned : " + str(output) + u", treat as error for now", logger.ERROR)
+            exit_status = 1
+
+        return (output, err, exit_status)
+
+    def _find_installed_version(self):
+        """
+        Attempts to find the currently installed version of SickRage.
+
+        Uses git rev-parse to get the current commit hash.
+
+        Returns: True for success or False for failure
+        """
+
+        output, err, exit_status = self._run_git(self._git_path, 'rev-parse HEAD')  # @UnusedVariable
+
+        if exit_status == 0 and output:
+            cur_commit_hash = output.strip()
+            if not re.match('^[a-z0-9]+$', cur_commit_hash):
+                logger.log(u"Output doesn't look like a hash, not using it", logger.ERROR)
+                return False
+            self._cur_commit_hash = cur_commit_hash
+            sickbeard.CUR_COMMIT_HASH = str(cur_commit_hash)
+            return True
+        else:
+            return False
+
+    def _find_installed_branch(self):
+        branch_info, err, exit_status = self._run_git(self._git_path, 'symbolic-ref -q HEAD')  # @UnusedVariable
+        if exit_status == 0 and branch_info:
+            branch = branch_info.strip().replace('refs/heads/', '', 1)
+            if branch:
+                return branch
+                
+        return ""
+        
+    def _check_github_for_update(self):
+        """
+        Uses git commands to check if there is a newer version than the provided
+        commit hash. If there is a newer version it sets _num_commits_behind.
+        """
+
+        self._num_commits_behind = 0
+        self._num_commits_ahead = 0
+
+        # update remote origin url
+        self.update_remote_origin()
+
+        # get all new info from github
+        output, err, exit_status = self._run_git(self._git_path, 'fetch %s' % sickbeard.GIT_REMOTE)
+
+        if not exit_status == 0:
+            logger.log(u"Unable to contact github, can't check for update", logger.ERROR)
+            return
+
+        # get latest commit_hash from remote
+        output, err, exit_status = self._run_git(self._git_path, 'rev-parse --verify --quiet "@{upstream}"')
+
+        if exit_status == 0 and output:
+            cur_commit_hash = output.strip()
+
+            if not re.match('^[a-z0-9]+$', cur_commit_hash):
+                logger.log(u"Output doesn't look like a hash, not using it", logger.DEBUG)
+                return
+
+            else:
+                self._newest_commit_hash = cur_commit_hash
+        else:
+            logger.log(u"git didn't return newest commit hash", logger.DEBUG)
+            return
+
+        # get number of commits behind and ahead (option --count not supported git < 1.7.2)
+        output, err, exit_status = self._run_git(self._git_path, 'rev-list --left-right "@{upstream}"...HEAD')
+
+        if exit_status == 0 and output:
+
+            try:
+                self._num_commits_behind = int(output.count("<"))
+                self._num_commits_ahead = int(output.count(">"))
+
+            except:
+                logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG)
+                return
+
+        logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash)
+                   + u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + str(
+            self._num_commits_ahead), logger.DEBUG)
+
+    def set_newest_text(self):
+
+        # if we're up to date then don't set this
+        sickbeard.NEWEST_VERSION_STRING = None
+
+        if self._num_commits_ahead:
+            logger.log(u"Local branch is ahead of " + self.branch + ". Automatic update not possible.", logger.ERROR)
+            newest_text = "Local branch is ahead of " + self.branch + ". Automatic update not possible."
+
+        elif self._num_commits_behind > 0:
+
+            base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
+            if self._newest_commit_hash:
+                url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
+            else:
+                url = base_url + '/commits/'
+
+            newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a> '
+            newest_text += " (you're " + str(self._num_commits_behind) + " commit"
+            if self._num_commits_behind > 1:
+                newest_text += 's'
+            newest_text += ' behind)' + "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>"
+
+        else:
+            return
+
+        sickbeard.NEWEST_VERSION_STRING = newest_text
+
+    def need_update(self):
+
+        if self.branch != self._find_installed_branch():
+            logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
+            return True
+
+        self._find_installed_version()
+        if not self._cur_commit_hash:
+            return True
+        else:
+            try:
+                self._check_github_for_update()
+            except Exception, e:
+                logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR)
+                return False
+
+            if self._num_commits_behind > 0:
+                return True
+
+        return False
+
+    def update(self):
+        """
+        Calls git pull on the configured remote and branch in order to update SickRage. Returns a bool depending
+        on the call's success.
+        """
+
+        # update remote origin url
+        self.update_remote_origin()
+
+        # remove untracked files and performs a hard reset on git branch to avoid update issues
+        if sickbeard.GIT_RESET:
+            self.clean()
+            self.reset()
+
+        if self.branch == self._find_installed_branch():
+            output, err, exit_status = self._run_git(self._git_path, 'pull -f %s %s' % (sickbeard.GIT_REMOTE, self.branch))  # @UnusedVariable
+        else:
+            output, err, exit_status = self._run_git(self._git_path, 'checkout -f ' + self.branch)  # @UnusedVariable
+
+        if exit_status == 0:
+            self._find_installed_version()
+
+            # Notify update successful
+            if sickbeard.NOTIFY_ON_UPDATE:
+                notifiers.notify_git_update(sickbeard.CUR_COMMIT_HASH if sickbeard.CUR_COMMIT_HASH else "")
+
+            return True
+        else:
+            return False
+
+    def clean(self):
+        """
+        Calls git clean to remove all untracked files. Returns a bool depending
+        on the call's success.
+        """
+        output, err, exit_status = self._run_git(self._git_path, 'clean -df ""')  # @UnusedVariable
+        if exit_status == 0:
+            return True
+
+    def reset(self):
+        """
+        Calls git reset --hard to perform a hard reset. Returns a bool depending
+        on the call's success.
+        """
+        output, err, exit_status = self._run_git(self._git_path, 'reset --hard')  # @UnusedVariable
+        if exit_status == 0:
+            return True
+
+    def list_remote_branches(self):
+        # update remote origin url
+        self.update_remote_origin()
+
+        branches, err, exit_status = self._run_git(self._git_path, 'ls-remote --heads %s' % sickbeard.GIT_REMOTE)  # @UnusedVariable
+        if exit_status == 0 and branches:
+            if branches:
+                return re.findall('\S+\Wrefs/heads/(.*)', branches)
+        return []
+
+    def update_remote_origin(self):
+        self._run_git(self._git_path, 'config remote.origin.url %s' % sickbeard.GIT_REMOTE_URL)
+
+class SourceUpdateManager(UpdateManager):
+    def __init__(self):
+        self.github_org = self.get_github_org()
+        self.github_repo = self.get_github_repo()
+
+        self.branch = sickbeard.BRANCH
+        if sickbeard.BRANCH == '':
+            self.branch = self._find_installed_branch()
+
+        self._cur_commit_hash = sickbeard.CUR_COMMIT_HASH
+        self._newest_commit_hash = None
+        self._num_commits_behind = 0
+
+    def _find_installed_branch(self):
+        if sickbeard.CUR_COMMIT_BRANCH == "":
+            return "master"
+        else:
+            return sickbeard.CUR_COMMIT_BRANCH
+        
+    def need_update(self):
+        # need this to run first to set self._newest_commit_hash
+        try:
+            self._check_github_for_update()
+        except Exception, e:
+            logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR)
+            return False
+
+        if self.branch != self._find_installed_branch():
+            logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
+            return True
+
+        if not self._cur_commit_hash or self._num_commits_behind > 0:
+            return True
+
+        return False
+
+    def _check_github_for_update(self):
+        """
+        Uses pygithub to ask github if there is a newer version than the provided
+        commit hash. If there is a newer version it sets SickRage's version text.
+
+        commit_hash: hash that we're checking against
+        """
+
+        self._num_commits_behind = 0
+        self._newest_commit_hash = None
+
+        # try to get newest commit hash and commits behind directly by comparing branch and current commit
+        if self._cur_commit_hash:
+            branch_compared = sickbeard.gh.compare(base=self.branch, head=self._cur_commit_hash)
+            self._newest_commit_hash = branch_compared.base_commit.sha
+            self._num_commits_behind = branch_compared.behind_by
+
+        # fall back and iterate over last 100 (items per page in gh_api) commits
+        if not self._newest_commit_hash:
+
+            for curCommit in sickbeard.gh.get_commits():
+                if not self._newest_commit_hash:
+                    self._newest_commit_hash = curCommit.sha
+                    if not self._cur_commit_hash:
+                        break
+
+                if curCommit.sha == self._cur_commit_hash:
+                    break
+
+                # when _cur_commit_hash doesn't match anything _num_commits_behind == 100
+                self._num_commits_behind += 1
+
+        logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash)
+                   + u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG)
+
+    def set_newest_text(self):
+
+        # if we're up to date then don't set this
+        sickbeard.NEWEST_VERSION_STRING = None
+
+        if not self._cur_commit_hash:
+            logger.log(u"Unknown current version number, don't know if we should update or not", logger.DEBUG)
+
+            newest_text = "Unknown current version number: If you've never used the SickRage upgrade system before then current version is not set."
+            newest_text += "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>"
+
+        elif self._num_commits_behind > 0:
+            base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
+            if self._newest_commit_hash:
+                url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
+            else:
+                url = base_url + '/commits/'
+
+            newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a>'
+            newest_text += " (you're " + str(self._num_commits_behind) + " commit"
+            if self._num_commits_behind > 1:
+                newest_text += "s"
+            newest_text += " behind)" + "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>"
+        else:
+            return
+
+        sickbeard.NEWEST_VERSION_STRING = newest_text
+
+    def update(self):
+        """
+        Downloads the latest source tarball from github and installs it over the existing version.
+        """
+
+        base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
+        tar_download_url = base_url + '/tarball/' + self.branch
+
+        try:
+            # prepare the update dir
+            sr_update_dir = ek.ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')
+
+            if os.path.isdir(sr_update_dir):
+                logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
+                shutil.rmtree(sr_update_dir)
+
+            logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
+            os.makedirs(sr_update_dir)
+
+            # retrieve file
+            logger.log(u"Downloading update from " + repr(tar_download_url))
+            tar_download_path = os.path.join(sr_update_dir, u'sr-update.tar')
+            urllib.urlretrieve(tar_download_url, tar_download_path)
+
+            if not ek.ek(os.path.isfile, tar_download_path):
+                logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.ERROR)
+                return False
+
+            if not ek.ek(tarfile.is_tarfile, tar_download_path):
+                logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR)
+                return False
+
+            # extract to sr-update dir
+            logger.log(u"Extracting file " + tar_download_path)
+            tar = tarfile.open(tar_download_path)
+            tar.extractall(sr_update_dir)
+            tar.close()
+
+            # delete .tar.gz
+            logger.log(u"Deleting file " + tar_download_path)
+            os.remove(tar_download_path)
+
+            # find update dir name
+            update_dir_contents = [x for x in os.listdir(sr_update_dir) if
+                                   os.path.isdir(os.path.join(sr_update_dir, x))]
+            if len(update_dir_contents) != 1:
+                logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR)
+                return False
+            content_dir = os.path.join(sr_update_dir, update_dir_contents[0])
+
+            # walk temp folder and move files to main folder
+            logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
+            for dirname, dirnames, filenames in os.walk(content_dir):  # @UnusedVariable
+                dirname = dirname[len(content_dir) + 1:]
+                for curfile in filenames:
+                    old_path = os.path.join(content_dir, dirname, curfile)
+                    new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile)
+
+                    # Avoid DLL access problem on WIN32/64
+                    # These files need to be updated manually
+                    # or find a way to kill the access from memory
+                    if curfile in ('unrar.dll', 'unrar64.dll'):
+                        try:
+                            os.chmod(new_path, stat.S_IWRITE)
+                            os.remove(new_path)
+                            os.renames(old_path, new_path)
+                        except Exception, e:
+                            logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
+                            os.remove(old_path)  # Trash the updated file without moving in new path
+                        continue
+
+                    if os.path.isfile(new_path):
+                        os.remove(new_path)
+                    os.renames(old_path, new_path)
+
+            sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
+            sickbeard.CUR_COMMIT_BRANCH = self.branch
+            
+        except Exception, e:
+            logger.log(u"Error while trying to update: " + ex(e), logger.ERROR)
+            logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG)
+            return False
+
+        # Notify update successful
+        notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING)
+
+        return True
+
+    def list_remote_branches(self):
+        return [x.name for x in sickbeard.gh.get_branches() if x]
diff --git a/sickbeard/webapi.py b/sickbeard/webapi.py
index ec25c41f93a9562dd7224a6c7004c57cf50bfe7a..0fd2b82aca6176078898bc2e9d644c253e6a58c5 100644
--- a/sickbeard/webapi.py
+++ b/sickbeard/webapi.py
@@ -1669,17 +1669,12 @@ class CMD_SickBeardSearchIndexers(ApiCall):
              }
     }
 
-    valid_languages = {
-        'el': 20, 'en': 7, 'zh': 27, 'it': 15, 'cs': 28, 'es': 16, 'ru': 22,
-        'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31,
-        'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32,
-        'sv': 8, 'sl': 30}
-
     def __init__(self, args, kwargs):
+        self.valid_languages = sickbeard.indexerApi().config['langabbv_to_id']
         # required
         # optional
         self.name, args = self.check_params(args, kwargs, "name", None, False, "string", [])
-        self.lang, args = self.check_params(args, kwargs, "lang", "en", False, "string", self.valid_languages.keys())
+        self.lang, args = self.check_params(args, kwargs, "lang", sickbeard.INDEXER_DEFAULT_LANGUAGE, False, "string", self.valid_languages.keys())
 
         self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, False, "int", [])
 
@@ -1696,7 +1691,7 @@ class CMD_SickBeardSearchIndexers(ApiCall):
             for _indexer in sickbeard.indexerApi().indexers if self.indexer == 0 else [int(self.indexer)]:
                 lINDEXER_API_PARMS = sickbeard.indexerApi(_indexer).api_params.copy()
 
-                if self.lang and not self.lang == 'en':
+                if self.lang and not self.lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                     lINDEXER_API_PARMS['language'] = self.lang
 
                 lINDEXER_API_PARMS['actors'] = False
@@ -1722,7 +1717,7 @@ class CMD_SickBeardSearchIndexers(ApiCall):
             for _indexer in sickbeard.indexerApi().indexers if self.indexer == 0 else [int(self.indexer)]:
                 lINDEXER_API_PARMS = sickbeard.indexerApi(_indexer).api_params.copy()
 
-                if self.lang and not self.lang == 'en':
+                if self.lang and not self.lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
                     lINDEXER_API_PARMS['language'] = self.lang
 
                 lINDEXER_API_PARMS['actors'] = False
@@ -2078,13 +2073,8 @@ class CMD_ShowAddNew(ApiCall):
              }
     }
 
-    valid_languages = {
-        'el': 20, 'en': 7, 'zh': 27, 'it': 15, 'cs': 28, 'es': 16, 'ru': 22,
-        'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31,
-        'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32,
-        'sv': 8, 'sl': 30}
-
     def __init__(self, args, kwargs):
+        self.valid_languages = sickbeard.indexerApi().config['langabbv_to_id']
         # required
         self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
 
@@ -2102,7 +2092,7 @@ class CMD_ShowAddNew(ApiCall):
                                                        "bool", [])
         self.status, args = self.check_params(args, kwargs, "status", None, False, "string",
                                               ["wanted", "skipped", "archived", "ignored"])
-        self.lang, args = self.check_params(args, kwargs, "lang", "en", False, "string",
+        self.lang, args = self.check_params(args, kwargs, "lang", sickbeard.INDEXER_DEFAULT_LANGUAGE, False, "string",
                                             self.valid_languages.keys())
         self.subtitles, args = self.check_params(args, kwargs, "subtitles", int(sickbeard.USE_SUBTITLES),
                                                  False, "int",
@@ -2752,8 +2742,8 @@ class CMD_ShowUpdate(ApiCall):
         try:
             sickbeard.showQueueScheduler.action.updateShow(showObj, True)  # @UndefinedVariable
             return _responds(RESULT_SUCCESS, msg=str(showObj.name) + " has queued to be updated")
-        except exceptions.CantUpdateException, e:
-            logger.log(u"API:: Unable to update " + str(showObj.name) + ". " + str(ex(e)), logger.ERROR)
+        except exceptions.CantUpdateException as e:
+            logger.log("API::Unable to update show: {0}".format(str(e)),logger.DEBUG)
             return _responds(RESULT_FAILURE, msg="Unable to update " + str(showObj.name))
 
 
diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py
index f5a5f594687f84130ba68819191fec31cb8460f5..c3a0cfb29f7507276c51731c0fb77503f288ab87 100644
--- a/sickbeard/webserve.py
+++ b/sickbeard/webserve.py
@@ -31,7 +31,7 @@ from sickbeard import config, sab
 from sickbeard import clients
 from sickbeard import history, notifiers, processTV
 from sickbeard import ui
-from sickbeard import logger, helpers, exceptions, classes, db
+from sickbeard import logger, helpers, exceptions, classes, db, scheduler, showUpdater
 from sickbeard import encodingKludge as ek
 from sickbeard import search_queue
 from sickbeard import image_cache
@@ -56,7 +56,7 @@ from lib.dateutil import tz, parser as dateutil_parser
 from lib.unrar2 import RarFile
 from lib import adba, subliminal
 from lib.trakt import TraktAPI
-from lib.trakt.exceptions import traktException, traktAuthException, traktServerBusy
+from lib.trakt.exceptions import traktException
 from versionChecker import CheckVersion
 
 try:
@@ -959,13 +959,13 @@ class Home(WebRoot):
                 "dbloc": dbloc}
 
 
-    def testTrakt(self, username=None, password=None, disable_ssl=None):
+    def testTrakt(self, username=None, password=None, disable_ssl=None, blacklist_name=None):
         # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
         if disable_ssl == 'true':
             disable_ssl = True
         else:
             disable_ssl = False
-        return notifiers.trakt_notifier.test_notify(username, password, disable_ssl)
+        return notifiers.trakt_notifier.test_notify(username, password, disable_ssl, blacklist_name)
 
 
     def loadShowNotifyLists(self):
@@ -1493,8 +1493,8 @@ class Home(WebRoot):
             try:
                 sickbeard.showQueueScheduler.action.updateShow(showObj, True)
                 time.sleep(cpu_presets[sickbeard.CPU_PRESET])
-            except exceptions.CantUpdateException, e:
-                errors.append("Unable to force an update on the show.")
+            except exceptions.CantUpdateException as e:
+                errors.append("Unable to update show: {0}".format(str(e)))
 
         if do_update_exceptions:
             try:
@@ -1503,6 +1503,12 @@ class Home(WebRoot):
             except exceptions.CantUpdateException, e:
                 errors.append("Unable to force an update on scene exceptions of the show.")
 
+        if not paused and (sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT):
+            # Checking if trakt and rolling_download are enabled because updateWantedList()
+            # doesn't distinguish between a failure and being not activated (returns False)
+            if not sickbeard.traktRollingScheduler.action.updateWantedList():
+                errors.append("Unable to force an update on wanted episode")
+
         if do_update_scene_numbering:
             try:
                 sickbeard.scene_numbering.xem_refresh(showObj.indexerid, showObj.indexer)
@@ -2146,12 +2152,6 @@ class HomeAddShows(Home):
     def getIndexerLanguages(self):
         result = sickbeard.indexerApi().config['valid_languages']
 
-        # Make sure list is sorted alphabetically but 'en' is in front
-        if 'en' in result:
-            del result[result.index('en')]
-        result.sort()
-        result.insert(0, 'en')
-
         return json.dumps({'results': result})
 
 
@@ -2159,9 +2159,9 @@ class HomeAddShows(Home):
         return helpers.sanitizeFileName(name)
 
 
-    def searchIndexersForShowName(self, search_term, lang="en", indexer=None):
+    def searchIndexersForShowName(self, search_term, lang=None, indexer=None):
         if not lang or lang == 'null':
-            lang = "en"
+            lang = sickbeard.INDEXER_DEFAULT_LANGUAGE
 
         search_term = search_term.encode('utf-8')
 
@@ -2349,12 +2349,12 @@ class HomeAddShows(Home):
                      None if show['first_aired'] is None else dateutil_parser.parse(show['first_aired']).strftime(sickbeard.DATE_PRESET)]
                     for show in recommendedlist if not helpers.findCertainShow(sickbeard.showList, [
                     int(show['ids'][indexers[sickbeard.TRAKT_DEFAULT_INDEXER - 1]])])))
-        except (traktException, traktAuthException, traktServerBusy) as e:
+        except traktException as e:
             logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
 
         return json.dumps({'results': final_results})
 
-    def addRecommendedShow(self, whichSeries=None, indexerLang="en", rootDir=None, defaultStatus=None,
+    def addRecommendedShow(self, whichSeries=None, indexerLang=None, rootDir=None, defaultStatus=None,
                            anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None,
                            fullShowPath=None, other_shows=None, skipShow=None, providedIndexer=None, anime=None,
                            scene=None):
@@ -2365,6 +2365,9 @@ class HomeAddShows(Home):
         indexer_id = whichSeries.split('|')[0]
         show_name = whichSeries.split('|')[2]
 
+        if indexerLang is None:
+            indexerLang = sickbeard.INDEXER_DEFAULT_LANGUAGE
+
         return self.addNewShow('|'.join([indexer_name, str(indexer), show_url, indexer_id, show_name, ""]),
                                indexerLang, rootDir,
                                defaultStatus,
@@ -2394,8 +2397,11 @@ class HomeAddShows(Home):
         trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
 
         try:
-            if sickbeard.TRAKT_BLACKLIST_NAME is not None:
+            not_liked_show = ""
+            if sickbeard.TRAKT_BLACKLIST_NAME is not None and sickbeard.TRAKT_BLACKLIST_NAME:
                 not_liked_show = trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items") or []
+            else:
+                logger.log(u"trending blacklist name is empty", logger.DEBUG)
 
             limit_show = 50 + len(not_liked_show)
 
@@ -2412,6 +2418,9 @@ class HomeAddShows(Home):
                             if not_liked_show:
                                 if show['show']['ids']['tvdb'] not in (show['show']['ids']['tvdb'] for show in not_liked_show if show['type'] == 'show'):	
                                     t.trending_shows += [show]
+                            else:
+                                t.trending_shows += [show]
+
                 except exceptions.MultipleShowObjectsException:
                     continue
 
@@ -2420,7 +2429,7 @@ class HomeAddShows(Home):
             else:
                 t.blacklist = False
 
-        except (traktException, traktAuthException, traktServerBusy) as e:
+        except traktException as e:
             logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
 
         return t.respond()
@@ -2488,7 +2497,7 @@ class HomeAddShows(Home):
         # done adding show
         return self.redirect('/home/')
 
-    def addNewShow(self, whichSeries=None, indexerLang="en", rootDir=None, defaultStatus=None,
+    def addNewShow(self, whichSeries=None, indexerLang=None, rootDir=None, defaultStatus=None,
                    anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None,
                    fullShowPath=None, other_shows=None, skipShow=None, providedIndexer=None, anime=None,
                    scene=None):
@@ -2497,6 +2506,9 @@ class HomeAddShows(Home):
         provided then it forwards back to newShow, if not it goes to /home.
         """
 
+        if indexerLang is None:
+            indexerLang = sickbeard.INDEXER_DEFAULT_LANGUAGE
+
         # grab our list of other dirs if given
         if not other_shows:
             other_shows = []
@@ -2535,7 +2547,8 @@ class HomeAddShows(Home):
 
             indexer = int(series_pieces[1])
             indexer_id = int(series_pieces[3])
-            show_name = series_pieces[4]
+            # Show name was sent in UTF-8 in the form
+            show_name = series_pieces[4].decode('utf-8')
         else:
             # if no indexer was provided use the default indexer set in General settings
             if not providedIndexer:
@@ -3285,7 +3298,7 @@ class Manage(Home, WebRoot):
                     sickbeard.showQueueScheduler.action.updateShow(showObj, True)
                     updates.append(showObj.name)
                 except exceptions.CantUpdateException, e:
-                    errors.append("Unable to update show " + showObj.name + ": " + ex(e))
+                    errors.append("Unable to update show: {0}".format(str(e)))
 
             # don't bother refreshing shows that were updated anyway
             if curShowID in toRefresh and curShowID not in toUpdate:
@@ -3614,22 +3627,37 @@ class ConfigGeneral(Config):
 
     def saveGeneral(self, log_dir=None, log_nr = 5, log_size = 1048576, web_port=None, web_log=None, encryption_version=None, web_ipv6=None,
                     update_shows_on_start=None, update_shows_on_snatch=None, trash_remove_show=None, trash_rotate_logs=None, update_frequency=None,
-                    launch_browser=None, showupdate_hour=3, web_username=None,
+                    indexerDefaultLang='en', launch_browser=None, showupdate_hour=3, web_username=None,
                     api_key=None, indexer_default=None, timezone_display=None, cpu_preset=None,
                     web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None,
                     handle_reverse_proxy=None, sort_article=None, auto_update=None, notify_on_update=None,
                     proxy_setting=None, proxy_indexers=None, anon_redirect=None, git_path=None, git_remote=None,
-                    calendar_unprotected=None, no_restart=None,
+                    calendar_unprotected=None, debug=None, no_restart=None,
                     display_filesize=None, fuzzy_dating=None, trim_zero=None, date_preset=None, date_preset_na=None, time_preset=None,
-                    indexer_timeout=None, play_videos=None, download_url=None, rootDir=None, theme_name=None,
+                    indexer_timeout=None, download_url=None, rootDir=None, theme_name=None,
                     git_reset=None, git_username=None, git_password=None, git_autoissues=None):
 
         results = []
 
         # Misc
-        sickbeard.PLAY_VIDEOS = config.checkbox_to_value(play_videos)
         sickbeard.DOWNLOAD_URL = download_url
+        sickbeard.INDEXER_DEFAULT_LANGUAGE = indexerDefaultLang
         sickbeard.LAUNCH_BROWSER = config.checkbox_to_value(launch_browser)
+        if sickbeard.SHOWUPDATE_HOUR != config.to_int(showupdate_hour):
+            sickbeard.showUpdateScheduler.stop.set()
+            logger.log(u"Waiting for the SHOWUPDATER thread to exit so we can set new start hour")
+            try:
+                sickbeard.showUpdateScheduler.join(10) # Wait 10 sec for the thread to exit
+            except:
+                pass
+            if  sickbeard.showUpdateScheduler.isAlive():
+                logger.log(u"Unable to stop SHOWUPDATER thread, the new configuration will be applied after a restart", logger.WARNING)
+            else:
+                logger.log(u"Starting SHOWUPDATER thread with the new start hour: " + str(config.to_int(showupdate_hour)))
+                sickbeard.showUpdateScheduler = scheduler.Scheduler(showUpdater.ShowUpdater(),
+                                              cycleTime=datetime.timedelta(hours=1),
+                                              threadName="SHOWUPDATER",
+                                              start_time=datetime.time(hour=config.to_int(showupdate_hour)))            
         sickbeard.SHOWUPDATE_HOUR = config.to_int(showupdate_hour)
         config.change_VERSION_NOTIFY(config.checkbox_to_value(version_notify))
         sickbeard.AUTO_UPDATE = config.checkbox_to_value(auto_update)
@@ -3656,12 +3684,16 @@ class ConfigGeneral(Config):
         sickbeard.GIT_REMOTE = git_remote
         sickbeard.CALENDAR_UNPROTECTED = config.checkbox_to_value(calendar_unprotected)
         sickbeard.NO_RESTART = config.checkbox_to_value(no_restart)
+        sickbeard.DEBUG = config.checkbox_to_value(debug)
         # sickbeard.LOG_DIR is set in config.change_LOG_DIR()
 
         sickbeard.WEB_PORT = config.to_int(web_port)
         sickbeard.WEB_IPV6 = config.checkbox_to_value(web_ipv6)
         # sickbeard.WEB_LOG is set in config.change_LOG_DIR()
-        sickbeard.ENCRYPTION_VERSION = config.checkbox_to_value(encryption_version)
+        if config.checkbox_to_value(encryption_version) == 1:
+            sickbeard.ENCRYPTION_VERSION = 2
+        else:
+            sickbeard.ENCRYPTION_VERSION = 0
         sickbeard.WEB_USERNAME = web_username
         sickbeard.WEB_PASSWORD = web_password
 
@@ -4597,6 +4629,7 @@ class ConfigNotifications(Config):
                           trakt_remove_watchlist=None, trakt_sync_watchlist=None, trakt_method_add=None,
                           trakt_start_paused=None, trakt_use_recommended=None, trakt_sync=None,
                           trakt_default_indexer=None, trakt_remove_serieslist=None, trakt_disable_ssl_verify=None, trakt_timeout=None, trakt_blacklist_name=None,
+                          trakt_use_rolling_download=None, trakt_rolling_num_ep=None, trakt_rolling_add_paused=None, trakt_rolling_frequency=None, trakt_rolling_default_watched_status=None, 
                           use_synologynotifier=None, synologynotifier_notify_onsnatch=None,
                           synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
                           use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None,
@@ -4721,6 +4754,11 @@ class ConfigNotifications(Config):
         sickbeard.TRAKT_DISABLE_SSL_VERIFY = config.checkbox_to_value(trakt_disable_ssl_verify)
         sickbeard.TRAKT_TIMEOUT = int(trakt_timeout)
         sickbeard.TRAKT_BLACKLIST_NAME = trakt_blacklist_name
+        sickbeard.TRAKT_USE_ROLLING_DOWNLOAD = config.checkbox_to_value(trakt_use_rolling_download)
+        sickbeard.TRAKT_ROLLING_NUM_EP = int(trakt_rolling_num_ep)
+        sickbeard.TRAKT_ROLLING_ADD_PAUSED = config.checkbox_to_value(trakt_rolling_add_paused)
+        sickbeard.TRAKT_ROLLING_FREQUENCY = int(trakt_rolling_frequency)
+        sickbeard.TRAKT_ROLLING_DEFAULT_WATCHED_STATUS = int(trakt_rolling_default_watched_status)
 
         if sickbeard.USE_TRAKT:
             sickbeard.traktCheckerScheduler.silent = False
@@ -4794,7 +4832,7 @@ class ConfigSubtitles(Config):
 
     def saveSubtitles(self, use_subtitles=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None,
                       service_order=None, subtitles_history=None, subtitles_finder_frequency=None,
-                      subtitles_multi=None):
+                      subtitles_multi=None, embedded_subtitles_all=None):
 
         results = []
 
@@ -4820,6 +4858,7 @@ class ConfigSubtitles(Config):
             subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else ''
         sickbeard.SUBTITLES_DIR = subtitles_dir
         sickbeard.SUBTITLES_HISTORY = config.checkbox_to_value(subtitles_history)
+        sickbeard.EMBEDDED_SUBTITLES_ALL = config.checkbox_to_value(embedded_subtitles_all)
         sickbeard.SUBTITLES_FINDER_FREQUENCY = config.to_int(subtitles_finder_frequency, default=1)
         sickbeard.SUBTITLES_MULTI = config.checkbox_to_value(subtitles_multi)
 
@@ -4930,6 +4969,8 @@ class ErrorLogs(WebRoot):
                 if match:
                     level = match.group(7)
                     logName = match.group(8)
+                    if not sickbeard.DEBUG and (level == 'DEBUG' or level == 'DB'):
+                        continue
                     if level not in logger.reverseNames:
                         lastLine = False
                         continue
diff --git a/tests/config_tests.py b/tests/config_tests.py
index 4b1ffc82516272651d8efb2b69a6b571aabf2b3c..42a8fecdd5b6536401d2287333c56bf108aba7dd 100644
--- a/tests/config_tests.py
+++ b/tests/config_tests.py
@@ -1,21 +1,21 @@
-import unittest
-
-import sys
-import os.path
-sys.path.append(os.path.abspath('..'))
-
-from sickbeard import config
-
-
-class QualityTests(unittest.TestCase):
-
-    def test_clean_url(self):
-        self.assertEqual(config.clean_url("https://subdomain.domain.tld/endpoint"), "https://subdomain.domain.tld/endpoint")
-        self.assertEqual(config.clean_url("google.com/xml.rpc"), "http://google.com/xml.rpc")
-        self.assertEqual(config.clean_url("google.com"), "http://google.com/")
-        self.assertEqual(config.clean_url("http://www.example.com/folder/"), "http://www.example.com/folder/")
-        self.assertEqual(config.clean_url("scgi:///home/user/.config/path/socket"), "scgi:///home/user/.config/path/socket")
-
-if __name__ == '__main__':
-    suite = unittest.TestLoader().loadTestsFromTestCase(QualityTests)
-    unittest.TextTestRunner(verbosity=2).run(suite)
+import unittest
+
+import sys
+import os.path
+sys.path.append(os.path.abspath('..'))
+
+from sickbeard import config
+
+
+class QualityTests(unittest.TestCase):
+
+    def test_clean_url(self):
+        self.assertEqual(config.clean_url("https://subdomain.domain.tld/endpoint"), "https://subdomain.domain.tld/endpoint")
+        self.assertEqual(config.clean_url("google.com/xml.rpc"), "http://google.com/xml.rpc")
+        self.assertEqual(config.clean_url("google.com"), "http://google.com/")
+        self.assertEqual(config.clean_url("http://www.example.com/folder/"), "http://www.example.com/folder/")
+        self.assertEqual(config.clean_url("scgi:///home/user/.config/path/socket"), "scgi:///home/user/.config/path/socket")
+
+if __name__ == '__main__':
+    suite = unittest.TestLoader().loadTestsFromTestCase(QualityTests)
+    unittest.TextTestRunner(verbosity=2).run(suite)