diff --git a/gui/slick/images/providers/torrentz.png b/gui/slick/images/providers/torrentz.png
new file mode 100644
index 0000000000000000000000000000000000000000..8d3493e7b6ec4e610489d2853fcd0e795a5c2e4a
Binary files /dev/null and b/gui/slick/images/providers/torrentz.png differ
diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py
index 845f8288d54c6db742e425f4a4c3ec0b5f87e719..243fa898309fe94730aeb7454e9c2ec13dcc2194 100755
--- a/sickbeard/__init__.py
+++ b/sickbeard/__init__.py
@@ -38,7 +38,7 @@ from sickbeard import providers, metadata, config, webserveInit
 from sickbeard.providers.generic import GenericProvider
 from providers import btn, newznab, womble, thepiratebay, torrentleech, kat, iptorrents, \
     omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, hounddawgs, nextgen, speedcd, nyaatorrents, animenzb, torrentbytes, animezb, \
-    freshontv, morethantv, bitsoup, t411, tokyotoshokan, shazbat, rarbg, alpharatio, tntvillage, binsearch, scenetime, btdigg
+    freshontv, morethantv, bitsoup, t411, tokyotoshokan, shazbat, rarbg, alpharatio, tntvillage, binsearch, scenetime, btdigg, torrentz
 from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \
     naming_ep_type
 from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
diff --git a/sickbeard/providers/__init__.py b/sickbeard/providers/__init__.py
index 5e39b4f616baf1d6a96bd8530a5b6426bd2519ad..9a1f777e0e28e18b3004775a297ee8d9bb51248f 100644
--- a/sickbeard/providers/__init__.py
+++ b/sickbeard/providers/__init__.py
@@ -46,6 +46,7 @@ __all__ = ['womble',
            'binsearch',
            'scenetime',
            'btdigg',
+           'torrentz',
 ]
 
 import sickbeard
diff --git a/sickbeard/providers/scc.py b/sickbeard/providers/scc.py
index b3cf3600a70fe247b27b44d3d04dc1a5f072ed51..fecd13f7ad4d3765594ab01083f2d537222f91f8 100644
--- a/sickbeard/providers/scc.py
+++ b/sickbeard/providers/scc.py
@@ -111,7 +111,7 @@ class SCCProvider(generic.TorrentProvider):
     def _get_season_search_strings(self, ep_obj):
 
         search_strings = []
-        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+        for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
             if ep_obj.show.air_by_date or ep_obj.show.sports:
                 sp_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
             elif ep_obj.show.anime:
@@ -130,7 +130,7 @@ class SCCProvider(generic.TorrentProvider):
         if not ep_obj:
             return []
 
-        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
+        for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
             if self.show.air_by_date:
                 ep_string = sanitizeSceneName(show_name) + ' ' + str(ep_obj.airdate).replace('-', '.')
             elif self.show.sports:
diff --git a/sickbeard/providers/torrentz.py b/sickbeard/providers/torrentz.py
new file mode 100644
index 0000000000000000000000000000000000000000..7494e389933211a4968f6160c8986e8d776c95e6
--- /dev/null
+++ b/sickbeard/providers/torrentz.py
@@ -0,0 +1,178 @@
+# Author: Dustyn Gibson <miigotu@gmail.com>
+# URL: https://github.com/junalmeida/Sick-Beard
+# Ported by :Matigonkas
+#
+# This file is part of SickRage.
+#
+# SickRage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SickRage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+
+import re
+import time
+import requests
+from requests.exceptions import Timeout, ConnectTimeout
+
+import generic
+
+import sickbeard
+from sickbeard.common import *
+from sickbeard import logger
+from sickbeard import tvcache
+from sickbeard.show_name_helpers import allPossibleShowNames
+from sickbeard.helpers import sanitizeSceneName
+from bs4 import BeautifulSoup
+
+class TORRENTZProvider(generic.TorrentProvider):
+
+    def __init__(self):
+
+        generic.TorrentProvider.__init__(self, "Torrentz")
+
+        self.supportsBacklog = True
+
+        self.confirmed = False
+        self.cache = TORRENTZCache(self)
+
+        self.urls = {'verified': 'https://torrentz.eu/feed_verified?p=%d',
+                         'feed': 'https://torrentz.eu/feed?p=%d',
+                         'base': 'https://torrentz.eu/',
+                    }
+        self.session = requests.Session()
+
+    def isEnabled(self):
+        return self.enabled
+
+    def imageName(self):
+        return 'torrentz.png'
+
+    def _get_season_search_strings(self, epObj, season=None):
+        if not epObj or season:
+            return []
+
+        strings = []
+        for title in set(allPossibleShowNames(epObj.show)):
+            if epObj.show.air_by_date or epObj.show.sports:
+                season = str(epObj.airdate).split('-')[0]
+            elif epObj.show.anime:
+                season = '%d' % epObj.scene_absolute_number
+            else:
+                season = 'S%02d' % epObj.season
+
+            strings.append(u'%s %s' % (sanitizeSceneName(title), season))
+
+        return strings
+
+    def _get_episode_search_strings(self, epObj, add_string=''):
+        if not epObj:
+            return []
+
+        strings = []
+        for title in set(allPossibleShowNames(epObj.show)):
+            if epObj.show.air_by_date or epObj.show.sports:
+                episode = str(epObj.airdate).replace('-', ' ')
+            elif epObj.show.anime:
+                episode = '%d' % epObj.scene_absolute_number
+            else:
+                episode = 'S%02dE%02d' % (epObj.season, epObj.episode)
+
+            if add_string:
+                episode += ' ' + add_string
+
+            strings.append(u'%s %s' % (sanitizeSceneName(title), episode))
+
+        return strings
+
+    def _split_description(self, description):
+        match = re.findall(r'[0-9]+', description)
+        return (int(match[0]) *1024 * 1024, match[1], match[2])
+
+    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
+        items = []
+        for search_string in search_strings:
+            if self.confirmed:
+                search = self.urls['verified']
+            else:
+                search = self.urls['feed']
+            if 'RSS' not in search_string:
+                search += '&q=' + urllib.quote_plus(search_string)
+
+            for p in range(0,2):
+                try:
+                    time.sleep(cpu_presets[sickbeard.CPU_PRESET])
+                    page = self.getURL(search % p, timeout=10)
+                except (ConnectTimeout, Timeout):
+                    logger.log('Seems to be down right now!')
+                    continue
+
+                if not page or not page.startswith("<?xml"):
+                    logger.log('Wrong data returned from: ' + search % p, logger.WARNING)
+                    continue
+
+                data = BeautifulSoup(page)
+                for item in data.find_all('item'):
+                    if 'tv' not in item.category.text:
+                        continue
+
+                    title = item.title.text
+                    url = item.guid.text.split('/')[-1]
+                    dsize, seeds, peers = self._split_description(item.description.text)
+
+                    # Have to get the details page to get the correct title
+                    try:
+                        time.sleep(cpu_presets[sickbeard.CPU_PRESET])
+                        details = self.getURL(self.urls['base'] + url, timeout=10)
+                    except (ConnectTimeout, Timeout):
+                        logger.log('Seems to be down right now!')
+                        continue
+
+                    if details and details.startswith('<!DOCTYPE html>'):
+                        dpage = BeautifulSoup(details)
+                        title = dpage.find('dt').a['href'].split('/')[-2]
+                        del dpage
+
+                    logger.log('Adding item: ' + str((title, url, dsize, seeds, peers)), logger.DEBUG)
+                    items.append((title, url, dsize, seeds, peers))
+        return items
+
+    def _get_size(self, item):
+        title, url, size, seeders, leechers = item
+        return size
+
+    def _get_title_and_url(self, item):
+
+        title, url, size, seeders, leechers = item
+
+        if title:
+            title = self._clean_title_from_provider(title)
+
+        if url:
+            url = url.replace('&amp;', '&')
+
+        return (title, url)
+
+class TORRENTZCache(tvcache.TVCache):
+
+    def __init__(self, provider):
+
+        tvcache.TVCache.__init__(self, provider)
+
+        # only poll every 15 minutes max
+        self.minTime = 15
+
+    def _getRSSData(self):
+        params = {'RSS': ['rss']}
+        return {'entries': self.provider._doSearch(params)}
+
+provider = TORRENTZProvider()