diff --git a/sickbeard/providers/torrentleech.py b/sickbeard/providers/torrentleech.py
index b8b5dc6266a4341a7281206db8c374338b2e4f87..f9cae0d1d025c1828dca936d1dfe429b1bf79535 100644
--- a/sickbeard/providers/torrentleech.py
+++ b/sickbeard/providers/torrentleech.py
@@ -1,5 +1,6 @@
 # coding=utf-8
 # Author: Dustyn Gibson <miigotu@gmail.com>
+# Contributor: pluzun <pluzun59@gmail.com>
 #
 # URL: https://sickrage.github.io
 #
@@ -50,7 +51,8 @@ class TorrentLeechProvider(TorrentProvider):  # pylint: disable=too-many-instanc
         self.url = "https://www.torrentleech.org"
         self.urls = {
             "login": urljoin(self.url, "user/account/login/"),
-            "search": urljoin(self.url, "torrents/browse"),
+            "search": urljoin(self.url, "torrents/browse/list/"),
+            "download": urljoin(self.url, "download/"),
         }
 
         # Proper Strings
@@ -87,17 +89,6 @@ class TorrentLeechProvider(TorrentProvider):  # pylint: disable=too-many-instanc
         # TV, Episodes, BoxSets, Episodes HD, Animation, Anime, Cartoons
         # 2,26,27,32,7,34,35
 
-        # Units
-        units = ["B", "KB", "MB", "GB", "TB", "PB"]
-
-        def process_column_header(td):
-            result = ""
-            if td.a:
-                result = td.a.get("title")
-            if not result:
-                result = td.get_text(strip=True)
-            return result
-
         for mode in search_strings:
             items = []
             logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
@@ -115,57 +106,54 @@ class TorrentLeechProvider(TorrentProvider):  # pylint: disable=too-many-instanc
                 else:
                     categories = ["2", "26", "27", "32", "7", "34", "35"]
 
-                search_params = {
-                    "categories": ",".join(categories),
-                    "query": search_string
-                }
+                # Craft the query URL
+                categories_url = 'categories/{categories}/'.format(categories=",".join(categories))
+                query_url = 'query/{query_string}'.format(query_string=search_string)
+                params_url = urljoin(categories_url, query_url)
+                search_url = urljoin(self.urls['search'], params_url)
 
-                data = self.get_url(self.urls["search"], params=search_params, returns="text")
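+                # The browse/list endpoint is expected to return JSON rather than an HTML page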
+                data = self.get_url(search_url, returns='json')
                 if not data:
                     logger.log("No data returned from provider", logger.DEBUG)
                     continue
 
-                with BS4Parser(data, "html5lib") as html:
-                    torrent_table = html.find("table", id="torrenttable")
-                    torrent_rows = torrent_table("tr") if torrent_table else []
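+                # Results are expected under the 'torrentList' key of the JSON response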
+                torrent_list = data['torrentList']
 
-                    # Continue only if at least one Release is found
-                    if len(torrent_rows) < 2:
-                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
-                        continue
+                if len(torrent_list) < 1:
+                    logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
+                    continue
+
+                for torrent in torrent_list:
+                    try:
+                        title = torrent['name']
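+                        # Build the download link from the torrent id ('fid') and filename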
+                        download_url = urljoin(self.urls['download'], '{id}/{filename}'.format(id=torrent['fid'], filename=torrent['filename']))
 
-                    labels = [process_column_header(label) for label in torrent_rows[0]("th")]
+                        seeders = torrent['seeders']
+                        leechers = torrent['leechers']
 
-                    # Skip column headers
-                    for result in torrent_rows[1:]:
-                        try:
-                            title = result.find("td", class_="name").find("a").get_text(strip=True)
-                            download_url = urljoin(self.url, result.find("td", class_="quickdownload").find("a")["href"])
-                            if not all([title, download_url]):
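+                        # Filter unseeded torrent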
+                        if seeders < self.minseed or leechers < self.minleech:
+                            if mode != "RSS":
+                                logger.log("Discarding torrent because it doesn't meet the"
+                                           " minimum seeders or leechers: {0} (S:{1} L:{2})".format
+                                           (title, seeders, leechers), logger.DEBUG)
-                                continue
+                            continue
 
-                            seeders = try_int(result.find("td", class_="seeders").get_text(strip=True))
-                            leechers = try_int(result.find("td", class_="leechers").get_text(strip=True))
+                        size = torrent['size']
 
-                            # Filter unseeded torrent
-                            if seeders < self.minseed or leechers < self.minleech:
-                                if mode != "RSS":
-                                    logger.log("Discarding torrent because it doesn't meet the"
-                                               " minimum seeders or leechers: {0} (S:{1} L:{2})".format
-                                               (title, seeders, leechers), logger.DEBUG)
-                                continue
+                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
 
-                            torrent_size = result("td")[labels.index("Size")].get_text()
-                            size = convert_size(torrent_size, units=units) or -1
+                        if mode != "RSS":
+                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format
+                                       (title, seeders, leechers), logger.DEBUG)
 
-                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
-                            if mode != "RSS":
-                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
-                                           (title, seeders, leechers), logger.DEBUG)
+                        items.append(item)
+                    except StandardError:
+                        continue
 
-                            items.append(item)
-                        except StandardError:
-                            continue
 
             # For each search mode sort all the items by seeders if available
             items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)