diff --git a/sickbeard/providers/newpct.py b/sickbeard/providers/newpct.py
index ad1dd5412b1d4da7079fc73b8e25ad349aef0284..218935cf3c3a168a2250b72125aab2b077a5ddd4 100644
--- a/sickbeard/providers/newpct.py
+++ b/sickbeard/providers/newpct.py
@@ -1,3 +1,5 @@
+# coding=utf-8
+
 # Author: CristianBB
 # Greetings to Mr. Pine-apple
 #
@@ -13,18 +15,19 @@
 # SickRage is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
+# GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
 
 import traceback
-from six.moves import urllib
+import re
+import urllib
 
+from sickbeard import helpers
 from sickbeard import logger
 from sickbeard import tvcache
 from sickbeard.providers import generic
-from sickbeard.common import USER_AGENT
 from sickbeard.bs4_parser import BS4Parser
 
 
@@ -36,6 +39,8 @@ class newpctProvider(generic.TorrentProvider):
         self.supportsBacklog = True
         self.onlyspasearch = None
         self.cache = newpctCache(self)
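+        # Minimum seeders/leechers thresholds (intended to be filled in from the provider settings)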
+        self.minseed = None
+        self.minleech = None
 
         self.urls = {
             'base_url': 'http://www.newpct.com',
@@ -43,7 +48,6 @@ class newpctProvider(generic.TorrentProvider):
         }
 
         self.url = self.urls['base_url']
-        self.headers.update({'User-Agent': USER_AGENT})
 
         """
         Search query:
@@ -71,6 +75,7 @@ class newpctProvider(generic.TorrentProvider):
             'q': ''
         }
 
     def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
 
         results = []
@@ -87,9 +92,12 @@ class newpctProvider(generic.TorrentProvider):
             logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
 
             for search_string in search_strings[mode]:
+                if mode != 'RSS':
+                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)
+
                 self.search_params.update({'q': search_string.strip()})
 
-                logger.log(u"Search URL: %s" % self.urls['search'] + '?' + urllib.parse.urlencode(self.search_params), logger.DEBUG)
+                logger.log(u"Search URL: %s" % self.urls['search'] + '?' + urllib.urlencode(self.search_params), logger.DEBUG)
                 data = self.getURL(self.urls['search'], post_data=self.search_params, timeout=30)
                 if not data:
                     continue
@@ -103,36 +111,84 @@ class newpctProvider(generic.TorrentProvider):
                             continue
 
                         torrent_table = torrent_tbody.findAll('tr')
-                        num_results = len(torrent_table) - 1
-
-                        iteration = 0
+                        # The last row isn't a result, so drop it
+                        torrent_table.pop()
+
                         for row in torrent_table:
                             try:
-                                if iteration < num_results:
-                                    torrent_size = row.findAll('td')[2]
-                                    torrent_row = row.findAll('a')[1]
-
-                                    download_url = torrent_row.get('href')
-                                    title_raw = torrent_row.get('title')
-                                    size = self._convertSize(torrent_size.text)
-
-                                    title = self._processTitle(title_raw)
-
-                                    item = title, download_url, size
-                                    logger.log(u"Found result: %s " % title, logger.DEBUG)
-
-                                    items[mode].append(item)
-                                    iteration += 1
-
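+                                # Assumed row layout on newpct.com: the first <a> holds the detail link
+                                # and title, and the third <td> holds the torrent size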
+                                torrent_size = row.findAll('td')[2]
+                                torrent_row = row.findAll('a')[0]
+
+                                download_url = torrent_row.get('href')
+                                size = self._convertSize(torrent_size.text)
+                                title_raw = torrent_row.get('title')
+                                title = self._processTitle(title_raw)
+
+                                # FIXME: the results table doesn't appear to expose seed/leech counts
+                                seeders = 1
+                                leechers = 0
+
                             except (AttributeError, TypeError):
                                 continue
+
+                            if not all([title, download_url]):
+                                continue
+
+                            # Filter unseeded torrent
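+                            # minseed/minleech may still be None; in Python 2 None compares lower than any int, so unset limits never filter anything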
+                            if seeders < self.minseed or leechers < self.minleech:
+                                if mode != 'RSS':
+                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
+                                continue
 
+                            item = title, download_url, size, seeders, leechers
+                            if mode != 'RSS':
+                                logger.log(u"Found result: %s " % title, logger.DEBUG)
+
+                            items[mode].append(item)
+
                 except Exception:
                     logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.WARNING)
 
+            # For each search mode sort all the items by seeders if available
+            items[mode].sort(key=lambda tup: tup[3], reverse=True)
+
             results += items[mode]
 
         return results
+
+    def downloadResult(self, result):
+        """
+        Save the result to disk. Results link to a show sheet rather than a
+        torrent file, so the actual torrent URL is scraped from that page first.
+        """
+
+        # check for auth
+        if not self._doLogin():
+            return False
+
+        urls, filename = self._makeURL(result)
+
+        for url in urls:
+            # Search results don't link to torrent files directly; they point to a show sheet that has to be parsed to find the actual .torrent URL.
+            data = self.getURL(url)
+            # Guard against an empty page or a missing torrent link before calling .group()
+            torrent_match = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data or '', re.DOTALL)
+            if not torrent_match:
+                continue
+            url_torrent = torrent_match.group()
+
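+            # Set the Referer to the torrent host's root URL; the download host seems to expect it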
+            if url_torrent.startswith('http'):
+                self.headers.update({'Referer': '/'.join(url_torrent.split('/')[:3]) + '/'})
+
+            logger.log(u"Downloading a result from " + self.name + " at " + url)
+
+            if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):
+                if self._verify_download(filename):
+                    logger.log(u"Saved result to " + filename, logger.INFO)
+                    return True
+                else:
+                    logger.log(u"Could not download %s" % url, logger.WARNING)
+                    helpers.remove_file_failed(filename)
+
+        if urls:
+            logger.log(u"Failed to download any results", logger.WARNING)
+
+        return False
 
     @staticmethod
     def _convertSize(size):
@@ -148,9 +204,11 @@ class newpctProvider(generic.TorrentProvider):
             size = size * 1024**4
         return int(size)
 
     def _processTitle(self, title):
 
-        title = title.replace('Descargar ', '')
+        # Strip the leading "Mas informacion sobre " text (22 characters) from the title
+        title = title[22:]
 
         # Quality
         title = title.replace('[HDTV]', '[720p HDTV x264]')
@@ -170,9 +228,8 @@ class newpctProvider(generic.TorrentProvider):
         title = title.replace('[MicroHD 1080p]', '[1080p BlueRay x264]')
 
         return title
-
-
-
+
+
 class newpctCache(tvcache.TVCache):
     def __init__(self, provider_obj):