Private GIT

Skip to content
Snippets Groups Projects
Commit ac952f02 authored by miigotu's avatar miigotu
Browse files

Merge pull request #83 from SickRage/CristianBB-newpct-fix

CristianBB newpct fix
parents e7e8a9fa be7ac53f
No related branches found
No related tags found
No related merge requests found
# coding=utf-8
# Author: CristianBB # Author: CristianBB
# Greetings to Mr. Pine-apple # Greetings to Mr. Pine-apple
# #
...@@ -19,12 +20,13 @@ ...@@ -19,12 +20,13 @@
# along with SickRage. If not, see <http://www.gnu.org/licenses/>. # along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import traceback import traceback
import re
from six.moves import urllib from six.moves import urllib
from sickbeard import helpers
from sickbeard import logger from sickbeard import logger
from sickbeard import tvcache from sickbeard import tvcache
from sickbeard.providers import generic from sickbeard.providers import generic
from sickbeard.common import USER_AGENT
from sickbeard.bs4_parser import BS4Parser from sickbeard.bs4_parser import BS4Parser
...@@ -37,13 +39,16 @@ class newpctProvider(generic.TorrentProvider): ...@@ -37,13 +39,16 @@ class newpctProvider(generic.TorrentProvider):
self.onlyspasearch = None self.onlyspasearch = None
self.cache = newpctCache(self) self.cache = newpctCache(self)
# Unsupported
# self.minseed = None
# self.minleech = None
self.urls = { self.urls = {
'base_url': 'http://www.newpct.com', 'base_url': 'http://www.newpct.com',
'search': 'http://www.newpct.com/buscar-descargas/' 'search': 'http://www.newpct.com/buscar-descargas/'
} }
self.url = self.urls['base_url'] self.url = self.urls['base_url']
self.headers.update({'User-Agent': USER_AGENT})
""" """
Search query: Search query:
...@@ -71,14 +76,14 @@ class newpctProvider(generic.TorrentProvider): ...@@ -71,14 +76,14 @@ class newpctProvider(generic.TorrentProvider):
'q': '' 'q': ''
} }
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None): def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = [] results = []
items = {'Season': [], 'Episode': [], 'RSS': []} items = {'Season': [], 'Episode': [], 'RSS': []}
lang_info = '' if not epObj or not epObj.show else epObj.show.lang
# Only search if user conditions are true # Only search if user conditions are true
lang_info = '' if not epObj or not epObj.show else epObj.show.lang
if self.onlyspasearch and lang_info != 'es': if self.onlyspasearch and lang_info != 'es':
logger.log(u"Show info is not spanish, skipping provider search", logger.DEBUG) logger.log(u"Show info is not spanish, skipping provider search", logger.DEBUG)
return results return results
...@@ -87,6 +92,9 @@ class newpctProvider(generic.TorrentProvider): ...@@ -87,6 +92,9 @@ class newpctProvider(generic.TorrentProvider):
logger.log(u"Search Mode: %s" % mode, logger.DEBUG) logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]: for search_string in search_strings[mode]:
if mode is not 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
self.search_params.update({'q': search_string.strip()}) self.search_params.update({'q': search_string.strip()})
logger.log(u"Search URL: %s" % self.urls['search'] + '?' + urllib.parse.urlencode(self.search_params), logger.DEBUG) logger.log(u"Search URL: %s" % self.urls['search'] + '?' + urllib.parse.urlencode(self.search_params), logger.DEBUG)
...@@ -98,42 +106,90 @@ class newpctProvider(generic.TorrentProvider): ...@@ -98,42 +106,90 @@ class newpctProvider(generic.TorrentProvider):
with BS4Parser(data, features=["html5lib", "permissive"]) as html: with BS4Parser(data, features=["html5lib", "permissive"]) as html:
torrent_tbody = html.find('tbody') torrent_tbody = html.find('tbody')
if len(torrent_tbody) < 1: if not len(torrent_tbody):
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG) logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue continue
torrent_table = torrent_tbody.findAll('tr') torrent_table = torrent_tbody.findAll('tr')
num_results = len(torrent_table) - 1 if not len(torrent_table):
logger.log(u"Torrent table does not have any rows", logger.DEBUG)
continue
iteration = 0 for row in torrent_table[:-1]:
for row in torrent_table:
try: try:
if iteration < num_results:
torrent_size = row.findAll('td')[2] torrent_size = row.findAll('td')[2]
torrent_row = row.findAll('a')[1] torrent_row = row.findAll('a')[0]
download_url = torrent_row.get('href') download_url = torrent_row.get('href', '')
title_raw = torrent_row.get('title')
size = self._convertSize(torrent_size.text) size = self._convertSize(torrent_size.text)
title = self._processTitle(torrent_row.get('title', ''))
# FIXME: Provider does not provide seeders/leechers
seeders = 1
leechers = 0
title = self._processTitle(title_raw) except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
item = title, download_url, size # Filter unseeded torrent (Unsupported)
# if seeders < self.minseed or leechers < self.minleech:
# if mode is not 'RSS':
# logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
# continue
item = title, download_url, size, seeders, leechers
if mode is not 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG) logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item) items[mode].append(item)
iteration += 1
except (AttributeError, TypeError):
continue
except Exception: except Exception:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.WARNING) logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.WARNING)
# For each search mode sort all the items by seeders if available (Unsupported)
# items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode] results += items[mode]
return results return results
def downloadResult(self, result):
    """
    Save the result to disk.

    This provider's search results do not link to torrent files directly:
    they link to show-sheet pages, so each candidate page is fetched and
    scanned for the real .torrent URL before downloading.

    :param result: search result object; its URLs come from self._makeURL
    :return: True if a torrent file was downloaded and verified, else False
    """
    # check for auth
    if not self._doLogin():
        return False

    urls, filename = self._makeURL(result)

    for url in urls:
        # Search results don't return torrent files directly, it returns show sheets so we must parse showSheet to access torrent.
        data = self.getURL(url)
        if not data:
            # getURL may return None/empty on a failed request; the original
            # code would raise TypeError inside re.search here.
            continue

        match = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL)
        if not match:
            # Page contained no torrent link; the original code would raise
            # AttributeError on .group() here. Try the next candidate URL.
            logger.log(u"Could not find a torrent link at %s" % url, logger.DEBUG)
            continue
        url_torrent = match.group()

        if url_torrent.startswith('http'):
            # Some hosts refuse the download without a same-site Referer.
            self.headers.update({'Referer': '/'.join(url_torrent.split('/')[:3]) + '/'})

        logger.log(u"Downloading a result from " + self.name + " at " + url)
        if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):
            if self._verify_download(filename):
                logger.log(u"Saved result to " + filename, logger.INFO)
                return True
            else:
                # Downloaded file failed verification — discard it and keep trying.
                logger.log(u"Could not download %s" % url, logger.WARNING)
                helpers.remove_file_failed(filename)

    if len(urls):
        logger.log(u"Failed to download any results", logger.WARNING)

    return False
@staticmethod @staticmethod
def _convertSize(size): def _convertSize(size):
size, modifier = size.split(' ') size, modifier = size.split(' ')
...@@ -148,9 +204,12 @@ class newpctProvider(generic.TorrentProvider): ...@@ -148,9 +204,12 @@ class newpctProvider(generic.TorrentProvider):
size = size * 1024**4 size = size * 1024**4
return int(size) return int(size)
def _processTitle(self, title):
title = title.replace('Descargar ', '') @staticmethod
def _processTitle(title):
# Remove "Mas informacion sobre " literal from title
title = title[22:]
# Quality # Quality
title = title.replace('[HDTV]', '[720p HDTV x264]') title = title.replace('[HDTV]', '[720p HDTV x264]')
...@@ -169,8 +228,7 @@ class newpctProvider(generic.TorrentProvider): ...@@ -169,8 +228,7 @@ class newpctProvider(generic.TorrentProvider):
title = title.replace('[BluRay MicroHD]', '[1080p BlueRay x264]') title = title.replace('[BluRay MicroHD]', '[1080p BlueRay x264]')
title = title.replace('[MicroHD 1080p]', '[1080p BlueRay x264]') title = title.replace('[MicroHD 1080p]', '[1080p BlueRay x264]')
return title return title.strip()
class newpctCache(tvcache.TVCache): class newpctCache(tvcache.TVCache):
...@@ -178,8 +236,11 @@ class newpctCache(tvcache.TVCache): ...@@ -178,8 +236,11 @@ class newpctCache(tvcache.TVCache):
tvcache.TVCache.__init__(self, provider_obj) tvcache.TVCache.__init__(self, provider_obj)
self.minTime = 30 # set this 0 to suppress log line, since we aren't updating it anyways
self.minTime = 0
def _getRSSData(self):
return {'entries': []}
provider = newpctProvider() provider = newpctProvider()
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment