Commit c4249de4 authored by miigotu

Merge pull request #724 from SickRage/providers

Fix/Rewrite danishbits and add RSS capability
parents 4dab672e 2e93ca7f
 # coding=utf-8
-# Author: seedboy
-# URL: https://github.com/seedboy
+# Author: Dustyn Gibson <miigotu@gmail.com>
+# URL: https://sickrage.github.io
 #
 # This file is part of SickRage.
 #
@@ -17,13 +17,12 @@
 # You should have received a copy of the GNU General Public License
 # along with SickRage. If not, see <http://www.gnu.org/licenses/>.

-import traceback
-import urllib
-import time
+from urllib import urlencode
+from requests.utils import dict_from_cookiejar

 from sickbeard import logger
 from sickbeard import tvcache
-from sickrage.helper.common import convert_size
+from sickrage.helper.common import try_int, convert_size
 from sickrage.providers.torrent.TorrentProvider import TorrentProvider
 from sickbeard.bs4_parser import BS4Parser
@@ -41,134 +40,121 @@ class DanishbitsProvider(TorrentProvider):  # pylint: disable=too-many-instance-attributes
         self.cache = DanishbitsCache(self)

-        self.urls = {'base_url': 'https://danishbits.org/',
-                     'search': 'https://danishbits.org/torrents.php?action=newbrowse&search=%s%s',
-                     'login_page': 'https://danishbits.org/login.php'}
-
-        self.url = self.urls['base_url']
-        self.categories = '&group=3'
-
-        self.last_login_check = None
-        self.login_opener = None
+        self.url = 'https://danishbits.org/'
+        self.urls = {
+            'login': self.url + 'login.php',
+            'search': self.url + 'torrents.php',
+        }

         self.minseed = 0
         self.minleech = 0
         self.freeleech = True

-    @staticmethod
-    def loginSuccess(output):
-        return output and "<title>Login :: Danishbits.org</title>" not in output
-
     def login(self):
-        now = time.time()
-        if self.login_opener and self.last_login_check < (now - 3600):
-            try:
-                output = self.get_url(self.urls['test'])
-                if self.loginSuccess(output):
-                    self.last_login_check = now
-                    return True
-                else:
-                    self.login_opener = None
-            except Exception:
-                self.login_opener = None
-
-        if self.login_opener:
+        if any(dict_from_cookiejar(self.session.cookies).values()):
             return True

-        try:
-            data = self.get_url(self.urls['login_page'])
-            if not data:
-                return False
-
-            login_params = {
-                'username': self.username,
-                'password': self.password,
-            }
-            output = self.get_url(self.urls['login_page'], post_data=login_params)
-            if self.loginSuccess(output):
-                self.last_login_check = now
-                self.login_opener = self.session
-                return True
-
-            error = 'unknown'
-        except Exception:
-            error = traceback.format_exc()
-            self.login_opener = None
-            return False
-
-        self.login_opener = None
-        logger.log(u"Failed to login: %s" % error, logger.ERROR)
-        return False
+        login_params = {
+            'langlang': '',
+            'username': self.username.encode('utf-8'),
+            'password': self.password.encode('utf-8'),
+            'keeplogged': 1,
+            'login': 'Login'
+        }
+
+        response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
+        if not response:
+            logger.log(u"Unable to connect to provider", logger.WARNING)
+            self.session.cookies.clear()
+            return False
+
+        if '<title>Login :: Danishbits.org</title>' in response:
+            logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
+            self.session.cookies.clear()
+            return False
+
+        return True
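The rewritten login() drops the login_opener and last_login_check bookkeeping in favor of a stateless check of the session's cookie jar: if any cookie is already set, a previous login is assumed and the POST is skipped. A minimal sketch of that check in isolation, using a bare requests.Session in place of the provider's self.session:

    import requests
    from requests.utils import dict_from_cookiejar

    session = requests.Session()

    def already_logged_in(session):
        # dict_from_cookiejar() flattens the jar into {name: value}; any truthy
        # value means a previous POST to login.php already set a session cookie.
        return any(dict_from_cookiejar(session.cookies).values())

    # A fresh session has no cookies, so a full login POST is still required.
    assert not already_logged_in(session)

Note that both failure paths in the diff clear the cookie jar, so the next call retries a full login instead of trusting stale cookies.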
-    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-branches,too-many-locals
+    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches,too-many-locals
         results = []
         if not self.login():
             return results

-        for mode in search_params:
+        search_params = {
+            'action': 'newbrowse',
+            'group': 3,
+            'search': '',
+        }
+
+        for mode in search_strings:
             items = []
             logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
-            for search_string in search_params[mode]:
-                if mode == 'RSS':
-                    continue
-
+            for search_string in search_strings[mode]:
                 if mode != 'RSS':
                     logger.log(u"Search string: %s " % search_string, logger.DEBUG)

-                searchURL = self.urls['search'] % (urllib.quote(search_string.encode('utf-8')), self.categories)
-                logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
-                data = self.get_url(searchURL)
+                search_params['search'] = search_string
+
+                search_url = "%s?%s" % (self.urls['search'], urlencode(search_params))
+                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
+
+                # returns top 15 results by default, expandable in user profile to 100
+                data = self.get_url(search_url)
                 if not data:
                     continue

-                try:
-                    with BS4Parser(data, "html5lib") as html:
-                        # Collecting entries
-                        entries = html.find_all('tr', attrs={'class': 'torrent'})
-
-                        # Xirg STANDARD TORRENTS
-                        # Continue only if one Release is found
-                        if not entries:
-                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
-                            continue
-
-                        for result in entries:
-                            # try:
-                            title = result.find('div', attrs={'class': 'croptorrenttext'}).find('b').text
-                            download_url = self.urls['base_url'] + result.find('span', attrs={'class': 'right'}).find('a')['href']
-                            seeders = int(result.find_all('td')[6].text)
-                            leechers = int(result.find_all('td')[7].text)
-                            torrent_size = result.find_all('td')[2].text
-                            size = convert_size(torrent_size) or -1
-                            freeleech = result.find('span', class_='freeleech')
-                            # except (AttributeError, TypeError, KeyError):
-                            #     logger.log(u"attrErr: {0}, tErr: {1}, kErr: {2}".format(AttributeError, TypeError, KeyError), logger.DEBUG)
-                            #     continue
-
-                            if self.freeleech and not freeleech:
-                                continue
-
-                            if not all([title, download_url]):
-                                continue
-
-                            # Filter unseeded torrent
-                            if seeders < self.minseed or leechers < self.minleech:
-                                if mode != 'RSS':
-                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
-                                continue
-
-                            item = title, download_url, size, seeders, leechers
-                            if mode != 'RSS':
-                                logger.log(u"Found result: %s " % title, logger.DEBUG)
-
-                            items.append(item)
-                except Exception:
-                    logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
+                with BS4Parser(data, 'html5lib') as html:
+                    torrent_table = html.find('table', class_='torrent_table')
+                    torrent_rows = torrent_table.find_all('tr') if torrent_table else []
+
+                    # Continue only if at least one Release is found
+                    if len(torrent_rows) < 2:
+                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
+                        continue
+
+                    def process_column_header(td):
+                        result = ''
+                        if td.img:
+                            result = td.img.get('title')
+                        if not result:
+                            result = td.get_text(strip=True)
+                        return result.encode('utf-8')
+
+                    # Literal:     Navn, Størrelse, Kommentarer, Tilføjet, Snatches, Seeders, Leechers
+                    # Translation: Name, Size, Comments, Added, Snatches, Seeders, Leechers
+                    labels = [process_column_header(label) for label in torrent_rows[0].find_all('td')]
+
+                    for result in torrent_rows[1:]:
+                        try:
+                            title = result.find(class_='croptorrenttext').get_text(strip=True)
+                            download_url = self.url + result.find(title="Direkte download link")['href']
+                            if not all([title, download_url]):
+                                continue
+
+                            cells = result.find_all('td')
+                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
+                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))
+
+                            # Filter unseeded torrent
+                            if seeders < self.minseed or leechers < self.minleech:
+                                if mode != 'RSS':
+                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
+                                continue
+
+                            freeleech = result.find(class_='freeleech')
+                            if self.freeleech and not freeleech:
+                                continue
+
+                            torrent_size = cells[labels.index('Størrelse')].contents[0]
+                            size = convert_size(torrent_size) or -1
+
+                            item = title, download_url, size, seeders, leechers
+                            if mode != 'RSS':
+                                logger.log(u"Found result: %s " % title, logger.DEBUG)
+
+                            items.append(item)
+                        except StandardError:
+                            continue

             # For each search mode sort all the items by seeders if available
             items.sort(key=lambda tup: tup[3], reverse=True)
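Two techniques in the rewritten search() are worth calling out. The query string is now assembled with urlencode() over a parameter dict instead of %-interpolating a pre-quoted string, so escaping is handled uniformly; and result columns are resolved through the header labels (labels.index('Seeders')) rather than hard-coded positions like td[6], so a reordered table no longer breaks the parser. A quick illustration of the URL building, with a hypothetical search string (Python 2, as in this file; dict ordering is arbitrary, so the parameter order may differ):

    from urllib import urlencode

    search_params = {'action': 'newbrowse', 'group': 3, 'search': ''}
    search_params['search'] = 'Some.Show.S01E01'  # hypothetical search string

    search_url = "%s?%s" % ('https://danishbits.org/torrents.php', urlencode(search_params))
    # e.g.: https://danishbits.org/torrents.php?action=newbrowse&group=3&search=Some.Show.S01E01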
@@ -190,8 +176,8 @@ class DanishbitsCache(tvcache.TVCache):
         self.minTime = 10

     def _getRSSData(self):
-        search_params = {'RSS': ['']}
-        return {'entries': self.provider.search(search_params)}
+        search_strings = {'RSS': ['']}
+        return {'entries': self.provider.search(search_strings)}

 provider = DanishbitsProvider()
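The RSS capability named in the merge title falls out of the search() rewrite: the old code skipped 'RSS' mode outright, while the new code lets the cache pass {'RSS': ['']}, so the empty search string becomes search='' and torrents.php returns its default latest-torrents listing. A hedged sketch of that path, assuming the usual SickRage module layout and configured credentials (both are assumptions here, not part of the commit), and that search() returns the (title, download_url, size, seeders, leechers) tuples it appends:

    # Illustrative only: exercises the RSS path the cache uses internally.
    from sickbeard.providers.danishbits import provider  # assumed module path

    provider.username = 'user'    # hypothetical credentials
    provider.password = 'secret'

    # What DanishbitsCache._getRSSData() does under the hood:
    entries = provider.search({'RSS': ['']})  # empty search -> default browse listing
    for title, download_url, size, seeders, leechers in entries:
        print title, seeders, leechers  # Python 2 print statement, matching the file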