Commit 462beb63 authored by miigotu

Remove animeNZB (Site gone for over a month)

Rewrite parts of omgwtfnzbs and tokyotoshokan to work with the new search strings
Remove old search string generators in show_name_helpers
Fix several bugs with tokyotoshokan
parent 70ca0ef5
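Across the rewritten providers, search() now takes a dict of search strings keyed by search mode rather than a single string. A minimal sketch of the assumed structure (the mode names follow the patterns visible in the diff; the query values are illustrative, not from this commit):

search_strings = {
    'RSS': [''],                       # empty query for feed polling
    'Season': ['Show Name S01'],       # illustrative
    'Episode': ['Show Name S01E02'],   # illustrative
}
for mode in search_strings:
    for search_string in search_strings[mode]:
        pass  # each provider builds its query from (mode, search_string)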
@@ -22,16 +22,15 @@ from os import sys
from random import shuffle
import sickbeard
from sickbeard import logger
from sickbeard.providers import btn, newznab, rsstorrent, womble, thepiratebay, torrentleech, kat, iptorrents, torrentz, \
- omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, hounddawgs, speedcd, nyaatorrents, animenzb, bluetigers, cpasbien, fnt, xthor, torrentbytes, \
+ omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, hounddawgs, speedcd, nyaatorrents, bluetigers, cpasbien, fnt, xthor, torrentbytes, \
freshontv, morethantv, bitsoup, t411, tokyotoshokan, shazbat, rarbg, alpharatio, tntvillage, binsearch, torrentproject, extratorrent, \
scenetime, btdigg, transmitthenet, tvchaosuk, bitcannon, pretome, gftracker, hdspace, newpct, elitetorrent, bitsnoop, danishbits, hd4free, limetorrents
__all__ = [
'womble', 'btn', 'thepiratebay', 'kat', 'torrentleech', 'scc', 'hdtorrents',
'torrentday', 'hdbits', 'hounddawgs', 'iptorrents', 'omgwtfnzbs',
- 'speedcd', 'nyaatorrents', 'animenzb', 'torrentbytes', 'freshontv',
+ 'speedcd', 'nyaatorrents', 'torrentbytes', 'freshontv',
'morethantv', 'bitsoup', 't411', 'tokyotoshokan', 'alpharatio',
'shazbat', 'rarbg', 'tntvillage', 'binsearch', 'bluetigers', 'cpasbien',
'fnt', 'xthor', 'scenetime', 'btdigg', 'transmitthenet', 'tvchaosuk',
......
@@ -85,8 +85,8 @@ class AlphaRatioProvider(TorrentProvider): # pylint: disable=too-many-instance-
}
for mode in search_strings:
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
items = []
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
......
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib
import datetime
from sickbeard import classes
from sickbeard import show_name_helpers
from sickbeard import logger
from sickbeard import tvcache
from sickrage.providers.nzb.NZBProvider import NZBProvider
class animenzb(NZBProvider):
def __init__(self):
NZBProvider.__init__(self, "AnimeNZB")
self.supports_backlog = False
self.public = True
self.supports_absolute_numbering = True
self.anime_only = True
self.urls = {'base_url': 'http://animenzb.com/'}
self.url = self.urls['base_url']
self.cache = animenzbCache(self)
def _get_season_search_strings(self, ep_obj):
return [x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
def _get_episode_search_strings(self, ep_obj, add_string=''):
return [x for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
def search(self, search_string, age=0, ep_obj=None):
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
if self.show and not self.show.is_anime:
return []
params = {
"cat": "anime",
"q": search_string.encode('utf-8'),
"max": "100"
}
searchURL = self.url + "rss?" + urllib.urlencode(params)
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
results = []
if 'entries' in self.cache.getRSSFeed(searchURL):
for curItem in self.cache.getRSSFeed(searchURL)['entries']:
(title, url) = self._get_title_and_url(curItem)
if title and url:
results.append(curItem)
logger.log(u"Found result: %s " % title, logger.DEBUG)
# For each search mode sort all the items by seeders if available
results.sort(key=lambda tup: tup[0], reverse=True)
return results
def find_propers(self, search_date=None):
results = []
for item in self.search("v2|v3|v4|v5"):
(title, url) = self._get_title_and_url(item)
if 'published_parsed' in item and item['published_parsed']:
result_date = item.published_parsed
if result_date:
result_date = datetime.datetime(*result_date[0:6])
else:
continue
if not search_date or result_date > search_date:
search_result = classes.Proper(title, url, result_date, self.show)
results.append(search_result)
return results
class animenzbCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll animenzb every 20 minutes max
self.minTime = 20
def _getRSSData(self):
params = {
"cat": "anime".encode('utf-8'),
"max": "100".encode('utf-8')
}
rss_url = self.provider.url + 'rss?' + urllib.urlencode(params)
return self.getRSSFeed(rss_url)
provider = animenzb()
@@ -24,7 +24,6 @@ import sickbeard
from sickbeard import tvcache
from sickbeard import classes
from sickbeard import logger
- from sickbeard import show_name_helpers
from sickrage.helper.common import try_int
from sickrage.providers.nzb.NZBProvider import NZBProvider
@@ -37,13 +36,17 @@ class OmgwtfnzbsProvider(NZBProvider):
self.api_key = None
self.cache = OmgwtfnzbsCache(self)
- self.urls = {'base_url': 'https://omgwtfnzbs.org/'}
- self.url = self.urls['base_url']
+ self.url = 'https://omgwtfnzbs.org/'
+ self.urls = {
+ 'rss': 'https://rss.omgwtfnzbs.org/rss-download.php',
+ 'api': 'https://api.omgwtfnzbs.org/json/'
+ }
def _check_auth(self):
if not self.username or not self.api_key:
logger.log(u"Invalid api key. Check your settings", logger.WARNING)
return False
return True
@@ -56,15 +59,13 @@ class OmgwtfnzbsProvider(NZBProvider):
# provider doesn't return xml on error
return True
else:
- parsedJSON = parsed_data
- if 'notice' in parsedJSON:
- description_text = parsedJSON.get('notice')
+ if 'notice' in parsed_data:
+ description_text = parsed_data.get('notice')
- if 'information is incorrect' in parsedJSON.get('notice'):
+ if 'information is incorrect' in parsed_data.get('notice'):
logger.log(u"Invalid api key. Check your settings", logger.WARNING)
- elif '0 results matched your terms' in parsedJSON.get('notice'):
+ elif '0 results matched your terms' in parsed_data.get('notice'):
return True
else:
@@ -73,51 +74,54 @@ class OmgwtfnzbsProvider(NZBProvider):
return True
- def _get_season_search_strings(self, ep_obj):
- return [x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
- def _get_episode_search_strings(self, ep_obj, add_string=''):
- return [x for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
def _get_title_and_url(self, item):
return item['release'], item['getnzb']
def _get_size(self, item):
return try_int(item['sizebytes'], -1)
- def search(self, search, age=0, ep_obj=None):
- self._check_auth()
+ def search(self, search_strings, age=0, ep_obj=None):
+ results = []
+ if not self._check_auth():
+ return results
- params = {'user': self.username,
+ search_params = {
+ 'user': self.username,
'api': self.api_key,
'eng': 1,
'catid': '19,20', # SD,HD
'retention': sickbeard.USENET_RETENTION,
- 'search': search}
+ }
+ if age or not search_params['retention']:
+ search_params['retention'] = age
- if age or not params['retention']:
- params['retention'] = age
+ for mode in search_strings:
+ items = []
+ logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
+ for search_string in search_strings[mode]:
- searchURL = 'https://api.omgwtfnzbs.org/json/?' + urllib.urlencode(params)
- logger.log(u"Search string: %s" % params, logger.DEBUG)
- logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
+ search_params['search'] = search_string
- parsedJSON = self.get_url(searchURL, json=True)
- if not parsedJSON:
- return []
+ if mode != 'RSS':
+ logger.log(u"Search string: %s " % search_string, logger.DEBUG)
- if self._checkAuthFromData(parsedJSON, is_XML=False):
- results = []
+ logger.log(u"Search URL: %s" % self.urls['api'] + '?' + urllib.urlencode(search_params), logger.DEBUG)
+ data = self.get_url(self.urls['api'], params=search_params, json=True)
+ if not data:
+ continue
+ if self._checkAuthFromData(data, is_XML=False):
+ continue
- for item in parsedJSON:
+ for item in data:
if 'release' in item and 'getnzb' in item:
logger.log(u"Found result: %s " % item.get('title'), logger.DEBUG)
- results.append(item)
+ items.append(item)
- return results
+ results += items
- return []
+ return results
def find_propers(self, search_date=None):
search_terms = ['.PROPER.', '.REPACK.']
@@ -126,7 +130,6 @@ class OmgwtfnzbsProvider(NZBProvider):
for term in search_terms:
for item in self.search(term, age=4):
if 'usenetage' in item:
- title, url = self._get_title_and_url(item)
try:
result_date = datetime.fromtimestamp(int(item['usenetage']))
@@ -165,12 +168,14 @@ class OmgwtfnzbsCache(tvcache.TVCache):
return title, url
def _getRSSData(self):
- params = {'user': provider.username,
+ search_params = {
+ 'user': provider.username,
'api': provider.api_key,
'eng': 1,
- 'catid': '19,20'} # SD,HD
+ 'catid': '19,20' # SD,HD
+ }
- rss_url = 'https://rss.omgwtfnzbs.org/rss-download.php?' + urllib.urlencode(params)
+ rss_url = self.provider.urls['rss'] + '?' + urllib.urlencode(search_params)
logger.log(u"Cache update URL: %s" % rss_url, logger.DEBUG)
......
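For reference, a standalone sketch of the JSON API query the rewritten search() assembles. The credentials and search term are placeholders, not values from this commit:

import urllib

search_params = {
    'user': 'example_user',        # placeholder credentials
    'api': 'example_api_key',      # placeholder
    'eng': 1,
    'catid': '19,20',              # SD,HD
    'retention': 400,              # illustrative retention, in days
    'search': 'Show.Name.S01E02',  # filled in per search string
}
# Mirrors self.urls['api'] + '?' + urllib.urlencode(search_params) above
print('https://api.omgwtfnzbs.org/json/?' + urllib.urlencode(search_params))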
@@ -17,17 +17,18 @@
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
+ import re
import urllib
import traceback
from sickbeard import logger
from sickbeard import tvcache
- from sickbeard import show_name_helpers
from sickbeard.bs4_parser import BS4Parser
+ from sickrage.helper.common import try_int, convert_size
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
- class TokyoToshokanProvider(TorrentProvider):
+ class TokyoToshokanProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "TokyoToshokan")
@@ -37,76 +38,87 @@ class TokyoToshokanProvider(TorrentProvider):
self.anime_only = True
self.ratio = None
- self.cache = TokyoToshokanCache(self)
+ self.minseed = None
+ self.minleech = None
- self.urls = {'base_url': 'http://tokyotosho.info/'}
- self.url = self.urls['base_url']
+ self.url = 'http://tokyotosho.info/'
+ self.urls = {
+ 'search': self.url + 'search.php',
+ 'rss': self.url + 'rss.php'
+ }
+ self.cache = TokyoToshokanCache(self)
def seed_ratio(self):
return self.ratio
- def _get_season_search_strings(self, ep_obj):
- return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
- def _get_episode_search_strings(self, ep_obj, add_string=''):
- return [x.replace('.', ' ') for x in show_name_helpers.makeSceneSearchString(self.show, ep_obj)]
- def search(self, search_string, age=0, ep_obj=None):
- # FIXME ADD MODE
- if self.show and not self.show.is_anime:
- return []
+ def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals
+ results = []
+ if not self.show or not self.show.is_anime:
+ return results
+ for mode in search_strings:
+ items = []
+ logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
+ for search_string in search_strings[mode]:
+ if mode != 'RSS':
+ logger.log(u"Search string: %s " % search_string, logger.DEBUG)
- params = {
+ search_params = {
"terms": search_string.encode('utf-8'),
"type": 1, # get anime types
}
- searchURL = self.url + 'search.php?' + urllib.urlencode(params)
- logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
- data = self.get_url(searchURL)
+ logger.log(u"Search URL: %s" % self.urls['search'] + '?' + urllib.urlencode(search_params), logger.DEBUG)
+ data = self.get_url(self.urls['search'], params=search_params)
if not data:
- return []
+ continue
- results = []
try:
with BS4Parser(data, 'html5lib') as soup:
- torrent_table = soup.find('table', attrs={'class': 'listing'})
+ torrent_table = soup.find('table', class_='listing')
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
- if torrent_rows:
- if torrent_rows[0].find('td', attrs={'class': 'centertext'}):
- a = 1
- else:
- a = 0
- for top, bottom in zip(torrent_rows[a::2], torrent_rows[a::2]):
- title = top.find('td', attrs={'class': 'desc-top'}).text
- title.lstrip()
- download_url = top.find('td', attrs={'class': 'desc-top'}).find('a')['href']
- # FIXME
- size = -1
- seeders = 1
- leechers = 0
+ # Continue only if one Release is found
+ if len(torrent_rows) < 2:
+ logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
+ continue
+ a = 1 if len(torrent_rows[0].find_all('td')) < 2 else 0
+ for top, bot in zip(torrent_rows[a::2], torrent_rows[a+1::2]):
+ try:
+ desc_top = top.find('td', class_='desc-top')
+ title = desc_top.get_text(strip=True)
+ download_url = desc_top.find('a')['href']
+ desc_bottom = bot.find('td', class_='desc-bot').get_text(strip=True)
+ size = convert_size(desc_bottom.split('|')[1].strip('Size: ')) or -1
+ stats = bot.find('td', class_='stats').get_text(strip=True)
+ sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)', stats.replace(' ', ''))
+ seeders = try_int(sl.group('seeders')) if sl else 0
+ leechers = try_int(sl.group('leechers')) if sl else 0
+ except StandardError:
+ continue
if not all([title, download_url]):
continue
- # Filter unseeded torrent
- # if seeders < self.minseed or leechers < self.minleech:
- # if mode != 'RSS':
- # logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
- # continue
+ if seeders < self.minseed or leechers < self.minleech:
+ if mode != 'RSS':
+ logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
+ continue
item = title, download_url, size, seeders, leechers
+ if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
- results.append(item)
+ items.append(item)
except Exception as e:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
+ # For each search mode sort all the items by seeders if available
+ items.sort(key=lambda tup: tup[3], reverse=True)
+ results += items
- # FIXME SORTING
return results
@@ -114,19 +126,22 @@ class TokyoToshokanCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
- # only poll NyaaTorrents every 15 minutes max
+ # only poll TokyoToshokan every 15 minutes max
self.minTime = 15
- def _getRSSData(self):
- params = {
- "filter": '1',
- }
- url = self.provider.url + 'rss.php?' + urllib.urlencode(params)
- logger.log(u"Cache update URL: %s" % url, logger.DEBUG)
- return self.getRSSFeed(url)
+ # def _getRSSData(self):
+ # params = {
+ # "filter": '1',
+ # }
+ #
+ # url = self.provider.urls['rss'] + '?' + urllib.urlencode(params)
+ #
+ # logger.log(u"Cache update URL: %s" % url, logger.DEBUG)
+ #
+ # return self.getRSSFeed(url)
+ def _getRSSData(self):
+ search_strings = {'RSS': ['']}
+ return {'entries': self.provider.search(search_strings)}
provider = TokyoToshokanProvider()
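A standalone sketch of the seeders/leechers parsing introduced above. The sample string follows TokyoToshokan's "S: n L: n C: n ID: n" stats-cell format, but the numbers are made up:

import re

stats = 'S: 12 L: 3 C: 450 ID: 123456'  # illustrative stats cell text
sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)',
              stats.replace(' ', ''))
seeders = int(sl.group('seeders')) if sl else 0
leechers = int(sl.group('leechers')) if sl else 0
print(seeders, leechers)  # 12 3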
@@ -21,17 +21,13 @@ import fnmatch
import os
import re
import datetime
- from functools import partial
import sickbeard
from sickbeard import common
from sickbeard.helpers import sanitizeSceneName
from sickbeard.scene_exceptions import get_scene_exceptions
from sickbeard import logger
from sickbeard import db
- from sickrage.helper.encoding import ek, ss
- from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
+ from sickrage.helper.encoding import ek
+ from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
resultFilters = [
"sub(bed|ed|pack|s)",
@@ -56,7 +52,7 @@ def containsAtLeastOneWord(name, words):
"""
if isinstance(words, basestring):
words = words.split(',')
- items = [(re.compile('(^|[\W_])%s($|[\W_])' % re.escape(word.strip()), re.I), word.strip()) for word in words]
+ items = [(re.compile(r'(^|[\W_])%s($|[\W_])' % re.escape(word.strip()), re.I), word.strip()) for word in words]
for regexp, word in items:
if regexp.search(name):
return word
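A standalone sketch of the word-boundary match containsAtLeastOneWord performs; the release name and word are illustrative:

import re

word = 'x264'
pattern = re.compile(r'(^|[\W_])%s($|[\W_])' % re.escape(word), re.I)
print(bool(pattern.search('Show.Name.S01E02.x264-GRP')))  # True: delimited by . and -
print(bool(pattern.search('Show.Name.S01E02.ax264b')))    # False: embedded in a word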
@@ -104,195 +100,6 @@ def filterBadReleases(name, parse=True):
return True
def sceneToNormalShowNames(name):
"""
Takes a show name from a scene dirname and converts it to a more "human-readable" format.
name: The show name to convert
Returns: a list of all the possible "normal" names
"""
if not name:
return []
name_list = [name]
# use both and and &
new_name = re.sub('(?i)([\. ])and([\. ])', '\\1&\\2', name, re.I)
if new_name not in name_list:
name_list.append(new_name)
results = []
for cur_name in name_list:
# add brackets around the year
results.append(re.sub('(\D)(\d{4})$', '\\1(\\2)', cur_name))
# add brackets around the country
country_match_str = '|'.join(common.countryList.values())
results.append(re.sub('(?i)([. _-])(' + country_match_str + ')$', '\\1(\\2)', cur_name))
results += name_list
return list(set(results))
def makeSceneShowSearchStrings(show, season=-1, anime=False):
showNames = allPossibleShowNames(show, season=season)
# scenify the names
if anime:
sanitizeSceneNameAnime = partial(sanitizeSceneName, anime=True)
return map(sanitizeSceneNameAnime, showNames)
else:
return map(sanitizeSceneName, showNames)
def makeSceneSeasonSearchString(show, ep_obj, extraSearchType=None):
if show.air_by_date or show.sports:
numseasons = 0
# the search string for air by date shows is just
seasonStrings = [str(ep_obj.airdate).split('-')[0]]
elif show.is_anime:
numseasons = 0
seasonEps = show.getAllEpisodes(ep_obj.season)
# get show qualities
anyQualities, bestQualities = common.Quality.splitQuality(show.quality)
# compile a list of all the episode numbers we need in this 'season'
seasonStrings = []
for episode in seasonEps:
# get quality of the episode
curCompositeStatus = episode.status
curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
if bestQualities:
highestBestQuality = max(bestQualities)
else:
highestBestQuality = 0
# if we need a better one then add it to the list of episodes to fetch
if (curStatus in (
common.DOWNLOADED,
common.SNATCHED) and curQuality < highestBestQuality) or curStatus == common.WANTED:
ab_number = episode.scene_absolute_number
if ab_number > 0:
seasonStrings.append("%02d" % ab_number)
else:
myDB = db.DBConnection()
numseasonsSQlResult = myDB.select(
"SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0",
[show.indexerid])
numseasons = int(numseasonsSQlResult[0][0])
seasonStrings = ["S%02d" % int(ep_obj.scene_season)]
showNames = set(makeSceneShowSearchStrings(show, ep_obj.scene_season))
toReturn = []
# search each show name
for curShow in showNames:
# most providers all work the same way
if not extraSearchType:
# if there's only one season then we can just use the show name straight up
if numseasons == 1:
toReturn.append(curShow)
# for providers that don't allow multiple searches in one request we only search for Sxx style stuff
else:
for cur_season in seasonStrings:
if ep_obj.show.is_anime:
if ep_obj.show.release_groups is not None:
if len(show.release_groups.whitelist) > 0:
for keyword in show.release_groups.whitelist:
toReturn.append(keyword + '.' + curShow + "." + cur_season)
else:
toReturn.append(curShow + "." + cur_season)
return toReturn
def makeSceneSearchString(show, ep_obj):
myDB = db.DBConnection()
numseasonsSQlResult = myDB.select(
"SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0",
[show.indexerid])
numseasons = int(numseasonsSQlResult[0][0])
# see if we should use dates instead of episodes
if (show.air_by_date or show.sports) and ep_obj.airdate != datetime.date.fromordinal(1):
epStrings = [str(ep_obj.airdate)]
elif show.is_anime:
epStrings = ["%02i" % int(ep_obj.scene_absolute_number if ep_obj.scene_absolute_number > 0 else ep_obj.scene_episode)]
else:
epStrings = ["S%02iE%02i" % (int(ep_obj.scene_season), int(ep_obj.scene_episode)),
"%ix%02i" % (int(ep_obj.scene_season), int(ep_obj.scene_episode))]
# for single-season shows just search for the show name -- if total ep count (exclude s0) is less than 11
# due to the amount of qualities and releases, it is easy to go over the 50 result limit on rss feeds otherwise
if numseasons == 1 and not ep_obj.show.is_anime:
epStrings = ['']
showNames = set(makeSceneShowSearchStrings(show, ep_obj.scene_season))
toReturn = []
for curShow in showNames:
for curEpString in epStrings:
if ep_obj.show.is_anime:
if ep_obj.show.release_groups is not None:
if len(ep_obj.show.release_groups.whitelist) > 0:
for keyword in ep_obj.show.release_groups.whitelist:
toReturn.append(keyword + '.' + curShow + '.' + curEpString)
elif len(ep_obj.show.release_groups.blacklist) == 0:
# If we have neither whitelist or blacklist we just append what we have
toReturn.append(curShow + '.' + curEpString)
else:
toReturn.append(curShow + '.' + curEpString)
return toReturn
def isGoodResult(name, show, log=True, season=-1):
"""
Use an automatically-created regex to make sure the result actually is the show it claims to be
"""
all_show_names = allPossibleShowNames(show, season=season)
showNames = map(sanitizeSceneName, all_show_names) + all_show_names
showNames += map(ss, all_show_names)
for curName in set(showNames):
if not show.is_anime:
escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(curName))
if show.startyear:
escaped_name += "(?:\W+" + str(show.startyear) + ")?"
curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}\W+[a-zA-Z]{3,}\W+\d{4}.+))'
else:
escaped_name = re.sub('\\\\[\\s.-]', '[\W_]+', re.escape(curName))
# FIXME: find a "automatically-created" regex for anime releases # test at http://regexr.com?2uon3
curRegex = '^((\[.*?\])|(\d+[\.-]))*[ _\.]*' + escaped_name + '(([ ._-]+\d+)|([ ._-]+s\d{2})).*'
if log:
logger.log(u"Checking if show " + name + " matches " + curRegex, logger.DEBUG)
match = re.search(curRegex, name, re.I)
if match:
logger.log(u"Matched " + curRegex + " to " + name, logger.DEBUG)
return True
if log:
logger.log(
u"Provider gave result " + name + " but that doesn't seem like a valid result for " + show.name + " so I'm ignoring it")
return False
def allPossibleShowNames(show, season=-1):
"""
Figures out every possible variation of the name for a particular show. Includes TVDB name, TVRage name,
@@ -354,7 +161,8 @@ def determineReleaseName(dir_name=None, nzb_name=None):
reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)
files = [file_name for file_name in ek(os.listdir, dir_name) if
ek(os.path.isfile, ek(os.path.join, dir_name, file_name))]
- results = filter(reg_expr.search, files)
+ results = [f for f in files if reg_expr.search(f)]
if len(results) == 1:
found_file = ek(os.path.basename, results[0])
......
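A standalone sketch of the pattern matching in determineReleaseName with the new list comprehension; the search pattern and file names are illustrative:

import fnmatch
import re

search = 'show.name.s01e02*'
reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)
files = ['Show.Name.S01E02.x264-GRP.mkv', 'sample.mkv']
results = [f for f in files if reg_expr.search(f)]
print(results)  # ['Show.Name.S01E02.x264-GRP.mkv']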
@@ -21,21 +21,6 @@ class SceneTests(test.SickbeardTestDBCase):
"""
Test Scene
"""
def _test_scene_to_norm_show_name(self, name, expected):
"""
Test scene to normal show names
:param name:
:param expected:
:return:
"""
result = show_name_helpers.sceneToNormalShowNames(name)
self.assertTrue(len(set(expected).intersection(set(result))) == len(expected))
dot_result = show_name_helpers.sceneToNormalShowNames(name.replace(' ', '.'))
dot_expected = [x.replace(' ', '.') for x in expected]
self.assertTrue(len(set(dot_expected).intersection(set(dot_result))) == len(dot_expected))
def _test_all_possible_show_names(self, name, indexerid=0, expected=None):
"""
Test all possible show names
@@ -63,51 +48,6 @@ class SceneTests(test.SickbeardTestDBCase):
result = show_name_helpers.filterBadReleases(name)
self.assertEqual(result, expected)
def _test_is_good_name(self, name, show):
"""
Test if name is good
:param name:
:param show:
:return:
"""
self.assertTrue(show_name_helpers.isGoodResult(name, show))
def test_is_good_name(self):
"""
Perform good name tests
"""
list_of_cases = [('Show.Name.S01E02.Test-Test', 'Show/Name'),
('Show.Name.S01E02.Test-Test', 'Show. Name'),
('Show.Name.S01E02.Test-Test', 'Show- Name'),
('Show.Name.Part.IV.Test-Test', 'Show Name'),
('Show.Name.S01.Test-Test', 'Show Name'),
('Show.Name.E02.Test-Test', 'Show: Name'),
('Show Name Season 2 Test', 'Show: Name'), ]
for test_case in list_of_cases:
scene_name, show_name = test_case
show = Show(1, 0)
show.name = show_name
self._test_is_good_name(scene_name, show)
def test_scene_to_norm_show_names(self):
"""
Test scene to normal show names
"""
self._test_scene_to_norm_show_name('Show Name 2010', ['Show Name 2010', 'Show Name (2010)'])
self._test_scene_to_norm_show_name('Show Name US', ['Show Name US', 'Show Name (US)'])
self._test_scene_to_norm_show_name('Show Name AU', ['Show Name AU', 'Show Name (AU)'])
self._test_scene_to_norm_show_name('Show Name CA', ['Show Name CA', 'Show Name (CA)'])
self._test_scene_to_norm_show_name('Show and Name', ['Show and Name', 'Show & Name'])
self._test_scene_to_norm_show_name('Show and Name 2010', ['Show and Name 2010', 'Show & Name 2010', 'Show and Name (2010)', 'Show & Name (2010)'])
self._test_scene_to_norm_show_name('show name us', ['show name us', 'show name (us)'])
self._test_scene_to_norm_show_name('Show And Name', ['Show And Name', 'Show & Name'])
# failure cases
self._test_scene_to_norm_show_name('Show Name 90210', ['Show Name 90210'])
self._test_scene_to_norm_show_name('Show Name YA', ['Show Name YA'])
def test_all_possible_show_names(self):
"""
Test all possible show names
......