Private GIT

Skip to content
Snippets Groups Projects
Select Git revision
  • b0cdd5788e894c90ecc31089947eaf225b447037
  • master default protected
  • fix_nzb_cat
  • develop
  • guessit2-minimal
  • ssl_warning
  • UHD-qualities
  • fix_providers8
  • !
  • tvvault
  • provider_alpharatio
  • v5.1.1
  • v5.1
  • v5.0.3
  • v5.0.2
  • v5.0.1
  • v5.0
  • v4.2.1.07
  • v4.2.1.06
  • v4.2.1.05
  • v4.2.1.04
  • v4.2.1.03
  • v4.2.1.02
  • v4.2.1.01
  • v4.2.1.0
  • v4.2.0.6
  • v4.2.0.5
  • v4.2.0.4
  • v4.2.0.3
  • v4.2.0.2
  • v4.2.0.1
31 results

setup.py

Blame
  • binsearch.py 3.75 KiB
    # Author: moparisthebest <admin@moparisthebest.com>
    #
    # This file is part of Sick Beard.
    #
    # Sick Beard is free software: you can redistribute it and/or modify
    # it under the terms of the GNU General Public License as published by
    # the Free Software Foundation, either version 3 of the License, or
    # (at your option) any later version.
    #
    # Sick Beard is distributed in the hope that it will be useful,
    # but WITHOUT ANY WARRANTY; without even the implied warranty of
    # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    # GNU General Public License for more details.
    #
    # You should have received a copy of the GNU General Public License
    # along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.
    
    import urllib
    import re
    
    from sickbeard import logger
    from sickbeard import tvcache
    from sickrage.providers.NZBProvider import NZBProvider
    
    
    class BinSearchProvider(NZBProvider):
        """NZB provider backed by binsearch.info RSS feeds."""

        def __init__(self):
            NZBProvider.__init__(self, "BinSearch")

            # Publicly accessible indexer -- no credentials involved here.
            self.public = True

            # All results come through the RSS cache below.
            self.cache = BinSearchCache(self)

            base = 'https://www.binsearch.info/'
            self.urls = {'base_url': base}
            self.url = base

            # No usable search API on the site, so backlog searching is off.
            self.supports_backlog = False
    
    
    class BinSearchCache(tvcache.TVCache):
        """RSS cache for the BinSearch provider.

        Results are gathered by polling a fixed set of TV-related usenet
        group RSS feeds rather than by searching.
        """

        def __init__(self, provider_obj):
            tvcache.TVCache.__init__(self, provider_obj)

            # Poll Binsearch at most once every 30 minutes.
            self.minTime = 30

            # Patterns are compiled once here and reused per item.

            # These two pull the release title out of the URL embedded in
            # the item description.
            self.descTitleStart = re.compile(r'^.*https?://www\.binsearch\.info/.b=')
            self.descTitleEnd = re.compile('&amp;.*$')

            # Fallback cleaners applied to the raw feed title when the
            # description-based extraction does not match.
            self.titleCleaners = [
                re.compile(r'.?yEnc.?\(\d+/\d+\)$'),
                re.compile(r' \[\d+/\d+\] '),
            ]

        def _get_title_and_url(self, item):
            """Extract title and URL from an RSS <item> node.

            item: An elementtree.ElementTree element representing the <item>
                  tag of the RSS feed.

            Returns: a (title, url) tuple of strings; either may be None.
            """
            title = item.get('description')
            if title:
                title = u'' + title
                if not self.descTitleStart.match(title):
                    # Description is not in the expected shape -- fall back
                    # to the raw title and strip the usual usenet noise.
                    title = item.get('title')
                    if title:
                        for cleaner in self.titleCleaners:
                            title = cleaner.sub('', title)
                else:
                    # Title lives inside the description URL: chop off the
                    # prefix and suffix, then turn '+' separators into dots.
                    title = self.descTitleEnd.sub('', self.descTitleStart.sub('', title))
                    title = title.replace('+', '.')

            url = item.get('link')
            return (title, url.replace('&amp;', '&') if url else url)

        def updateCache(self):
            """Refresh the cache from the Binsearch group RSS feeds."""
            # Honour the minimum poll interval.
            if not self.shouldUpdate():
                return

            # Drop stale entries and stamp this refresh up front.
            self._clearCache()
            self.setLastUpdate()

            groups = [
                'alt.binaries.hdtv',
                'alt.binaries.hdtv.x264',
                'alt.binaries.tv',
                'alt.binaries.tvseries',
            ]

            cl = []
            for group in groups:
                url = self.provider.url + 'rss.php?' + urllib.urlencode({'max': 1000, 'g': group})

                logger.log(u"Cache update URL: %s " % url, logger.DEBUG)

                for item in self.getRSSFeed(url)['entries'] or []:
                    ci = self._parseItem(item)
                    if ci:
                        cl.append(ci)

            # Write all collected items to the DB in a single batch.
            if cl:
                self._getDB().mass_action(cl)

        def _checkAuth(self, data):
            """Reject feeds whose title reports 'Invalid Link'; otherwise pass data through."""
            if data['feed'] and data['feed']['title'] != 'Invalid Link':
                return data
            return None
    
    provider = BinSearchProvider()