diff --git a/autoProcessTV/lib/requests/__init__.py b/autoProcessTV/lib/requests/__init__.py
index bba190029e71b1f143e7de71e849051cb2ff37a7..446500bfd4fc15eb24144a37d565df34cb1f34ab 100644
--- a/autoProcessTV/lib/requests/__init__.py
+++ b/autoProcessTV/lib/requests/__init__.py
@@ -13,7 +13,7 @@ Requests is an HTTP library, written in Python, for human beings. Basic GET
 usage:
 
    >>> import requests
-   >>> r = requests.get('http://python.org')
+   >>> r = requests.get('https://www.python.org')
    >>> r.status_code
    200
    >>> 'Python is a programming language' in r.content
@@ -22,7 +22,7 @@ usage:
 ... or POST:
 
    >>> payload = dict(key1='value1', key2='value2')
-   >>> r = requests.post("http://httpbin.org/post", data=payload)
+   >>> r = requests.post('http://httpbin.org/post', data=payload)
    >>> print(r.text)
    {
      ...
@@ -36,17 +36,17 @@ usage:
 The other HTTP methods are supported - see `requests.api`. Full documentation
 is at <http://python-requests.org>.
 
-:copyright: (c) 2014 by Kenneth Reitz.
+:copyright: (c) 2015 by Kenneth Reitz.
 :license: Apache 2.0, see LICENSE for more details.
 
 """
 
 __title__ = 'requests'
-__version__ = '2.3.0'
-__build__ = 0x020300
+__version__ = '2.6.0'
+__build__ = 0x020503
 __author__ = 'Kenneth Reitz'
 __license__ = 'Apache 2.0'
-__copyright__ = 'Copyright 2014 Kenneth Reitz'
+__copyright__ = 'Copyright 2015 Kenneth Reitz'
 
 # Attempt to enable urllib3's SNI support, if possible
 try:
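In the upstream 2.6.0 release, the body of this try block (which falls outside the hunk) injects pyOpenSSL into urllib3 so that SNI works on older Python 2 builds; roughly:

    try:
        from requests.packages.urllib3.contrib import pyopenssl
        pyopenssl.inject_into_urllib3()
    except ImportError:
        pass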
diff --git a/autoProcessTV/lib/requests/adapters.py b/autoProcessTV/lib/requests/adapters.py
index 0f297ab25a639b0ba7aa433ad3993e31a0252412..02e0dd1f1d169bb57d5b3b8996b1868108d93523 100644
--- a/autoProcessTV/lib/requests/adapters.py
+++ b/autoProcessTV/lib/requests/adapters.py
@@ -9,23 +9,27 @@ and maintain connections.
 """
 
 import socket
-import copy
 
 from .models import Response
 from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
 from .packages.urllib3.response import HTTPResponse
 from .packages.urllib3.util import Timeout as TimeoutSauce
-from .compat import urlparse, basestring, urldefrag, unquote
+from .packages.urllib3.util.retry import Retry
+from .compat import urlparse, basestring
 from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
-                    except_on_missing_scheme, get_auth_from_url)
+                    prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
 from .structures import CaseInsensitiveDict
-from .packages.urllib3.exceptions import MaxRetryError
-from .packages.urllib3.exceptions import TimeoutError
-from .packages.urllib3.exceptions import SSLError as _SSLError
+from .packages.urllib3.exceptions import ConnectTimeoutError
 from .packages.urllib3.exceptions import HTTPError as _HTTPError
+from .packages.urllib3.exceptions import MaxRetryError
 from .packages.urllib3.exceptions import ProxyError as _ProxyError
+from .packages.urllib3.exceptions import ProtocolError
+from .packages.urllib3.exceptions import ReadTimeoutError
+from .packages.urllib3.exceptions import SSLError as _SSLError
+from .packages.urllib3.exceptions import ResponseError
 from .cookies import extract_cookies_to_jar
-from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
+from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
+                         ProxyError, RetryError)
 from .auth import _basic_auth_str
 
 DEFAULT_POOLBLOCK = False
@@ -57,13 +61,17 @@ class HTTPAdapter(BaseAdapter):
     :param pool_connections: The number of urllib3 connection pools to cache.
     :param pool_maxsize: The maximum number of connections to save in the pool.
     :param int max_retries: The maximum number of retries each connection
-        should attempt. Note, this applies only to failed connections and
-        timeouts, never to requests where the server returns a response.
+        should attempt. Note, this applies only to failed DNS lookups, socket
+        connections and connection timeouts, never to requests where data has
+        made it to the server. By default, Requests does not retry failed
+        connections. If you need granular control over the conditions under
+        which we retry a request, import urllib3's ``Retry`` class and pass
+        that instead.
     :param pool_block: Whether the connection pool should block for connections.
 
     Usage::
 
-      >>> import lib.requests
+      >>> import requests
       >>> s = requests.Session()
       >>> a = requests.adapters.HTTPAdapter(max_retries=3)
       >>> s.mount('http://', a)
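For the granular control the docstring above mentions, a urllib3 ``Retry`` object can be passed instead of an integer; a sketch (``total``, ``backoff_factor`` and ``status_forcelist`` are standard urllib3 ``Retry`` parameters):

    import requests
    from requests.packages.urllib3.util.retry import Retry

    # retry up to 5 times with exponential backoff, and also retry on
    # 502/503 responses (which a bare integer max_retries never does)
    retries = Retry(total=5, backoff_factor=0.2, status_forcelist=[502, 503])
    adapter = requests.adapters.HTTPAdapter(max_retries=retries)

    session = requests.Session()
    session.mount('http://', adapter)
    session.mount('https://', adapter)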
@@ -74,7 +82,10 @@ class HTTPAdapter(BaseAdapter):
     def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                  pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                  pool_block=DEFAULT_POOLBLOCK):
-        self.max_retries = max_retries
+        if max_retries == DEFAULT_RETRIES:
+            self.max_retries = Retry(0, read=False)
+        else:
+            self.max_retries = Retry.from_int(max_retries)
         self.config = {}
         self.proxy_manager = {}
 
@@ -102,14 +113,17 @@ class HTTPAdapter(BaseAdapter):
         self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                               block=self._pool_block)
 
-    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
-        """Initializes a urllib3 PoolManager. This method should not be called
-        from user code, and is only exposed for use when subclassing the
+    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
+        """Initializes a urllib3 PoolManager.
+
+        This method should not be called from user code, and is only
+        exposed for use when subclassing the
         :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
 
         :param connections: The number of urllib3 connection pools to cache.
         :param maxsize: The maximum number of connections to save in the pool.
         :param block: Block when no free connections are available.
+        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
         """
         # save these values for pickling
         self._pool_connections = connections
@@ -117,7 +131,30 @@ class HTTPAdapter(BaseAdapter):
         self._pool_block = block
 
         self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
-                                       block=block)
+                                       block=block, strict=True, **pool_kwargs)
+
+    def proxy_manager_for(self, proxy, **proxy_kwargs):
+        """Return urllib3 ProxyManager for the given proxy.
+
+        This method should not be called from user code, and is only
+        exposed for use when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param proxy: The proxy to return a urllib3 ProxyManager for.
+        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
+        :returns: ProxyManager
+        """
+        if proxy not in self.proxy_manager:
+            proxy_headers = self.proxy_headers(proxy)
+            self.proxy_manager[proxy] = proxy_from_url(
+                proxy,
+                proxy_headers=proxy_headers,
+                num_pools=self._pool_connections,
+                maxsize=self._pool_maxsize,
+                block=self._pool_block,
+                **proxy_kwargs)
+
+        return self.proxy_manager[proxy]
 
     def cert_verify(self, conn, url, verify, cert):
         """Verify a SSL certificate. This method should not be called from user
@@ -204,18 +241,9 @@ class HTTPAdapter(BaseAdapter):
         proxy = proxies.get(urlparse(url.lower()).scheme)
 
         if proxy:
-            except_on_missing_scheme(proxy)
-            proxy_headers = self.proxy_headers(proxy)
-
-            if not proxy in self.proxy_manager:
-                self.proxy_manager[proxy] = proxy_from_url(
-                                                proxy,
-                                                proxy_headers=proxy_headers,
-                                                num_pools=self._pool_connections,
-                                                maxsize=self._pool_maxsize,
-                                                block=self._pool_block)
-
-            conn = self.proxy_manager[proxy].connection_from_url(url)
+            proxy = prepend_scheme_if_needed(proxy, 'http')
+            proxy_manager = self.proxy_manager_for(proxy)
+            conn = proxy_manager.connection_from_url(url)
         else:
             # Only scheme should be lower case
             parsed = urlparse(url)
@@ -250,7 +278,7 @@ class HTTPAdapter(BaseAdapter):
         proxy = proxies.get(scheme)
 
         if proxy and scheme != 'https':
-            url, _ = urldefrag(request.url)
+            url = urldefragauth(request.url)
         else:
             url = request.path_url
 
@@ -297,7 +325,10 @@ class HTTPAdapter(BaseAdapter):
 
         :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
         :param stream: (optional) Whether to stream the request content.
-        :param timeout: (optional) The timeout on the request.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a (`connect timeout, read
+            timeout <user/advanced.html#timeouts>`_) tuple.
+        :type timeout: float or tuple
         :param verify: (optional) Whether to verify SSL certificates.
         :param cert: (optional) Any user-provided SSL certificate to be trusted.
         :param proxies: (optional) The proxies dictionary to apply to the request.
@@ -311,7 +342,18 @@ class HTTPAdapter(BaseAdapter):
 
         chunked = not (request.body is None or 'Content-Length' in request.headers)
 
-        timeout = TimeoutSauce(connect=timeout, read=timeout)
+        if isinstance(timeout, tuple):
+            try:
+                connect, read = timeout
+                timeout = TimeoutSauce(connect=connect, read=read)
+            except ValueError as e:
+                # this may raise a string formatting error.
+                err = ("Invalid timeout {0}. Pass a (connect, read) "
+                       "timeout tuple, or a single float to set "
+                       "both timeouts to the same value".format(timeout))
+                raise ValueError(err)
+        else:
+            timeout = TimeoutSauce(connect=timeout, read=timeout)
 
         try:
             if not chunked:
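Usage of the two timeout forms handled above (httpbin.org is used for illustration, as elsewhere in this file):

    import requests

    # a single float sets both the connect and the read timeout
    requests.get('http://httpbin.org/get', timeout=5)

    # a (connect, read) tuple sets them independently
    requests.get('http://httpbin.org/get', timeout=(3.05, 27))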
@@ -369,10 +411,16 @@ class HTTPAdapter(BaseAdapter):
                     # All is well, return the connection to the pool.
                     conn._put_conn(low_conn)
 
-        except socket.error as sockerr:
-            raise ConnectionError(sockerr, request=request)
+        except (ProtocolError, socket.error) as err:
+            raise ConnectionError(err, request=request)
 
         except MaxRetryError as e:
+            if isinstance(e.reason, ConnectTimeoutError):
+                raise ConnectTimeout(e, request=request)
+
+            if isinstance(e.reason, ResponseError):
+                raise RetryError(e, request=request)
+
             raise ConnectionError(e, request=request)
 
         except _ProxyError as e:
@@ -381,14 +429,9 @@ class HTTPAdapter(BaseAdapter):
         except (_SSLError, _HTTPError) as e:
             if isinstance(e, _SSLError):
                 raise SSLError(e, request=request)
-            elif isinstance(e, TimeoutError):
-                raise Timeout(e, request=request)
+            elif isinstance(e, ReadTimeoutError):
+                raise ReadTimeout(e, request=request)
             else:
                 raise
 
-        r = self.build_response(request, resp)
-
-        if not stream:
-            r.content
-
-        return r
\ No newline at end of file
+        return self.build_response(request, resp)
diff --git a/autoProcessTV/lib/requests/api.py b/autoProcessTV/lib/requests/api.py
index 01d853d5cae6fd270027f19407eae3d266fd38d7..98c92298eb02122e6be3aa132a9d06106aa6d657 100644
--- a/autoProcessTV/lib/requests/api.py
+++ b/autoProcessTV/lib/requests/api.py
@@ -16,22 +16,28 @@ from . import sessions
 
 def request(method, url, **kwargs):
     """Constructs and sends a :class:`Request <Request>`.
-    Returns :class:`Response <Response>` object.
 
     :param method: method for the new :class:`Request` object.
     :param url: URL for the new :class:`Request` object.
     :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
     :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
+    :param json: (optional) json data to send in the body of the :class:`Request`.
     :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
     :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
-    :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
+    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
     :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
-    :param timeout: (optional) Float describing the timeout of the request in seconds.
+    :param timeout: (optional) How long to wait for the server to send data
+        before giving up, as a float, or a (`connect timeout, read timeout
+        <user/advanced.html#timeouts>`_) tuple.
+    :type timeout: float or tuple
     :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
+    :type allow_redirects: bool
     :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
     :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
     :param stream: (optional) if ``False``, the response content will be immediately downloaded.
     :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
 
     Usage::
 
@@ -41,14 +47,21 @@ def request(method, url, **kwargs):
     """
 
     session = sessions.Session()
-    return session.request(method=method, url=url, **kwargs)
+    response = session.request(method=method, url=url, **kwargs)
+    # By explicitly closing the session, we avoid leaving sockets open which
+    # can trigger a ResourceWarning in some cases, and look like a memory leak
+    # in others.
+    session.close()
+    return response
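Because each functional-API call now builds and closes its own ``Session``, connection pooling only helps within a session you manage yourself; for example:

    import requests

    # one throwaway Session per call; its sockets are closed afterwards
    requests.get('http://httpbin.org/get')

    # reuse a single Session to keep connections pooled across calls;
    # Session supports the context-manager protocol and closes on exit
    with requests.Session() as s:
        s.get('http://httpbin.org/get')
        s.get('http://httpbin.org/headers')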
 
 
 def get(url, **kwargs):
-    """Sends a GET request. Returns :class:`Response` object.
+    """Sends a GET request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     kwargs.setdefault('allow_redirects', True)
@@ -56,10 +69,12 @@ def get(url, **kwargs):
 
 
 def options(url, **kwargs):
-    """Sends a OPTIONS request. Returns :class:`Response` object.
+    """Sends a OPTIONS request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     kwargs.setdefault('allow_redirects', True)
@@ -67,54 +82,65 @@ def options(url, **kwargs):
 
 
 def head(url, **kwargs):
-    """Sends a HEAD request. Returns :class:`Response` object.
+    """Sends a HEAD request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     kwargs.setdefault('allow_redirects', False)
     return request('head', url, **kwargs)
 
 
-def post(url, data=None, **kwargs):
-    """Sends a POST request. Returns :class:`Response` object.
+def post(url, data=None, json=None, **kwargs):
+    """Sends a POST request.
 
     :param url: URL for the new :class:`Request` object.
     :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
+    :param json: (optional) json data to send in the body of the :class:`Request`.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
-    return request('post', url, data=data, **kwargs)
+    return request('post', url, data=data, json=json, **kwargs)
 
 
 def put(url, data=None, **kwargs):
-    """Sends a PUT request. Returns :class:`Response` object.
+    """Sends a PUT request.
 
     :param url: URL for the new :class:`Request` object.
     :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     return request('put', url, data=data, **kwargs)
 
 
 def patch(url, data=None, **kwargs):
-    """Sends a PATCH request. Returns :class:`Response` object.
+    """Sends a PATCH request.
 
     :param url: URL for the new :class:`Request` object.
     :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     return request('patch', url, data=data, **kwargs)
 
 
 def delete(url, **kwargs):
-    """Sends a DELETE request. Returns :class:`Response` object.
+    """Sends a DELETE request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     return request('delete', url, **kwargs)
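A sketch of the new ``json`` parameter, which serializes the body and sets the Content-Type header (httpbin.org for illustration):

    import requests

    r = requests.post('http://httpbin.org/post', json={'key': 'value'})
    # roughly equivalent to:
    #   requests.post(url, data=json.dumps({'key': 'value'}),
    #                 headers={'Content-Type': 'application/json'})
    print(r.json()['json'])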
diff --git a/autoProcessTV/lib/requests/auth.py b/autoProcessTV/lib/requests/auth.py
index 9f831b7ad03c97357bf298ec12c914d20343f45a..d1c482517667e511a16cfb0208b98a3260caf95e 100644
--- a/autoProcessTV/lib/requests/auth.py
+++ b/autoProcessTV/lib/requests/auth.py
@@ -16,7 +16,8 @@ from base64 import b64encode
 
 from .compat import urlparse, str
 from .cookies import extract_cookies_to_jar
-from .utils import parse_dict_header
+from .utils import parse_dict_header, to_native_string
+from .status_codes import codes
 
 CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
 CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
@@ -25,7 +26,11 @@ CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
 def _basic_auth_str(username, password):
     """Returns a Basic Auth string."""
 
-    return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
+    authstr = 'Basic ' + to_native_string(
+        b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
+    )
+
+    return authstr
 
 
 class AuthBase(object):
@@ -62,6 +67,7 @@ class HTTPDigestAuth(AuthBase):
         self.nonce_count = 0
         self.chal = {}
         self.pos = None
+        self.num_401_calls = 1
 
     def build_digest_header(self, method, url):
 
@@ -118,13 +124,15 @@ class HTTPDigestAuth(AuthBase):
         s += os.urandom(8)
 
         cnonce = (hashlib.sha1(s).hexdigest()[:16])
-        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
         if _algorithm == 'MD5-SESS':
             HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
 
         if qop is None:
             respdig = KD(HA1, "%s:%s" % (nonce, HA2))
         elif qop == 'auth' or 'auth' in qop.split(','):
+            noncebit = "%s:%s:%s:%s:%s" % (
+                nonce, ncvalue, cnonce, 'auth', HA2
+                )
             respdig = KD(HA1, noncebit)
         else:
             # XXX handle auth-int.
@@ -146,6 +154,11 @@ class HTTPDigestAuth(AuthBase):
 
         return 'Digest %s' % (base)
 
+    def handle_redirect(self, r, **kwargs):
+        """Reset num_401_calls counter on redirects."""
+        if r.is_redirect:
+            self.num_401_calls = 1
+
     def handle_401(self, r, **kwargs):
         """Takes the given response and tries digest-auth, if needed."""
 
@@ -158,7 +171,7 @@ class HTTPDigestAuth(AuthBase):
 
         if 'digest' in s_auth.lower() and num_401_calls < 2:
 
-            setattr(self, 'num_401_calls', num_401_calls + 1)
+            self.num_401_calls += 1
             pat = re.compile(r'digest ', flags=re.IGNORECASE)
             self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
 
@@ -178,7 +191,7 @@ class HTTPDigestAuth(AuthBase):
 
             return _r
 
-        setattr(self, 'num_401_calls', 1)
+        self.num_401_calls = 1
         return r
 
     def __call__(self, r):
@@ -188,6 +201,11 @@ class HTTPDigestAuth(AuthBase):
         try:
             self.pos = r.body.tell()
         except AttributeError:
-            pass
+            # In the case of HTTPDigestAuth being reused and the body of
+            # the previous request was a file-like object, pos has the
+            # file position of the previous body. Ensure it's set to
+            # None.
+            self.pos = None
         r.register_hook('response', self.handle_401)
+        r.register_hook('response', self.handle_redirect)
         return r
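The new ``handle_redirect`` hook matters when a digest-protected endpoint redirects mid-handshake; typical usage is unchanged (the endpoint below is httpbin's digest demo, for illustration):

    import requests
    from requests.auth import HTTPDigestAuth

    auth = HTTPDigestAuth('user', 'passwd')
    r = requests.get('http://httpbin.org/digest-auth/auth/user/passwd',
                     auth=auth)
    # the redirect hook resets num_401_calls, so authentication can be
    # renegotiated after a redirect instead of being abandoned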
diff --git a/autoProcessTV/lib/requests/certs.py b/autoProcessTV/lib/requests/certs.py
index bc00826191d77f9c180ab29623ea25171bb87dbd..07e64750703903bd3971aa7536f6605ae65d6759 100644
--- a/autoProcessTV/lib/requests/certs.py
+++ b/autoProcessTV/lib/requests/certs.py
@@ -11,14 +11,15 @@ If you are packaging Requests, e.g., for a Linux distribution or a managed
 environment, you can change the definition of where() to return a separately
 packaged CA bundle.
 """
-
 import os.path
 
-
-def where():
-    """Return the preferred certificate bundle."""
-    # vendored bundle inside Requests
-    return os.path.join(os.path.dirname(__file__), 'cacert.pem')
+try:
+    from certifi import where
+except ImportError:
+    def where():
+        """Return the preferred certificate bundle."""
+        # vendored bundle inside Requests
+        return os.path.join(os.path.dirname(__file__), 'cacert.pem')
 
 if __name__ == '__main__':
     print(where())
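With this change, ``where()`` resolves to certifi's bundle when that package is installed and only falls back to the vendored file otherwise:

    from requests import certs

    # path to certifi's cacert.pem if certifi is importable,
    # otherwise the cacert.pem shipped inside requests
    print(certs.where())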
diff --git a/autoProcessTV/lib/requests/compat.py b/autoProcessTV/lib/requests/compat.py
index bdf10d6a9f9c0c073f85ca3cbdbc6c12eca610cd..70edff784951c903d89c48697ac77e08759a3098 100644
--- a/autoProcessTV/lib/requests/compat.py
+++ b/autoProcessTV/lib/requests/compat.py
@@ -21,61 +21,11 @@ is_py2 = (_ver[0] == 2)
 #: Python 3.x?
 is_py3 = (_ver[0] == 3)
 
-#: Python 3.0.x
-is_py30 = (is_py3 and _ver[1] == 0)
-
-#: Python 3.1.x
-is_py31 = (is_py3 and _ver[1] == 1)
-
-#: Python 3.2.x
-is_py32 = (is_py3 and _ver[1] == 2)
-
-#: Python 3.3.x
-is_py33 = (is_py3 and _ver[1] == 3)
-
-#: Python 3.4.x
-is_py34 = (is_py3 and _ver[1] == 4)
-
-#: Python 2.7.x
-is_py27 = (is_py2 and _ver[1] == 7)
-
-#: Python 2.6.x
-is_py26 = (is_py2 and _ver[1] == 6)
-
-#: Python 2.5.x
-is_py25 = (is_py2 and _ver[1] == 5)
-
-#: Python 2.4.x
-is_py24 = (is_py2 and _ver[1] == 4)   # I'm assuming this is not by choice.
-
-
-# ---------
-# Platforms
-# ---------
-
-
-# Syntax sugar.
-_ver = sys.version.lower()
-
-is_pypy = ('pypy' in _ver)
-is_jython = ('jython' in _ver)
-is_ironpython = ('iron' in _ver)
-
-# Assume CPython, if nothing else.
-is_cpython = not any((is_pypy, is_jython, is_ironpython))
-
-# Windows-based system.
-is_windows = 'win32' in str(sys.platform).lower()
-
-# Standard Linux 2+ system.
-is_linux = ('linux' in str(sys.platform).lower())
-is_osx = ('darwin' in str(sys.platform).lower())
-is_hpux = ('hpux' in str(sys.platform).lower())   # Complete guess.
-is_solaris = ('solar==' in str(sys.platform).lower())   # Complete guess.
-
 try:
     import simplejson as json
-except ImportError:
+except (ImportError, SyntaxError):
+    # simplejson does not support Python 3.2; it throws a SyntaxError
+    # because of u'...' Unicode literals.
     import json
 
 # ---------
@@ -90,7 +40,6 @@ if is_py2:
     from Cookie import Morsel
     from StringIO import StringIO
     from .packages.urllib3.packages.ordered_dict import OrderedDict
-    from httplib import IncompleteRead
 
     builtin_str = str
     bytes = str
@@ -98,7 +47,6 @@ if is_py2:
     basestring = basestring
     numeric_types = (int, long, float)
 
-
 elif is_py3:
     from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
     from urllib.request import parse_http_list, getproxies, proxy_bypass
@@ -106,7 +54,6 @@ elif is_py3:
     from http.cookies import Morsel
     from io import StringIO
     from collections import OrderedDict
-    from http.client import IncompleteRead
 
     builtin_str = str
     str = str
diff --git a/autoProcessTV/lib/requests/cookies.py b/autoProcessTV/lib/requests/cookies.py
index 831c49c6d276fe387cf7264edbc44c44585542b8..6969fe5cc4e37fd687e064e42b3d7eeb4cc7b3b5 100644
--- a/autoProcessTV/lib/requests/cookies.py
+++ b/autoProcessTV/lib/requests/cookies.py
@@ -157,26 +157,28 @@ class CookieConflictError(RuntimeError):
 
 
 class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
-    """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface.
+    """Compatibility class; is a cookielib.CookieJar, but exposes a dict
+    interface.
 
     This is the CookieJar we create by default for requests and sessions that
     don't specify one, since some clients may expect response.cookies and
     session.cookies to support dict operations.
 
-    Don't use the dict interface internally; it's just for compatibility with
-    with external client code. All `requests` code should work out of the box
-    with externally provided instances of CookieJar, e.g., LWPCookieJar and
-    FileCookieJar.
-
-    Caution: dictionary operations that are normally O(1) may be O(n).
+    Requests does not use the dict interface internally; it's just for
+    compatibility with external client code. All requests code should work
+    out of the box with externally provided instances of ``CookieJar``, e.g.
+    ``LWPCookieJar`` and ``FileCookieJar``.
 
     Unlike a regular CookieJar, this class is pickleable.
-    """
 
+    .. warning:: dictionary operations that are normally O(1) may be O(n).
+    """
     def get(self, name, default=None, domain=None, path=None):
         """Dict-like get() that also supports optional domain and path args in
         order to resolve naming collisions from using one cookie jar over
-        multiple domains. Caution: operation is O(n), not O(1)."""
+        multiple domains.
+
+        .. warning:: operation is O(n), not O(1)."""
         try:
             return self._find_no_duplicates(name, domain, path)
         except KeyError:
@@ -199,37 +201,38 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
         return c
 
     def iterkeys(self):
-        """Dict-like iterkeys() that returns an iterator of names of cookies from the jar.
-        See itervalues() and iteritems()."""
+        """Dict-like iterkeys() that returns an iterator of names of cookies
+        from the jar. See itervalues() and iteritems()."""
         for cookie in iter(self):
             yield cookie.name
 
     def keys(self):
-        """Dict-like keys() that returns a list of names of cookies from the jar.
-        See values() and items()."""
+        """Dict-like keys() that returns a list of names of cookies from the
+        jar. See values() and items()."""
         return list(self.iterkeys())
 
     def itervalues(self):
-        """Dict-like itervalues() that returns an iterator of values of cookies from the jar.
-        See iterkeys() and iteritems()."""
+        """Dict-like itervalues() that returns an iterator of values of cookies
+        from the jar. See iterkeys() and iteritems()."""
         for cookie in iter(self):
             yield cookie.value
 
     def values(self):
-        """Dict-like values() that returns a list of values of cookies from the jar.
-        See keys() and items()."""
+        """Dict-like values() that returns a list of values of cookies from the
+        jar. See keys() and items()."""
         return list(self.itervalues())
 
     def iteritems(self):
-        """Dict-like iteritems() that returns an iterator of name-value tuples from the jar.
-        See iterkeys() and itervalues()."""
+        """Dict-like iteritems() that returns an iterator of name-value tuples
+        from the jar. See iterkeys() and itervalues()."""
         for cookie in iter(self):
             yield cookie.name, cookie.value
 
     def items(self):
-        """Dict-like items() that returns a list of name-value tuples from the jar.
-        See keys() and values(). Allows client-code to call "dict(RequestsCookieJar)
-        and get a vanilla python dict of key value pairs."""
+        """Dict-like items() that returns a list of name-value tuples from the
+        jar. See keys() and values(). Allows client-code to call
+        ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value
+        pairs."""
         return list(self.iteritems())
 
     def list_domains(self):
@@ -259,8 +262,9 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
         return False  # there is only one domain in jar
 
     def get_dict(self, domain=None, path=None):
-        """Takes as an argument an optional domain and path and returns a plain old
-        Python dict of name-value pairs of cookies that meet the requirements."""
+        """Takes as an argument an optional domain and path and returns a plain
+        old Python dict of name-value pairs of cookies that meet the
+        requirements."""
         dictionary = {}
         for cookie in iter(self):
             if (domain is None or cookie.domain == domain) and (path is None
@@ -269,21 +273,24 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
         return dictionary
 
     def __getitem__(self, name):
-        """Dict-like __getitem__() for compatibility with client code. Throws exception
-        if there are more than one cookie with name. In that case, use the more
-        explicit get() method instead. Caution: operation is O(n), not O(1)."""
+        """Dict-like __getitem__() for compatibility with client code. Throws
+        exception if there are more than one cookie with name. In that case,
+        use the more explicit get() method instead.
+
+        .. warning:: operation is O(n), not O(1)."""
 
         return self._find_no_duplicates(name)
 
     def __setitem__(self, name, value):
-        """Dict-like __setitem__ for compatibility with client code. Throws exception
-        if there is already a cookie of that name in the jar. In that case, use the more
-        explicit set() method instead."""
+        """Dict-like __setitem__ for compatibility with client code. Throws
+        exception if there is already a cookie of that name in the jar. In that
+        case, use the more explicit set() method instead."""
 
         self.set(name, value)
 
     def __delitem__(self, name):
-        """Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
+        """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
+        ``remove_cookie_by_name()``."""
         remove_cookie_by_name(self, name)
 
     def set_cookie(self, cookie, *args, **kwargs):
@@ -300,10 +307,11 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
             super(RequestsCookieJar, self).update(other)
 
     def _find(self, name, domain=None, path=None):
-        """Requests uses this method internally to get cookie values. Takes as args name
-        and optional domain and path. Returns a cookie.value. If there are conflicting cookies,
-        _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown
-        if there are conflicting cookies."""
+        """Requests uses this method internally to get cookie values. Takes as
+        args name and optional domain and path. Returns a cookie.value. If
+        there are conflicting cookies, _find arbitrarily chooses one. See
+        _find_no_duplicates if you want an exception thrown if there are
+        conflicting cookies."""
         for cookie in iter(self):
             if cookie.name == name:
                 if domain is None or cookie.domain == domain:
@@ -313,10 +321,11 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
         raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
 
     def _find_no_duplicates(self, name, domain=None, path=None):
-        """__get_item__ and get call _find_no_duplicates -- never used in Requests internally.
-        Takes as args name and optional domain and path. Returns a cookie.value.
-        Throws KeyError if cookie is not found and CookieConflictError if there are
-        multiple cookies that match name and optionally domain and path."""
+        """Both ``__get_item__`` and ``get`` call this function: it's never
+        used elsewhere in Requests. Takes as args name and optional domain and
+        path. Returns a cookie.value. Throws KeyError if cookie is not found
+        and CookieConflictError if there are multiple cookies that match name
+        and optionally domain and path."""
         toReturn = None
         for cookie in iter(self):
             if cookie.name == name:
@@ -440,7 +449,7 @@ def merge_cookies(cookiejar, cookies):
     """
     if not isinstance(cookiejar, cookielib.CookieJar):
         raise ValueError('You can only merge into CookieJar')
-    
+
     if isinstance(cookies, dict):
         cookiejar = cookiejar_from_dict(
             cookies, cookiejar=cookiejar, overwrite=False)
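A short illustration of the dict-style jar interface documented above (httpbin.org domain is illustrative):

    import requests

    jar = requests.cookies.RequestsCookieJar()
    jar.set('tasty_cookie', 'yum', domain='httpbin.org', path='/cookies')

    # dict-style access; O(n), and raises CookieConflictError if the
    # same name exists under several domains/paths
    print(jar['tasty_cookie'])   # 'yum'
    print(dict(jar))             # {'tasty_cookie': 'yum'}

    requests.get('http://httpbin.org/cookies', cookies=jar)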
diff --git a/autoProcessTV/lib/requests/exceptions.py b/autoProcessTV/lib/requests/exceptions.py
index a4ee9d630c21fde0041e6ce13e14345368fb3a7e..89135a802eb1a87e15aa5d3e8a94ed0fce50273b 100644
--- a/autoProcessTV/lib/requests/exceptions.py
+++ b/autoProcessTV/lib/requests/exceptions.py
@@ -44,7 +44,23 @@ class SSLError(ConnectionError):
 
 
 class Timeout(RequestException):
-    """The request timed out."""
+    """The request timed out.
+
+    Catching this error will catch both
+    :exc:`~requests.exceptions.ConnectTimeout` and
+    :exc:`~requests.exceptions.ReadTimeout` errors.
+    """
+
+
+class ConnectTimeout(ConnectionError, Timeout):
+    """The request timed out while trying to connect to the remote server.
+
+    Requests that produced this error are safe to retry.
+    """
+
+
+class ReadTimeout(Timeout):
+    """The server did not send any data in the allotted amount of time."""
 
 
 class URLRequired(RequestException):
@@ -73,3 +89,11 @@ class ChunkedEncodingError(RequestException):
 
 class ContentDecodingError(RequestException, BaseHTTPError):
     """Failed to decode response content"""
+
+
+class StreamConsumedError(RequestException, TypeError):
+    """The content for this response was already consumed"""
+
+
+class RetryError(RequestException):
+    """Custom retries logic failed"""
diff --git a/autoProcessTV/lib/requests/models.py b/autoProcessTV/lib/requests/models.py
index e2fa09f8cefa31960d3a6c48844adecbb2423146..419cf0a8b5ee8bd7cbec48daddaf928943974968 100644
--- a/autoProcessTV/lib/requests/models.py
+++ b/autoProcessTV/lib/requests/models.py
@@ -19,31 +19,35 @@ from .cookies import cookiejar_from_dict, get_cookie_header
 from .packages.urllib3.fields import RequestField
 from .packages.urllib3.filepost import encode_multipart_formdata
 from .packages.urllib3.util import parse_url
-from .packages.urllib3.exceptions import DecodeError
+from .packages.urllib3.exceptions import (
+    DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
 from .exceptions import (
-    HTTPError, RequestException, MissingSchema, InvalidURL,
-    ChunkedEncodingError, ContentDecodingError)
+    HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
+    ContentDecodingError, ConnectionError, StreamConsumedError)
 from .utils import (
     guess_filename, get_auth_from_url, requote_uri,
     stream_decode_response_unicode, to_key_val_list, parse_header_links,
     iter_slices, guess_json_utf, super_len, to_native_string)
 from .compat import (
     cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
-    is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
+    is_py2, chardet, json, builtin_str, basestring)
 from .status_codes import codes
 
 #: The set of HTTP status codes that indicate an automatically
 #: processable redirect.
 REDIRECT_STATI = (
-    codes.moved,  # 301
-    codes.found,  # 302
-    codes.other,  # 303
-    codes.temporary_moved,  # 307
+    codes.moved,              # 301
+    codes.found,              # 302
+    codes.other,              # 303
+    codes.temporary_redirect, # 307
+    codes.permanent_redirect, # 308
 )
 DEFAULT_REDIRECT_LIMIT = 30
 CONTENT_CHUNK_SIZE = 10 * 1024
 ITER_CHUNK_SIZE = 512
 
+json_dumps = json.dumps
+
 
 class RequestEncodingMixin(object):
     @property
@@ -139,12 +143,13 @@ class RequestEncodingMixin(object):
             else:
                 fn = guess_filename(v) or k
                 fp = v
-            if isinstance(fp, str):
-                fp = StringIO(fp)
-            if isinstance(fp, bytes):
-                fp = BytesIO(fp)
 
-            rf = RequestField(name=k, data=fp.read(),
+            if isinstance(fp, (str, bytes, bytearray)):
+                fdata = fp
+            else:
+                fdata = fp.read()
+
+            rf = RequestField(name=k, data=fdata,
                               filename=fn, headers=fh)
             rf.make_multipart(content_type=ft)
             new_fields.append(rf)
@@ -187,7 +192,8 @@ class Request(RequestHooksMixin):
     :param url: URL to send.
     :param headers: dictionary of headers to send.
     :param files: dictionary of {filename: fileobject} files to multipart upload.
-    :param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
+    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
+    :param json: json for the body to attach to the request (if data is not specified).
     :param params: dictionary of URL parameters to append to the URL.
     :param auth: Auth handler or (user, pass) tuple.
     :param cookies: dictionary or CookieJar of cookies to attach to this request.
@@ -210,7 +216,8 @@ class Request(RequestHooksMixin):
         params=None,
         auth=None,
         cookies=None,
-        hooks=None):
+        hooks=None,
+        json=None):
 
         # Default empty dicts for dict params.
         data = [] if data is None else data
@@ -228,6 +235,7 @@ class Request(RequestHooksMixin):
         self.headers = headers
         self.files = files
         self.data = data
+        self.json = json
         self.params = params
         self.auth = auth
         self.cookies = cookies
@@ -244,6 +252,7 @@ class Request(RequestHooksMixin):
             headers=self.headers,
             files=self.files,
             data=self.data,
+            json=self.json,
             params=self.params,
             auth=self.auth,
             cookies=self.cookies,
@@ -287,14 +296,15 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
         self.hooks = default_hooks()
 
     def prepare(self, method=None, url=None, headers=None, files=None,
-                data=None, params=None, auth=None, cookies=None, hooks=None):
+                data=None, params=None, auth=None, cookies=None, hooks=None,
+                json=None):
         """Prepares the entire request with the given parameters."""
 
         self.prepare_method(method)
         self.prepare_url(url, params)
         self.prepare_headers(headers)
         self.prepare_cookies(cookies)
-        self.prepare_body(data, files)
+        self.prepare_body(data, files, json)
         self.prepare_auth(auth, url)
         # Note that prepare_auth must be last to enable authentication schemes
         # such as OAuth to work on a fully prepared request.
@@ -309,8 +319,8 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
         p = PreparedRequest()
         p.method = self.method
         p.url = self.url
-        p.headers = self.headers.copy()
-        p._cookies = self._cookies.copy()
+        p.headers = self.headers.copy() if self.headers is not None else None
+        p._cookies = self._cookies.copy() if self._cookies is not None else None
         p.body = self.body
         p.hooks = self.hooks
         return p
@@ -324,21 +334,27 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
     def prepare_url(self, url, params):
         """Prepares the given HTTP URL."""
         #: Accept objects that have string representations.
-        try:
-            url = unicode(url)
-        except NameError:
-            # We're on Python 3.
-            url = str(url)
-        except UnicodeDecodeError:
-            pass
-
-        # Don't do any URL preparation for oddball schemes
+        #: We're unable to blindly call unicode/str functions
+        #: as this will include the bytestring indicator (b'')
+        #: on python 3.x.
+        #: https://github.com/kennethreitz/requests/pull/2238
+        if isinstance(url, bytes):
+            url = url.decode('utf8')
+        else:
+            url = unicode(url) if is_py2 else str(url)
+
+        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
+        # `data` etc to work around exceptions from `url_parse`, which
+        # handles RFC 3986 only.
         if ':' in url and not url.lower().startswith('http'):
             self.url = url
             return
 
         # Support for unicode domain names and paths.
-        scheme, auth, host, port, path, query, fragment = parse_url(url)
+        try:
+            scheme, auth, host, port, path, query, fragment = parse_url(url)
+        except LocationParseError as e:
+            raise InvalidURL(*e.args)
 
         if not scheme:
             raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
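The practical effect of the new bytes branch, on Python 3 (illustrative URL):

    import requests

    # before this change, str(b'http://...') on Python 3 produced the
    # literal "b'http://...'"; bytes URLs are now decoded as UTF-8
    requests.get(b'http://httpbin.org/get')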
@@ -395,7 +411,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
         else:
             self.headers = CaseInsensitiveDict()
 
-    def prepare_body(self, data, files):
+    def prepare_body(self, data, files, json=None):
         """Prepares the given HTTP body data."""
 
         # Check if file, fo, generator, iterator.
@@ -406,11 +422,13 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
         content_type = None
         length = None
 
+        if json is not None:
+            content_type = 'application/json'
+            body = json_dumps(json)
+
         is_stream = all([
             hasattr(data, '__iter__'),
-            not isinstance(data, basestring),
-            not isinstance(data, list),
-            not isinstance(data, dict)
+            not isinstance(data, (basestring, list, tuple, dict))
         ])
 
         try:
@@ -433,9 +451,9 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
             if files:
                 (body, content_type) = self._encode_files(files, data)
             else:
-                if data:
+                if data and json is None:
                     body = self._encode_params(data)
-                    if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
+                    if isinstance(data, basestring) or hasattr(data, 'read'):
                         content_type = None
                     else:
                         content_type = 'application/x-www-form-urlencoded'
@@ -443,7 +461,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
             self.prepare_content_length(body)
 
             # Add content-type if it wasn't explicitly provided.
-            if (content_type) and (not 'content-type' in self.headers):
+            if content_type and ('content-type' not in self.headers):
                 self.headers['Content-Type'] = content_type
 
         self.body = body
@@ -457,7 +475,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
             l = super_len(body)
             if l:
                 self.headers['Content-Length'] = builtin_str(l)
-        elif self.method not in ('GET', 'HEAD'):
+        elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
             self.headers['Content-Length'] = '0'
 
     def prepare_auth(self, auth, url=''):
@@ -558,6 +576,10 @@ class Response(object):
         #: and the arrival of the response (as a timedelta)
         self.elapsed = datetime.timedelta(0)
 
+        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
+        #: is a response.
+        self.request = None
+
     def __getstate__(self):
         # Consume everything; accessing the content attribute makes
         # sure the content has been fully read.
@@ -596,7 +618,7 @@ class Response(object):
     def ok(self):
         try:
             self.raise_for_status()
-        except RequestException:
+        except HTTPError:
             return False
         return True
 
@@ -607,6 +629,11 @@ class Response(object):
         """
         return ('location' in self.headers and self.status_code in REDIRECT_STATI)
 
+    @property
+    def is_permanent_redirect(self):
+        """True if this Response one of the permanant versions of redirect"""
+        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
+
     @property
     def apparent_encoding(self):
         """The apparent encoding, provided by the chardet library"""
@@ -618,21 +645,22 @@ class Response(object):
         large responses.  The chunk size is the number of bytes it should
         read into memory.  This is not necessarily the length of each item
         returned as decoding can take place.
-        """
-        if self._content_consumed:
-            # simulate reading small chunks of the content
-            return iter_slices(self._content, chunk_size)
 
+        If decode_unicode is True, content will be decoded using the best
+        available encoding based on the response.
+        """
         def generate():
             try:
                 # Special case for urllib3.
                 try:
                     for chunk in self.raw.stream(chunk_size, decode_content=True):
                         yield chunk
-                except IncompleteRead as e:
+                except ProtocolError as e:
                     raise ChunkedEncodingError(e)
                 except DecodeError as e:
                     raise ContentDecodingError(e)
+                except ReadTimeoutError as e:
+                    raise ConnectionError(e)
             except AttributeError:
                 # Standard file-like object.
                 while True:
@@ -643,17 +671,26 @@ class Response(object):
 
             self._content_consumed = True
 
-        gen = generate()
+        if self._content_consumed and isinstance(self._content, bool):
+            raise StreamConsumedError()
+        # simulate reading small chunks of the content
+        reused_chunks = iter_slices(self._content, chunk_size)
+
+        stream_chunks = generate()
+
+        chunks = reused_chunks if self._content_consumed else stream_chunks
 
         if decode_unicode:
-            gen = stream_decode_response_unicode(gen, self)
+            chunks = stream_decode_response_unicode(chunks, self)
 
-        return gen
+        return chunks
 
-    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
+    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
         """Iterates over the response data, one line at a time.  When
         stream=True is set on the request, this avoids reading the
         content at once into memory for large responses.
+
+        .. note:: This method is not reentrant safe.
         """
 
         pending = None
@@ -662,7 +699,11 @@ class Response(object):
 
             if pending is not None:
                 chunk = pending + chunk
-            lines = chunk.splitlines()
+
+            if delimiter:
+                lines = chunk.split(delimiter)
+            else:
+                lines = chunk.splitlines()
 
             if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                 pending = lines.pop()
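Usage of the new ``delimiter`` argument; note the delimiter must match the chunk type (bytes here, since ``decode_unicode`` is off), and httpbin's streaming endpoint is used for illustration:

    import requests

    r = requests.get('http://httpbin.org/stream/5', stream=True)
    # split on an explicit delimiter instead of universal newlines
    for line in r.iter_lines(delimiter=b'\n'):
        if line:
            print(line)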
@@ -793,8 +834,8 @@ class Response(object):
             raise HTTPError(http_error_msg, response=self)
 
     def close(self):
-        """Closes the underlying file descriptor and releases the connection
-        back to the pool.
+        """Releases the connection back to the pool. Once this method has been
+        called the underlying ``raw`` object must not be accessed again.
 
         *Note: Should not normally need to be called explicitly.*
         """
diff --git a/autoProcessTV/lib/requests/packages/README.rst b/autoProcessTV/lib/requests/packages/README.rst
deleted file mode 100644
index c42f376b944c9332ec95593d2796d78ecc336c32..0000000000000000000000000000000000000000
--- a/autoProcessTV/lib/requests/packages/README.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-If you are planning to submit a pull request to requests with any changes in 
-this library do not go any further. These are independent libraries which we 
-vendor into requests. Any changes necessary to these libraries must be made in 
-them and submitted as separate pull requests to those libraries.
-
-urllib3 pull requests go here: https://github.com/shazow/urllib3
-
-chardet pull requests go here: https://github.com/chardet/chardet
diff --git a/autoProcessTV/lib/requests/packages/__init__.py b/autoProcessTV/lib/requests/packages/__init__.py
index d62c4b7111b3d547f853379e4840b44cb96c6000..4dcf870f3ba7de37b6c3ac328ac5e06f83b781e8 100644
--- a/autoProcessTV/lib/requests/packages/__init__.py
+++ b/autoProcessTV/lib/requests/packages/__init__.py
@@ -1,3 +1,107 @@
+"""
+Copyright (c) Donald Stufft, pip, and individual contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
 from __future__ import absolute_import
 
-from . import urllib3
+import sys
+
+
+class VendorAlias(object):
+
+    def __init__(self, package_names):
+        self._package_names = package_names
+        self._vendor_name = __name__
+        self._vendor_pkg = self._vendor_name + "."
+        self._vendor_pkgs = [
+            self._vendor_pkg + name for name in self._package_names
+        ]
+
+    def find_module(self, fullname, path=None):
+        if fullname.startswith(self._vendor_pkg):
+            return self
+
+    def load_module(self, name):
+        # Ensure that this only works for the vendored name
+        if not name.startswith(self._vendor_pkg):
+            raise ImportError(
+                "Cannot import %s, must be a subpackage of '%s'." % (
+                    name, self._vendor_name,
+                )
+            )
+
+        if not (name == self._vendor_name or
+                any(name.startswith(pkg) for pkg in self._vendor_pkgs)):
+            raise ImportError(
+                "Cannot import %s, must be one of %s." % (
+                    name, self._vendor_pkgs
+                )
+            )
+
+        # Check to see if we already have this item in sys.modules, if we do
+        # then simply return that.
+        if name in sys.modules:
+            return sys.modules[name]
+
+        # Check to see if we can import the vendor name
+        try:
+            # We do this dance here because we want to try and import this
+            # module without hitting a recursion error because of a bunch of
+            # VendorAlias instances on sys.meta_path
+            real_meta_path = sys.meta_path[:]
+            try:
+                sys.meta_path = [
+                    m for m in sys.meta_path
+                    if not isinstance(m, VendorAlias)
+                ]
+                __import__(name)
+                module = sys.modules[name]
+            finally:
+                # Re-add any additions to sys.meta_path that were made
+                # during the import we just did, otherwise things like
+                # requests.packages.urllib3.poolmanager will fail.
+                for m in sys.meta_path:
+                    if m not in real_meta_path:
+                        real_meta_path.append(m)
+
+                # Restore sys.meta_path with any new items.
+                sys.meta_path = real_meta_path
+        except ImportError:
+            # We can't import the vendor name, so we'll try to import the
+            # "real" name.
+            real_name = name[len(self._vendor_pkg):]
+            try:
+                __import__(real_name)
+                module = sys.modules[real_name]
+            except ImportError:
+                raise ImportError("No module named '%s'" % (name,))
+
+        # If we've gotten here we've found the module we're looking for, either
+        # as part of our vendored package, or as the real name, so we'll add
+        # it to sys.modules as the vendored name so that we don't have to do
+        # the lookup again.
+        sys.modules[name] = module
+
+        # Finally, return the loaded module
+        return module
+
+
+sys.meta_path.append(VendorAlias(["urllib3", "chardet"]))
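Importing ``requests.packages`` now installs this finder on ``sys.meta_path``; a quick check of the aliasing behaviour, as a sketch:

    import sys
    import requests.packages  # importing this module registers the finder

    assert any(isinstance(m, requests.packages.VendorAlias)
               for m in sys.meta_path)

    # resolves the vendored copy, or falls back to a system-wide urllib3
    # and caches it in sys.modules under the vendored name
    from requests.packages import urllib3
    print(sys.modules['requests.packages.urllib3'] is urllib3)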
diff --git a/autoProcessTV/lib/requests/packages/chardet/__init__.py b/autoProcessTV/lib/requests/packages/chardet/__init__.py
index e4f0799d621d277d529d057ce680abfc61beb123..82c2a48d2905ce17924cafbf35c09b61e8ee4504 100644
--- a/autoProcessTV/lib/requests/packages/chardet/__init__.py
+++ b/autoProcessTV/lib/requests/packages/chardet/__init__.py
@@ -15,7 +15,7 @@
 # 02110-1301  USA
 ######################### END LICENSE BLOCK #########################
 
-__version__ = "2.2.1"
+__version__ = "2.3.0"
 from sys import version_info
 
 
diff --git a/autoProcessTV/lib/requests/packages/chardet/chardetect.py b/autoProcessTV/lib/requests/packages/chardet/chardetect.py
index ecd0163be726bf513048c69d47770527829e8143..ffe892f25db3c7e2f8a57e29a7c981476a5eecf0 100644
--- a/autoProcessTV/lib/requests/packages/chardet/chardetect.py
+++ b/autoProcessTV/lib/requests/packages/chardet/chardetect.py
@@ -12,34 +12,68 @@ Example::
 If no paths are provided, it takes its input from stdin.
 
 """
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import sys
 from io import open
-from sys import argv, stdin
 
+from chardet import __version__
 from chardet.universaldetector import UniversalDetector
 
 
-def description_of(file, name='stdin'):
-    """Return a string describing the probable encoding of a file."""
+def description_of(lines, name='stdin'):
+    """
+    Return a string describing the probable encoding of a file or
+    list of strings.
+
+    :param lines: The lines to get the encoding of.
+    :type lines: Iterable of bytes
+    :param name: Name of file or collection of lines
+    :type name: str
+    """
     u = UniversalDetector()
-    for line in file:
+    for line in lines:
         u.feed(line)
     u.close()
     result = u.result
     if result['encoding']:
-        return '%s: %s with confidence %s' % (name,
-                                              result['encoding'],
-                                              result['confidence'])
+        return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
+                                                     result['confidence'])
     else:
-        return '%s: no result' % name
+        return '{0}: no result'.format(name)
 
 
-def main():
-    if len(argv) <= 1:
-        print(description_of(stdin))
-    else:
-        for path in argv[1:]:
-            with open(path, 'rb') as f:
-                print(description_of(f, path))
+def main(argv=None):
+    '''
+    Handles command line arguments and gets things started.
+
+    :param argv: List of arguments, as if specified on the command-line.
+                 If None, ``sys.argv[1:]`` is used instead.
+    :type argv: list of str
+    '''
+    # Get command line arguments
+    parser = argparse.ArgumentParser(
+        description="Takes one or more file paths and reports their detected \
+                     encodings",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        conflict_handler='resolve')
+    parser.add_argument('input',
+                        help='File whose encoding we would like to determine.',
+                        type=argparse.FileType('rb'), nargs='*',
+                        default=[sys.stdin])
+    parser.add_argument('--version', action='version',
+                        version='%(prog)s {0}'.format(__version__))
+    args = parser.parse_args(argv)
+
+    for f in args.input:
+        if f.isatty():
+            print("You are running chardetect interactively. Press " +
+                  "CTRL-D twice at the start of a blank line to signal the " +
+                  "end of your input. If you want help, run chardetect " +
+                  "--help\n", file=sys.stderr)
+        print(description_of(f, f.name))
 
 
 if __name__ == '__main__':
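
The rewritten main() is a thin argparse wrapper over description_of(), which
can also be called directly; a small sketch, assuming the chardet package is
importable::

    from chardet.chardetect import description_of

    lines = [b'caf\xc3\xa9 cr\xc3\xa8me\n'] * 20  # UTF-8-encoded bytes
    print(description_of(lines, name='example'))
    # e.g. "example: utf-8 with confidence 0.87"
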
diff --git a/autoProcessTV/lib/requests/packages/chardet/jpcntx.py b/autoProcessTV/lib/requests/packages/chardet/jpcntx.py
index f7f69ba4cdaba120b5d8c6932e6f7eb4c60a0913..59aeb6a87893b030a0ffceb93a460752eef75e92 100644
--- a/autoProcessTV/lib/requests/packages/chardet/jpcntx.py
+++ b/autoProcessTV/lib/requests/packages/chardet/jpcntx.py
@@ -177,6 +177,12 @@ class JapaneseContextAnalysis:
         return -1, 1
 
 class SJISContextAnalysis(JapaneseContextAnalysis):
+    def __init__(self):
+        JapaneseContextAnalysis.__init__(self)
+        self.charset_name = "SHIFT_JIS"
+
+    def get_charset_name(self):
+        return self.charset_name
+
     def get_order(self, aBuf):
         if not aBuf:
             return -1, 1
@@ -184,6 +190,8 @@ class SJISContextAnalysis(JapaneseContextAnalysis):
         first_char = wrap_ord(aBuf[0])
         if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
             charLen = 2
+            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
+                self.charset_name = "CP932"
         else:
             charLen = 1
 
diff --git a/autoProcessTV/lib/requests/packages/chardet/latin1prober.py b/autoProcessTV/lib/requests/packages/chardet/latin1prober.py
index ad695f57a728421cd920cc3772fb31141d1e0b29..eef3573543c8becb8976c8b1743d9fc6c04d1149 100644
--- a/autoProcessTV/lib/requests/packages/chardet/latin1prober.py
+++ b/autoProcessTV/lib/requests/packages/chardet/latin1prober.py
@@ -129,11 +129,11 @@ class Latin1Prober(CharSetProber):
         if total < 0.01:
             confidence = 0.0
         else:
-            confidence = ((self._mFreqCounter[3] / total)
-                          - (self._mFreqCounter[1] * 20.0 / total))
+            confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
+                          / total)
         if confidence < 0.0:
             confidence = 0.0
         # lower the confidence of latin1 so that other more accurate
         # detector can take priority.
-        confidence = confidence * 0.5
+        confidence = confidence * 0.73
         return confidence
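
The refactored expression is algebraically identical to the old two-division
form; the behavioural change is the final multiplier moving from 0.5 to 0.73.
With illustrative counts (hypothetical, not from the test suite)::

    freq_likely, freq_unlikely, total = 950, 2, 1000
    confidence = (freq_likely - freq_unlikely * 20.0) / total  # 0.91
    print(confidence * 0.5)   # 0.455  (old multiplier)
    print(confidence * 0.73)  # 0.6643 (new multiplier)
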
diff --git a/autoProcessTV/lib/requests/packages/chardet/mbcssm.py b/autoProcessTV/lib/requests/packages/chardet/mbcssm.py
index 3f93cfb045c01e072de4c448d2b5bb2e25438b9b..efe678ca0394b2e0faf7051bbd86df86d54f2b51 100644
--- a/autoProcessTV/lib/requests/packages/chardet/mbcssm.py
+++ b/autoProcessTV/lib/requests/packages/chardet/mbcssm.py
@@ -353,7 +353,7 @@ SJIS_cls = (
     2,2,2,2,2,2,2,2,  # 68 - 6f
     2,2,2,2,2,2,2,2,  # 70 - 77
     2,2,2,2,2,2,2,1,  # 78 - 7f
-    3,3,3,3,3,3,3,3,  # 80 - 87
+    3,3,3,3,3,2,2,3,  # 80 - 87
     3,3,3,3,3,3,3,3,  # 88 - 8f
     3,3,3,3,3,3,3,3,  # 90 - 97
     3,3,3,3,3,3,3,3,  # 98 - 9f
@@ -369,9 +369,8 @@ SJIS_cls = (
     2,2,2,2,2,2,2,2,  # d8 - df
     3,3,3,3,3,3,3,3,  # e0 - e7
     3,3,3,3,3,4,4,4,  # e8 - ef
-    4,4,4,4,4,4,4,4,  # f0 - f7
-    4,4,4,4,4,0,0,0   # f8 - ff
-)
+    3,3,3,3,3,3,3,3,  # f0 - f7
+    3,3,3,3,3,0,0,0)  # f8 - ff
 
 
 SJIS_st = (
@@ -571,5 +570,3 @@ UTF8SMModel = {'classTable': UTF8_cls,
                'stateTable': UTF8_st,
                'charLenTable': UTF8CharLenTable,
                'name': 'UTF-8'}
-
-# flake8: noqa
diff --git a/autoProcessTV/lib/requests/packages/chardet/sjisprober.py b/autoProcessTV/lib/requests/packages/chardet/sjisprober.py
index b173614e6827b6f73eda8774a9e87f64e76a952d..cd0e9e7078b38741e9e610d7b9a92a78369a3eb9 100644
--- a/autoProcessTV/lib/requests/packages/chardet/sjisprober.py
+++ b/autoProcessTV/lib/requests/packages/chardet/sjisprober.py
@@ -47,7 +47,7 @@ class SJISProber(MultiByteCharSetProber):
         self._mContextAnalyzer.reset()
 
     def get_charset_name(self):
-        return "SHIFT_JIS"
+        return self._mContextAnalyzer.get_charset_name()
 
     def feed(self, aBuf):
         aLen = len(aBuf)
diff --git a/autoProcessTV/lib/requests/packages/chardet/universaldetector.py b/autoProcessTV/lib/requests/packages/chardet/universaldetector.py
index 9a03ad3d89ac2be24738a70bcf2798608edd56ab..476522b999646b99ace47492210595cb457690c3 100644
--- a/autoProcessTV/lib/requests/packages/chardet/universaldetector.py
+++ b/autoProcessTV/lib/requests/packages/chardet/universaldetector.py
@@ -71,9 +71,9 @@ class UniversalDetector:
 
         if not self._mGotData:
             # If the data starts with BOM, we know it is UTF
-            if aBuf[:3] == codecs.BOM:
+            if aBuf[:3] == codecs.BOM_UTF8:
                 # EF BB BF  UTF-8 with BOM
-                self.result = {'encoding': "UTF-8", 'confidence': 1.0}
+                self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
             elif aBuf[:4] == codecs.BOM_UTF32_LE:
                 # FF FE 00 00  UTF-32, little-endian BOM
                 self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
diff --git a/autoProcessTV/lib/requests/packages/urllib3/__init__.py b/autoProcessTV/lib/requests/packages/urllib3/__init__.py
index 73071f7001b5a7b6973f2d7f8985c98ca23d1d71..0660b9c83a593749284f8444db1c6fc726889ea3 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/__init__.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/__init__.py
@@ -1,16 +1,10 @@
-# urllib3/__init__.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 """
 urllib3 - Thread-safe connection pooling and re-using.
 """
 
 __author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
 __license__ = 'MIT'
-__version__ = 'dev'
+__version__ = '1.10.2'
 
 
 from .connectionpool import (
@@ -23,7 +17,10 @@ from . import exceptions
 from .filepost import encode_multipart_formdata
 from .poolmanager import PoolManager, ProxyManager, proxy_from_url
 from .response import HTTPResponse
-from .util import make_headers, get_host, Timeout
+from .util.request import make_headers
+from .util.url import get_host
+from .util.timeout import Timeout
+from .util.retry import Retry
 
 
 # Set default logging handler to avoid "No handler found" warnings.
@@ -51,8 +48,19 @@ def add_stderr_logger(level=logging.DEBUG):
     handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
     logger.addHandler(handler)
     logger.setLevel(level)
-    logger.debug('Added an stderr logging handler to logger: %s' % __name__)
+    logger.debug('Added a stderr logging handler to logger: %s' % __name__)
     return handler
 
 # ... Clean up.
 del NullHandler
+
+
+# Set security warning to always go off by default.
+import warnings
+warnings.simplefilter('always', exceptions.SecurityWarning)
+
+def disable_warnings(category=exceptions.HTTPWarning):
+    """
+    Helper for quickly disabling all urllib3 warnings.
+    """
+    warnings.simplefilter('ignore', category)
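
A short usage sketch of the new warning controls defined above
(InsecureRequestWarning, used further below for unverified HTTPS requests, is
one of the HTTPWarning subclasses this silences)::

    from requests.packages import urllib3

    # Silence every urllib3 HTTPWarning subclass...
    urllib3.disable_warnings()
    # ...or only the unverified-HTTPS warning:
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
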
diff --git a/autoProcessTV/lib/requests/packages/urllib3/_collections.py b/autoProcessTV/lib/requests/packages/urllib3/_collections.py
index 5907b0dc7c45a5fa379d312e99e069b147137797..cc424de0f45534ff0a5297be39c2e0d096ebebb1 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/_collections.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/_collections.py
@@ -1,13 +1,7 @@
-# urllib3/_collections.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from collections import MutableMapping
+from collections import Mapping, MutableMapping
 try:
     from threading import RLock
-except ImportError: # Platform-specific: No threads available
+except ImportError:  # Platform-specific: No threads available
     class RLock:
         def __enter__(self):
             pass
@@ -16,13 +10,14 @@ except ImportError: # Platform-specific: No threads available
             pass
 
 
-try: # Python 2.7+
+try:  # Python 2.7+
     from collections import OrderedDict
 except ImportError:
     from .packages.ordered_dict import OrderedDict
+from .packages.six import iterkeys, itervalues, PY3
 
 
-__all__ = ['RecentlyUsedContainer']
+__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
 
 
 _Null = object()
@@ -90,8 +85,7 @@ class RecentlyUsedContainer(MutableMapping):
     def clear(self):
         with self.lock:
             # Copy pointers to all values, then wipe the mapping
-            # under Python 2, this copies the list of values twice :-|
-            values = list(self._container.values())
+            values = list(itervalues(self._container))
             self._container.clear()
 
         if self.dispose_func:
@@ -100,4 +94,227 @@ class RecentlyUsedContainer(MutableMapping):
 
     def keys(self):
         with self.lock:
-            return self._container.keys()
+            return list(iterkeys(self._container))
+
+
+_dict_setitem = dict.__setitem__
+_dict_getitem = dict.__getitem__
+_dict_delitem = dict.__delitem__
+_dict_contains = dict.__contains__
+_dict_setdefault = dict.setdefault
+
+
+class HTTPHeaderDict(dict):
+    """
+    :param headers:
+        An iterable of field-value pairs. Must not contain multiple field names
+        when compared case-insensitively.
+
+    :param kwargs:
+        Additional field-value pairs to pass in to ``dict.update``.
+
+    A ``dict`` like container for storing HTTP Headers.
+
+    Field names are stored and compared case-insensitively in compliance with
+    RFC 7230. Iteration provides the first case-sensitive key seen for each
+    case-insensitive pair.
+
+    Using ``__setitem__`` syntax overwrites fields that compare equal
+    case-insensitively in order to maintain ``dict``'s API. For fields that
+    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+    in a loop.
+
+    If multiple fields that are equal case-insensitively are passed to the
+    constructor or ``.update``, the behavior is undefined and some will be
+    lost.
+
+    >>> headers = HTTPHeaderDict()
+    >>> headers.add('Set-Cookie', 'foo=bar')
+    >>> headers.add('set-cookie', 'baz=quxx')
+    >>> headers['content-length'] = '7'
+    >>> headers['SET-cookie']
+    'foo=bar, baz=quxx'
+    >>> headers['Content-Length']
+    '7'
+    """
+
+    def __init__(self, headers=None, **kwargs):
+        dict.__init__(self)
+        if headers is not None:
+            if isinstance(headers, HTTPHeaderDict):
+                self._copy_from(headers)
+            else:
+                self.extend(headers)
+        if kwargs:
+            self.extend(kwargs)
+
+    def __setitem__(self, key, val):
+        return _dict_setitem(self, key.lower(), (key, val))
+
+    def __getitem__(self, key):
+        val = _dict_getitem(self, key.lower())
+        return ', '.join(val[1:])
+
+    def __delitem__(self, key):
+        return _dict_delitem(self, key.lower())
+
+    def __contains__(self, key):
+        return _dict_contains(self, key.lower())
+
+    def __eq__(self, other):
+        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+            return False
+        if not isinstance(other, type(self)):
+            other = type(self)(other)
+        return dict((k1, self[k1]) for k1 in self) == dict((k2, other[k2]) for k2 in other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    values = MutableMapping.values
+    get = MutableMapping.get
+    update = MutableMapping.update
+
+    if not PY3:  # Python 2
+        iterkeys = MutableMapping.iterkeys
+        itervalues = MutableMapping.itervalues
+
+    __marker = object()
+
+    def pop(self, key, default=__marker):
+        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+          If key is not found, d is returned if given, otherwise KeyError is raised.
+        '''
+        # Using the MutableMapping function directly fails due to the private marker.
+        # Using ordinary dict.pop would expose the internal structures.
+        # So let's reinvent the wheel.
+        try:
+            value = self[key]
+        except KeyError:
+            if default is self.__marker:
+                raise
+            return default
+        else:
+            del self[key]
+            return value
+
+    def discard(self, key):
+        try:
+            del self[key]
+        except KeyError:
+            pass
+
+    def add(self, key, val):
+        """Adds a (name, value) pair, doesn't overwrite the value if it already
+        exists.
+
+        >>> headers = HTTPHeaderDict(foo='bar')
+        >>> headers.add('Foo', 'baz')
+        >>> headers['foo']
+        'bar, baz'
+        """
+        key_lower = key.lower()
+        new_vals = key, val
+        # Keep the common case aka no item present as fast as possible
+        vals = _dict_setdefault(self, key_lower, new_vals)
+        if new_vals is not vals:
+            # new_vals was not inserted, as there was a previous one
+            if isinstance(vals, list):
+                # If already several items got inserted, we have a list
+                vals.append(val)
+            else:
+                # vals should be a tuple then, i.e. only one item so far
+                # Need to convert the tuple to list for further extension
+                _dict_setitem(self, key_lower, [vals[0], vals[1], val])
+
+    def extend(*args, **kwargs):
+        """Generic import function for any type of header-like object.
+        Adapted version of MutableMapping.update in order to insert items
+        with self.add instead of self.__setitem__
+        """
+        if len(args) > 2:
+            raise TypeError("extend() takes at most 2 positional "
+                            "arguments ({0} given)".format(len(args)))
+        elif not args:
+            raise TypeError("extend() takes at least 1 argument (0 given)")
+        self = args[0]
+        other = args[1] if len(args) >= 2 else ()
+
+        if isinstance(other, Mapping):
+            for key in other:
+                self.add(key, other[key])
+        elif hasattr(other, "keys"):
+            for key in other.keys():
+                self.add(key, other[key])
+        else:
+            for key, value in other:
+                self.add(key, value)
+
+        for key, value in kwargs.items():
+            self.add(key, value)
+
+    def getlist(self, key):
+        """Returns a list of all the values for the named field. Returns an
+        empty list if the key doesn't exist."""
+        try:
+            vals = _dict_getitem(self, key.lower())
+        except KeyError:
+            return []
+        else:
+            if isinstance(vals, tuple):
+                return [vals[1]]
+            else:
+                return vals[1:]
+
+    # Backwards compatibility for httplib
+    getheaders = getlist
+    getallmatchingheaders = getlist
+    iget = getlist
+
+    def __repr__(self):
+        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+    def _copy_from(self, other):
+        for key in other:
+            val = _dict_getitem(other, key)
+            if isinstance(val, list):
+                # Don't need to convert tuples
+                val = list(val)
+            _dict_setitem(self, key, val)
+
+    def copy(self):
+        clone = type(self)()
+        clone._copy_from(self)
+        return clone
+
+    def iteritems(self):
+        """Iterate over all header lines, including duplicate ones."""
+        for key in self:
+            vals = _dict_getitem(self, key)
+            for val in vals[1:]:
+                yield vals[0], val
+
+    def itermerged(self):
+        """Iterate over all headers, merging duplicate ones together."""
+        for key in self:
+            val = _dict_getitem(self, key)
+            yield val[0], ', '.join(val[1:])
+
+    def items(self):
+        return list(self.iteritems())
+
+    @classmethod
+    def from_httplib(cls, message, duplicates=('set-cookie',)):  # Python 2
+        """Read headers from a Python 2 httplib message object."""
+        ret = cls(message.items())
+        # ret now contains only the last header line for each duplicate.
+        # Importing with all duplicates would be nice, but this would
+        # mean to repeat most of the raw parsing already done, when the
+        # message object was created. Extracting only the headers of interest 
+        # separately, the cookies, should be faster and requires less
+        # extra code.
+        for key in duplicates:
+            ret.discard(key)
+            for val in message.getheaders(key):
+                ret.add(key, val)
+        return ret
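
A brief usage sketch of the new container, using the import path of this
vendored copy::

    from requests.packages.urllib3._collections import HTTPHeaderDict

    h = HTTPHeaderDict()
    h.add('Set-Cookie', 'a=1')
    h.add('Set-Cookie', 'b=2')
    h['Content-Length'] = '7'

    print(h['set-cookie'])          # a=1, b=2  (merged, case-insensitive)
    print(h.getlist('Set-Cookie'))  # ['a=1', 'b=2']
    print(dict(h.itermerged()))     # values joined with ', ' per field
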
diff --git a/autoProcessTV/lib/requests/packages/urllib3/connection.py b/autoProcessTV/lib/requests/packages/urllib3/connection.py
index c7d5b77dcc316f9590184f21c5bf2ff484bb2f3b..e5de769d8c501aaced9e174574efdc67188a3fff 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/connection.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/connection.py
@@ -1,88 +1,155 @@
-# urllib3/connection.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
+import datetime
+import sys
 import socket
 from socket import timeout as SocketTimeout
+import warnings
+from .packages import six
 
-try: # Python 3
+try:  # Python 3
     from http.client import HTTPConnection as _HTTPConnection, HTTPException
 except ImportError:
     from httplib import HTTPConnection as _HTTPConnection, HTTPException
 
+
 class DummyConnection(object):
     "Used to detect a failed ConnectionCls import."
     pass
 
-try: # Compiled with SSL?
-    ssl = None
+
+try:  # Compiled with SSL?
     HTTPSConnection = DummyConnection
+    import ssl
+    BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError):  # Platform-specific: No SSL.
+    ssl = None
 
     class BaseSSLError(BaseException):
         pass
 
-    try: # Python 3
-        from http.client import HTTPSConnection as _HTTPSConnection
-    except ImportError:
-        from httplib import HTTPSConnection as _HTTPSConnection
 
-    import ssl
-    BaseSSLError = ssl.SSLError
+try:  # Python 3:
+    # Not a no-op, we're adding this to the namespace so it can be imported.
+    ConnectionError = ConnectionError
+except NameError:  # Python 2:
+    class ConnectionError(Exception):
+        pass
 
-except (ImportError, AttributeError): # Platform-specific: No SSL.
-    pass
 
 from .exceptions import (
     ConnectTimeoutError,
+    SystemTimeWarning,
+    SecurityWarning,
 )
 from .packages.ssl_match_hostname import match_hostname
-from .util import (
-    assert_fingerprint,
+
+from .util.ssl_ import (
     resolve_cert_reqs,
     resolve_ssl_version,
     ssl_wrap_socket,
+    assert_fingerprint,
 )
 
 
+from .util import connection
+
 port_by_scheme = {
     'http': 80,
     'https': 443,
 }
 
+RECENT_DATE = datetime.date(2014, 1, 1)
+
 
 class HTTPConnection(_HTTPConnection, object):
+    """
+    Based on httplib.HTTPConnection but provides an extra constructor
+    backwards-compatibility layer between older and newer Pythons.
+
+    Additional keyword parameters are used to configure attributes of the connection.
+    Accepted parameters include:
+
+      - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+      - ``source_address``: Set the source address for the current connection.
+
+        .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
+
+      - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+        defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+        Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+        For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+        you might pass::
+
+            HTTPConnection.default_socket_options + [
+                (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+            ]
+
+        Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+    """
+
     default_port = port_by_scheme['http']
 
-    # By default, disable Nagle's Algorithm.
-    tcp_nodelay = 1
+    #: Disable Nagle's algorithm by default.
+    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+    #: Whether this connection verifies the host's certificate.
+    is_verified = False
+
+    def __init__(self, *args, **kw):
+        if six.PY3:  # Python 3
+            kw.pop('strict', None)
+
+        # Pre-set source_address in case we have an older Python like 2.6.
+        self.source_address = kw.get('source_address')
+
+        if sys.version_info < (2, 7):  # Python 2.6
+            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
+            # not newer versions. We can still use it when creating a
+            # connection though, so we pop it *after* we have saved it as
+            # self.source_address.
+            kw.pop('source_address', None)
+
+        #: The socket options provided by the user. If no options are
+        #: provided, we use the default options.
+        self.socket_options = kw.pop('socket_options', self.default_socket_options)
+
+        # Superclass also sets self.source_address in Python 2.7+.
+        _HTTPConnection.__init__(self, *args, **kw)
 
     def _new_conn(self):
-        """ Establish a socket connection and set nodelay settings on it
+        """ Establish a socket connection and set nodelay settings on it.
 
-        :return: a new socket connection
+        :return: New socket connection.
         """
+        extra_kw = {}
+        if self.source_address:
+            extra_kw['source_address'] = self.source_address
+
+        if self.socket_options:
+            extra_kw['socket_options'] = self.socket_options
+
         try:
-            conn = socket.create_connection(
-                (self.host, self.port),
-                self.timeout,
-                self.source_address,
-            )
-        except AttributeError: # Python 2.6
-            conn = socket.create_connection(
-                (self.host, self.port),
-                self.timeout,
-            )
-        conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
-                        self.tcp_nodelay)
+            conn = connection.create_connection(
+                (self.host, self.port), self.timeout, **extra_kw)
+
+        except SocketTimeout:
+            raise ConnectTimeoutError(
+                self, "Connection to %s timed out. (connect timeout=%s)" %
+                (self.host, self.timeout))
+
         return conn
 
     def _prepare_conn(self, conn):
         self.sock = conn
+        # the _tunnel_host attribute was added in python 2.6.3 (via
+        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
+        # not have it.
         if getattr(self, '_tunnel_host', None):
             # TODO: Fix tunnel so it doesn't depend on self.sock state.
             self._tunnel()
+            # Mark this connection as not reusable
+            self.auto_open = 0
 
     def connect(self):
         conn = self._new_conn()
@@ -93,15 +160,18 @@ class HTTPSConnection(HTTPConnection):
     default_port = port_by_scheme['https']
 
     def __init__(self, host, port=None, key_file=None, cert_file=None,
-                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
-                 source_address=None):
-        try:
-            HTTPConnection.__init__(self, host, port, strict, timeout, source_address)
-        except TypeError: # Python 2.6
-            HTTPConnection.__init__(self, host, port, strict, timeout)
+                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
+
+        HTTPConnection.__init__(self, host, port, strict=strict,
+                                timeout=timeout, **kw)
+
         self.key_file = key_file
         self.cert_file = cert_file
 
+        # Required property for Google AppEngine 1.9.0 which otherwise causes
+        # HTTPS requests to go out as HTTP. (See Issue #356)
+        self._protocol = 'https'
+
     def connect(self):
         conn = self._new_conn()
         self._prepare_conn(conn)
@@ -116,6 +186,7 @@ class VerifiedHTTPSConnection(HTTPSConnection):
     cert_reqs = None
     ca_certs = None
     ssl_version = None
+    assert_fingerprint = None
 
     def set_cert(self, key_file=None, cert_file=None,
                  cert_reqs=None, ca_certs=None,
@@ -130,46 +201,59 @@ class VerifiedHTTPSConnection(HTTPSConnection):
 
     def connect(self):
         # Add certificate verification
-        try:
-            sock = socket.create_connection(
-                address=(self.host, self.port),
-                timeout=self.timeout,
-            )
-        except SocketTimeout:
-            raise ConnectTimeoutError(
-                self, "Connection to %s timed out. (connect timeout=%s)" %
-                (self.host, self.timeout))
-
-        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
-                        self.tcp_nodelay)
+        conn = self._new_conn()
 
         resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
         resolved_ssl_version = resolve_ssl_version(self.ssl_version)
 
-        # the _tunnel_host attribute was added in python 2.6.3 (via
-        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
-        # not have them.
+        hostname = self.host
         if getattr(self, '_tunnel_host', None):
-            self.sock = sock
+            # _tunnel_host was added in Python 2.6.3
+            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
+
+            self.sock = conn
             # Calls self._set_hostport(), so self.host is
             # self._tunnel_host below.
             self._tunnel()
+            # Mark this connection as not reusable
+            self.auto_open = 0
+
+            # Override the host with the one we're requesting data from.
+            hostname = self._tunnel_host
+
+        is_time_off = datetime.date.today() < RECENT_DATE
+        if is_time_off:
+            warnings.warn((
+                'System time is way off (before {0}). This will probably '
+                'lead to SSL verification errors').format(RECENT_DATE),
+                SystemTimeWarning
+            )
 
         # Wrap socket using verification with the root certs in
         # trusted_root_certs
-        self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
+        self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
                                     cert_reqs=resolved_cert_reqs,
                                     ca_certs=self.ca_certs,
-                                    server_hostname=self.host,
+                                    server_hostname=hostname,
                                     ssl_version=resolved_ssl_version)
 
-        if resolved_cert_reqs != ssl.CERT_NONE:
-            if self.assert_fingerprint:
-                assert_fingerprint(self.sock.getpeercert(binary_form=True),
-                                   self.assert_fingerprint)
-            elif self.assert_hostname is not False:
-                match_hostname(self.sock.getpeercert(),
-                               self.assert_hostname or self.host)
+        if self.assert_fingerprint:
+            assert_fingerprint(self.sock.getpeercert(binary_form=True),
+                               self.assert_fingerprint)
+        elif resolved_cert_reqs != ssl.CERT_NONE \
+                and self.assert_hostname is not False:
+            cert = self.sock.getpeercert()
+            if not cert.get('subjectAltName', ()):
+                warnings.warn((
+                    'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
+                    'This feature is being removed by major browsers and deprecated by RFC 2818. '
+                    '(See https://github.com/shazow/urllib3/issues/497 for details.)'),
+                    SecurityWarning
+                )
+            match_hostname(cert, self.assert_hostname or hostname)
+
+        self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
+                            or self.assert_fingerprint is not None)
 
 
 if ssl:
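
The new socket_options plumbing can be exercised end to end through the pool
layer; a sketch assuming the vendored urllib3 above, with httpbin.org as an
example host::

    import socket
    from requests.packages.urllib3 import PoolManager
    from requests.packages.urllib3.connection import HTTPConnection

    # Keep the TCP_NODELAY default and additionally enable keep-alive.
    opts = HTTPConnection.default_socket_options + [
        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]
    http = PoolManager(socket_options=opts)
    r = http.request('GET', 'http://httpbin.org/get')
    print(r.status)  # 200
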
diff --git a/autoProcessTV/lib/requests/packages/urllib3/connectionpool.py b/autoProcessTV/lib/requests/packages/urllib3/connectionpool.py
index 243d700ee8fef8a0efa0f07620ebecfc292a8c2c..0085345c43d0f84b63f08c643d44b4e6e70c2093 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/connectionpool.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/connectionpool.py
@@ -1,16 +1,12 @@
-# urllib3/connectionpool.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 import errno
 import logging
+import sys
+import warnings
 
 from socket import error as SocketError, timeout as SocketTimeout
 import socket
 
-try: # Python 3
+try:  # Python 3
     from queue import LifoQueue, Empty, Full
 except ImportError:
     from Queue import LifoQueue, Empty, Full
@@ -19,14 +15,16 @@ except ImportError:
 
 from .exceptions import (
     ClosedPoolError,
-    ConnectTimeoutError,
+    ProtocolError,
     EmptyPoolError,
     HostChangedError,
+    LocationValueError,
     MaxRetryError,
+    ProxyError,
+    ReadTimeoutError,
     SSLError,
     TimeoutError,
-    ReadTimeoutError,
-    ProxyError,
+    InsecureRequestWarning,
 )
 from .packages.ssl_match_hostname import CertificateError
 from .packages import six
@@ -34,16 +32,15 @@ from .connection import (
     port_by_scheme,
     DummyConnection,
     HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
-    HTTPException, BaseSSLError,
+    HTTPException, BaseSSLError, ConnectionError
 )
 from .request import RequestMethods
 from .response import HTTPResponse
-from .util import (
-    assert_fingerprint,
-    get_host,
-    is_connection_dropped,
-    Timeout,
-)
+
+from .util.connection import is_connection_dropped
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host
 
 
 xrange = six.moves.xrange
@@ -52,8 +49,8 @@ log = logging.getLogger(__name__)
 
 _Default = object()
 
-## Pool objects
 
+## Pool objects
 class ConnectionPool(object):
     """
     Base class for all connection pools, such as
@@ -64,19 +61,36 @@ class ConnectionPool(object):
     QueueCls = LifoQueue
 
     def __init__(self, host, port=None):
-        # httplib doesn't like it when we include brackets in ipv6 addresses
-        host = host.strip('[]')
+        if not host:
+            raise LocationValueError("No host specified.")
 
-        self.host = host
+        # httplib doesn't like it when we include brackets in ipv6 addresses
+        self.host = host.strip('[]')
         self.port = port
 
     def __str__(self):
         return '%s(host=%r, port=%r)' % (type(self).__name__,
                                          self.host, self.port)
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+        # Return False to re-raise any potential exceptions
+        return False
+
+    def close(self):
+        """
+        Close all pooled connections and disable the pool.
+        """
+        pass
+
+
 # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
 _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
 
+
 class HTTPConnectionPool(ConnectionPool, RequestMethods):
     """
     Thread-safe connection pool for one host.
@@ -121,6 +135,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         Headers to include with all requests, unless other headers are given
         explicitly.
 
+    :param retries:
+        Retry configuration to use by default with requests in this pool.
+
     :param _proxy:
         Parsed proxy URL, should not be used directly, instead, see
         :class:`urllib3.connectionpool.ProxyManager`
@@ -128,6 +145,10 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
     :param _proxy_headers:
         A dictionary with proxy headers, should not be used directly,
         instead, see :class:`urllib3.connectionpool.ProxyManager`"
+
+    :param \**conn_kw:
+        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+        :class:`urllib3.connection.HTTPSConnection` instances.
     """
 
     scheme = 'http'
@@ -135,18 +156,22 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
 
     def __init__(self, host, port=None, strict=False,
                  timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
-                 headers=None, _proxy=None, _proxy_headers=None):
+                 headers=None, retries=None,
+                 _proxy=None, _proxy_headers=None,
+                 **conn_kw):
         ConnectionPool.__init__(self, host, port)
         RequestMethods.__init__(self, headers)
 
         self.strict = strict
 
-        # This is for backwards compatibility and can be removed once a timeout
-        # can only be set to a Timeout object
         if not isinstance(timeout, Timeout):
             timeout = Timeout.from_float(timeout)
 
+        if retries is None:
+            retries = Retry.DEFAULT
+
         self.timeout = timeout
+        self.retries = retries
 
         self.pool = self.QueueCls(maxsize)
         self.block = block
@@ -161,6 +186,13 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         # These are mostly for testing and debugging purposes.
         self.num_connections = 0
         self.num_requests = 0
+        self.conn_kw = conn_kw
+
+        if self.proxy:
+            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+            # We cannot know if the user has added default socket options, so we cannot replace the
+            # list.
+            self.conn_kw.setdefault('socket_options', [])
 
     def _new_conn(self):
         """
@@ -170,17 +202,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         log.info("Starting new HTTP connection (%d): %s" %
                  (self.num_connections, self.host))
 
-        extra_params = {}
-        if not six.PY3:  # Python 2
-            extra_params['strict'] = self.strict
-
         conn = self.ConnectionCls(host=self.host, port=self.port,
                                   timeout=self.timeout.connect_timeout,
-                                  **extra_params)
-        if self.proxy is not None:
-            # Enable Nagle's algorithm for proxies, to avoid packet
-            # fragmentation.
-            conn.tcp_nodelay = 0
+                                  strict=self.strict, **self.conn_kw)
         return conn
 
     def _get_conn(self, timeout=None):
@@ -199,7 +223,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         try:
             conn = self.pool.get(block=self.block, timeout=timeout)
 
-        except AttributeError: # self.pool is None
+        except AttributeError:  # self.pool is None
             raise ClosedPoolError(self, "Pool is closed.")
 
         except Empty:
@@ -213,6 +237,11 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         if conn and is_connection_dropped(conn):
             log.info("Resetting dropped connection: %s" % self.host)
             conn.close()
+            if getattr(conn, 'auto_open', 1) == 0:
+                # This is a proxied connection that has been mutated by
+                # httplib._tunnel() and cannot be reused (since it would
+                # attempt to bypass the proxy)
+                conn = None
 
         return conn or self._new_conn()
 
@@ -232,19 +261,30 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         """
         try:
             self.pool.put(conn, block=False)
-            return # Everything is dandy, done.
+            return  # Everything is dandy, done.
         except AttributeError:
             # self.pool is None.
             pass
         except Full:
             # This should never happen if self.block == True
-            log.warning("HttpConnectionPool is full, discarding connection: %s"
-                        % self.host)
+            log.warning(
+                "Connection pool is full, discarding connection: %s" %
+                self.host)
 
         # Connection never got put back into the pool, close it.
         if conn:
             conn.close()
 
+    def _validate_conn(self, conn):
+        """
+        Called right before a request is made, after the socket is created.
+        """
+        pass
+
+    def _prepare_proxy(self, conn):
+        # Nothing to do for HTTP connections.
+        pass
+
     def _get_timeout(self, timeout):
         """ Helper that always returns a :class:`urllib3.util.Timeout` """
         if timeout is _Default:
@@ -257,6 +297,23 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             # can be removed later
             return Timeout.from_float(timeout)
 
+    def _raise_timeout(self, err, url, timeout_value):
+        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+        if isinstance(err, SocketTimeout):
+            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+        # See the above comment about EAGAIN in Python 3. In Python 2 we have
+        # to specifically catch it and throw the timeout error
+        if hasattr(err, 'errno') and err.errno in _blocking_errnos:
+            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+        # Catch possible read timeouts thrown as SSL errors. If not the
+        # case, rethrow the original. We need to do this because of:
+        # http://bugs.python.org/issue10272
+        if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python 2.6
+            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
     def _make_request(self, conn, method, url, timeout=_Default,
                       **httplib_request_kw):
         """
@@ -276,23 +333,26 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         self.num_requests += 1
 
         timeout_obj = self._get_timeout(timeout)
+        timeout_obj.start_connect()
+        conn.timeout = timeout_obj.connect_timeout
 
+        # Trigger any extra validation we need to do.
         try:
-            timeout_obj.start_connect()
-            conn.timeout = timeout_obj.connect_timeout
-            # conn.request() calls httplib.*.request, not the method in
-            # urllib3.request. It also calls makefile (recv) on the socket.
-            conn.request(method, url, **httplib_request_kw)
-        except SocketTimeout:
-            raise ConnectTimeoutError(
-                self, "Connection to %s timed out. (connect timeout=%s)" %
-                (self.host, timeout_obj.connect_timeout))
+            self._validate_conn(conn)
+        except (SocketTimeout, BaseSSLError) as e:
+            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+            raise
+
+        # conn.request() calls httplib.*.request, not the method in
+        # urllib3.request. It also calls makefile (recv) on the socket.
+        conn.request(method, url, **httplib_request_kw)
 
         # Reset the timeout for the recv() on the socket
         read_timeout = timeout_obj.read_timeout
 
         # App Engine doesn't have a sock attr
-        if hasattr(conn, 'sock'):
+        if getattr(conn, 'sock', None):
             # In Python 3 socket.py will catch EAGAIN and return None when you
             # try and read into the file pointer created by http.client, which
             # instead raises a BadStatusLine exception. Instead of catching
@@ -300,41 +360,20 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             # timeouts, check for a zero timeout before making the request.
             if read_timeout == 0:
                 raise ReadTimeoutError(
-                    self, url,
-                    "Read timed out. (read timeout=%s)" % read_timeout)
+                    self, url, "Read timed out. (read timeout=%s)" % read_timeout)
             if read_timeout is Timeout.DEFAULT_TIMEOUT:
                 conn.sock.settimeout(socket.getdefaulttimeout())
-            else: # None or a value
+            else:  # None or a value
                 conn.sock.settimeout(read_timeout)
 
         # Receive the response from the server
         try:
-            try: # Python 2.7+, use buffering of HTTP responses
+            try:  # Python 2.7, use buffering of HTTP responses
                 httplib_response = conn.getresponse(buffering=True)
-            except TypeError: # Python 2.6 and older
+            except TypeError:  # Python 2.6 and older
                 httplib_response = conn.getresponse()
-        except SocketTimeout:
-            raise ReadTimeoutError(
-                self, url, "Read timed out. (read timeout=%s)" % read_timeout)
-
-        except BaseSSLError as e:
-            # Catch possible read timeouts thrown as SSL errors. If not the
-            # case, rethrow the original. We need to do this because of:
-            # http://bugs.python.org/issue10272
-            if 'timed out' in str(e) or \
-               'did not complete (read)' in str(e):  # Python 2.6
-                raise ReadTimeoutError(self, url, "Read timed out.")
-
-            raise
-
-        except SocketError as e: # Platform-specific: Python 2
-            # See the above comment about EAGAIN in Python 3. In Python 2 we
-            # have to specifically catch it and throw the timeout error
-            if e.errno in _blocking_errnos:
-                raise ReadTimeoutError(
-                    self, url,
-                    "Read timed out. (read timeout=%s)" % read_timeout)
-
+        except (SocketTimeout, BaseSSLError, SocketError) as e:
+            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
             raise
 
         # AppEngine doesn't have a version attr.
@@ -358,7 +397,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                     conn.close()
 
         except Empty:
-            pass # Done.
+            pass  # Done.
 
     def is_same_host(self, url):
         """
@@ -379,7 +418,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
 
         return (scheme, host, port) == (self.scheme, self.host, self.port)
 
-    def urlopen(self, method, url, body=None, headers=None, retries=3,
+    def urlopen(self, method, url, body=None, headers=None, retries=None,
                 redirect=True, assert_same_host=True, timeout=_Default,
                 pool_timeout=None, release_conn=None, **response_kw):
         """
@@ -413,11 +452,25 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             these headers completely replace any pool-specific headers.
 
         :param retries:
-            Number of retries to allow before raising a MaxRetryError exception.
+            Configure the number of retries to allow before raising a
+            :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+            Pass ``None`` to retry until you receive a response. Pass a
+            :class:`~urllib3.util.retry.Retry` object for fine-grained control
+            over different types of retries.
+            Pass an integer number to retry connection errors that many times,
+            but no other types of errors. Pass zero to never retry.
+
+            If ``False``, then retries are disabled and any exception is raised
+            immediately. Also, instead of raising a MaxRetryError on redirects,
+            the redirect response will be returned.
+
+        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
 
         :param redirect:
             If True, automatically handle redirects (status codes 301, 302,
-            303, 307, 308). Each redirect counts as a retry.
+            303, 307, 308). Each redirect counts as a retry. Disabling retries
+            will disable redirect, too.
 
         :param assert_same_host:
             If ``True``, will make sure that the host of the pool requests is
@@ -451,15 +504,15 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         if headers is None:
             headers = self.headers
 
-        if retries < 0:
-            raise MaxRetryError(self, url)
+        if not isinstance(retries, Retry):
+            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
 
         if release_conn is None:
             release_conn = response_kw.get('preload_content', True)
 
         # Check host
         if assert_same_host and not self.is_same_host(url):
-            raise HostChangedError(self, url, retries - 1)
+            raise HostChangedError(self, url, retries)
 
         conn = None
 
@@ -470,13 +523,24 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             headers = headers.copy()
             headers.update(self.proxy_headers)
 
+        # Must keep the exception bound to a separate variable or else Python 3
+        # complains about UnboundLocalError.
+        err = None
+
         try:
-            # Request a connection from the queue
+            # Request a connection from the queue.
+            timeout_obj = self._get_timeout(timeout)
             conn = self._get_conn(timeout=pool_timeout)
 
-            # Make the request on the httplib connection object
+            conn.timeout = timeout_obj.connect_timeout
+
+            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+            if is_new_proxy_conn:
+                self._prepare_proxy(conn)
+
+            # Make the request on the httplib connection object.
             httplib_response = self._make_request(conn, method, url,
-                                                  timeout=timeout,
+                                                  timeout=timeout_obj,
                                                   body=body, headers=headers)
 
             # If we're going to release the connection in ``finally:``, then
@@ -497,37 +561,44 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             #     ``response.read()``)
 
         except Empty:
-            # Timed out by queue
+            # Timed out by queue.
             raise EmptyPoolError(self, "No pool connections are available.")
 
-        except BaseSSLError as e:
+        except (BaseSSLError, CertificateError) as e:
+            # Close the connection. If a connection is reused on which there
+            # was a Certificate error, the next request will certainly raise
+            # another Certificate error.
+            if conn:
+                conn.close()
+                conn = None
             raise SSLError(e)
 
-        except CertificateError as e:
-            # Name mismatch
-            raise SSLError(e)
+        except SSLError:
+            # Treat SSLError separately from BaseSSLError to preserve
+            # traceback.
+            if conn:
+                conn.close()
+                conn = None
+            raise
 
-        except TimeoutError as e:
-            # Connection broken, discard.
-            conn = None
-            # Save the error off for retry logic.
-            err = e
+        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
+            if conn:
+                # Discard the connection for these exceptions. It will
+                # be replaced during the next _get_conn() call.
+                conn.close()
+                conn = None
 
-            if retries == 0:
-                raise
+            if isinstance(e, SocketError) and self.proxy:
+                e = ProxyError('Cannot connect to proxy.', e)
+            elif isinstance(e, (SocketError, HTTPException)):
+                e = ProtocolError('Connection aborted.', e)
 
-        except (HTTPException, SocketError) as e:
-            # Connection broken, discard. It will be replaced next _get_conn().
-            conn = None
-            # This is necessary so we can access e below
-            err = e
+            retries = retries.increment(method, url, error=e, _pool=self,
+                                        _stacktrace=sys.exc_info()[2])
+            retries.sleep()
 
-            if retries == 0:
-                if isinstance(e, SocketError) and self.proxy is not None:
-                    raise ProxyError('Cannot connect to proxy. '
-                                     'Socket error: %s.' % e)
-                else:
-                    raise MaxRetryError(self, url, e)
+            # Keep track of the error for the retry warning.
+            err = e
 
         finally:
             if release_conn:
@@ -538,9 +609,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
 
         if not conn:
             # Try again
-            log.warn("Retrying (%d attempts remain) after connection "
-                     "broken by '%r': %s" % (retries, err, url))
-            return self.urlopen(method, url, body, headers, retries - 1,
+            log.warning("Retrying (%r) after connection "
+                        "broken by '%r': %s" % (retries, err, url))
+            return self.urlopen(method, url, body, headers, retries,
                                 redirect, assert_same_host,
                                 timeout=timeout, pool_timeout=pool_timeout,
                                 release_conn=release_conn, **response_kw)
@@ -550,11 +621,31 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         if redirect_location:
             if response.status == 303:
                 method = 'GET'
+
+            try:
+                retries = retries.increment(method, url, response=response, _pool=self)
+            except MaxRetryError:
+                if retries.raise_on_redirect:
+                    raise
+                return response
+
             log.info("Redirecting %s -> %s" % (url, redirect_location))
             return self.urlopen(method, redirect_location, body, headers,
-                                retries - 1, redirect, assert_same_host,
-                                timeout=timeout, pool_timeout=pool_timeout,
-                                release_conn=release_conn, **response_kw)
+                    retries=retries, redirect=redirect,
+                    assert_same_host=assert_same_host,
+                    timeout=timeout, pool_timeout=pool_timeout,
+                    release_conn=release_conn, **response_kw)
+
+        # Check if we should retry the HTTP response.
+        if retries.is_forced_retry(method, status_code=response.status):
+            retries = retries.increment(method, url, response=response, _pool=self)
+            retries.sleep()
+            log.info("Forced retry: %s" % url)
+            return self.urlopen(method, url, body, headers,
+                    retries=retries, redirect=redirect,
+                    assert_same_host=assert_same_host,
+                    timeout=timeout, pool_timeout=pool_timeout,
+                    release_conn=release_conn, **response_kw)
 
         return response
 
@@ -581,15 +672,17 @@ class HTTPSConnectionPool(HTTPConnectionPool):
     ConnectionCls = HTTPSConnection
 
     def __init__(self, host, port=None,
-                 strict=False, timeout=None, maxsize=1,
-                 block=False, headers=None,
+                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
+                 block=False, headers=None, retries=None,
                  _proxy=None, _proxy_headers=None,
                  key_file=None, cert_file=None, cert_reqs=None,
                  ca_certs=None, ssl_version=None,
-                 assert_hostname=None, assert_fingerprint=None):
+                 assert_hostname=None, assert_fingerprint=None,
+                 **conn_kw):
 
         HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
-                                    block, headers, _proxy, _proxy_headers)
+                                    block, headers, retries, _proxy, _proxy_headers,
+                                    **conn_kw)
         self.key_file = key_file
         self.cert_file = cert_file
         self.cert_reqs = cert_reqs
@@ -613,18 +706,25 @@ class HTTPSConnectionPool(HTTPConnectionPool):
                           assert_fingerprint=self.assert_fingerprint)
             conn.ssl_version = self.ssl_version
 
-        if self.proxy is not None:
-            # Python 2.7+
-            try:
-                set_tunnel = conn.set_tunnel
-            except AttributeError:  # Platform-specific: Python 2.6
-                set_tunnel = conn._set_tunnel
+        return conn
+
+    def _prepare_proxy(self, conn):
+        """
+        Establish tunnel connection early, because otherwise httplib
+        would improperly set Host: header to proxy's IP:port.
+        """
+        # Python 2.7+
+        try:
+            set_tunnel = conn.set_tunnel
+        except AttributeError:  # Platform-specific: Python 2.6
+            set_tunnel = conn._set_tunnel
+
+        if sys.version_info <= (2, 6, 4) and not self.proxy_headers:   # Python 2.6.4 and older
+            set_tunnel(self.host, self.port)
+        else:
             set_tunnel(self.host, self.port, self.proxy_headers)
-            # Establish tunnel connection early, because otherwise httplib
-            # would improperly set Host: header to proxy's IP:port.
-            conn.connect()
 
-        return conn
+        conn.connect()
 
     def _new_conn(self):
         """
@@ -645,20 +745,29 @@ class HTTPSConnectionPool(HTTPConnectionPool):
             actual_host = self.proxy.host
             actual_port = self.proxy.port
 
-        extra_params = {}
-        if not six.PY3:  # Python 2
-            extra_params['strict'] = self.strict
-
         conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                   timeout=self.timeout.connect_timeout,
-                                  **extra_params)
-        if self.proxy is not None:
-            # Enable Nagle's algorithm for proxies, to avoid packet
-            # fragmentation.
-            conn.tcp_nodelay = 0
+                                  strict=self.strict, **self.conn_kw)
 
         return self._prepare_conn(conn)
 
+    def _validate_conn(self, conn):
+        """
+        Called right before a request is made, after the socket is created.
+        """
+        super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+        # Force connect early to allow us to validate the connection.
+        if not getattr(conn, 'sock', None):  # AppEngine might not have `.sock`
+            conn.connect()
+
+        if not conn.is_verified:
+            warnings.warn((
+                'Unverified HTTPS request is being made. '
+                'Adding certificate verification is strongly advised. See: '
+                'https://urllib3.readthedocs.org/en/latest/security.html'),
+                InsecureRequestWarning)
+
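
A sketch of the new warning from the calling side, and how to silence it
deliberately; module paths assume the vendored copy, and the host is a
placeholder::

    import warnings
    from requests.packages.urllib3 import PoolManager
    from requests.packages.urllib3.exceptions import InsecureRequestWarning

    http = PoolManager()  # no cert_reqs/ca_certs: connections unverified
    http.request('GET', 'https://self-signed.example/')  # emits the warning

    # Opting out explicitly after assessing the risk:
    warnings.simplefilter('ignore', InsecureRequestWarning)
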
 
 def connection_from_url(url, **kw):
     """
@@ -675,7 +784,7 @@ def connection_from_url(url, **kw):
         :class:`.ConnectionPool`. Useful for specifying things like
         timeout, maxsize, headers, etc.
 
-    Example: ::
+    Example::
 
         >>> conn = connection_from_url('http://google.com/')
         >>> r = conn.request('GET', '/')
diff --git a/autoProcessTV/lib/requests/packages/urllib3/contrib/ntlmpool.py b/autoProcessTV/lib/requests/packages/urllib3/contrib/ntlmpool.py
index b8cd933034cc48ef733209850380ec4ebdc79e10..c6b266f5d1803a036f6e9de6bdc7ec4be7af6350 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/contrib/ntlmpool.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/contrib/ntlmpool.py
@@ -1,9 +1,3 @@
-# urllib3/contrib/ntlmpool.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 """
 NTLM authenticating pool, contributed by erikcederstran
 
diff --git a/autoProcessTV/lib/requests/packages/urllib3/contrib/pyopenssl.py b/autoProcessTV/lib/requests/packages/urllib3/contrib/pyopenssl.py
index c3df278b1e3e00ee1528b66203e3368a3f71a00a..ee657fb3f2905560165e005280d9af9b5b58628d 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/contrib/pyopenssl.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/contrib/pyopenssl.py
@@ -1,4 +1,7 @@
-'''SSL with SNI_-support for Python 2.
+'''SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note that the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
 
 This needs the following packages installed:
 
@@ -6,9 +9,15 @@ This needs the following packages installed:
 * ndg-httpsclient (tested with 0.3.2)
 * pyasn1 (tested with 0.1.6)
 
-To activate it call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3`.
-This can be done in a ``sitecustomize`` module, or at any other time before
-your application begins using ``urllib3``, like this::
+You can install them with the following command:
+
+    pip install pyopenssl ndg-httpsclient pyasn1
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::
 
     try:
         import urllib3.contrib.pyopenssl
@@ -20,7 +29,7 @@ Now you can use :mod:`urllib3` as you normally would, and it will support SNI
 when the required modules are installed.
 
 Activating this module also has the positive side effect of disabling SSL/TLS
-encryption in Python 2 (see `CRIME attack`_).
+compression in Python 2 (see `CRIME attack`_).
 
 If you want to configure the default list of supported cipher suites, you can
 set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
@@ -29,24 +38,26 @@ Module Variables
 ----------------
 
 :var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
-    Default: ``EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA256
-    EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EDH+aRSA EECDH RC4 !aNULL !eNULL !LOW !3DES
-    !MD5 !EXP !PSK !SRP !DSS'``
+    Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
+    ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
 
 .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
 .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
 
 '''
 
-from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
-from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
+try:
+    from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
+    from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
+except SyntaxError as e:
+    raise ImportError(e)
+
 import OpenSSL.SSL
 from pyasn1.codec.der import decoder as der_decoder
 from pyasn1.type import univ, constraint
-from socket import _fileobject
+from socket import _fileobject, timeout
 import ssl
 import select
-from cStringIO import StringIO
 
 from .. import connection
 from .. import util
@@ -74,12 +85,22 @@ _openssl_verify = {
                        + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
 }
 
-# Default SSL/TLS cipher list.
-# Recommendation by https://community.qualys.com/blogs/securitylabs/2013/08/05/
-# configuring-apache-nginx-and-openssl-for-forward-secrecy
-DEFAULT_SSL_CIPHER_LIST = 'EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM ' + \
-        'EECDH+ECDSA+SHA256 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EDH+aRSA ' + \
-        'EECDH RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS'
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM over any AES-CBC for better performance and security,
+# - use 3DES as fallback which is secure but slow,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
+    "ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
+    "!aNULL:!MD5:!DSS"
 
 
 orig_util_HAS_SNI = util.HAS_SNI
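
As the module docstring notes, the hardened default above can be overridden
before injection; a brief sketch::

    from requests.packages.urllib3.contrib import pyopenssl

    # For example, drop the slow 3DES fallbacks entirely:
    pyopenssl.DEFAULT_SSL_CIPHER_LIST = (
        'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:'
        'ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS'
    )
    pyopenssl.inject_into_urllib3()
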
@@ -142,193 +163,73 @@ def get_subj_alt_name(peer_cert):
     return dns_name
 
 
-class fileobject(_fileobject):
-
-    def read(self, size=-1):
-        # Use max, disallow tiny reads in a loop as they are very inefficient.
-        # We never leave read() with any leftover data from a new recv() call
-        # in our internal buffer.
-        rbufsize = max(self._rbufsize, self.default_bufsize)
-        # Our use of StringIO rather than lists of string objects returned by
-        # recv() minimizes memory usage and fragmentation that occurs when
-        # rbufsize is large compared to the typical return value of recv().
-        buf = self._rbuf
-        buf.seek(0, 2)  # seek end
-        if size < 0:
-            # Read until EOF
-            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
-            while True:
-                try:
-                    data = self._sock.recv(rbufsize)
-                except OpenSSL.SSL.WantReadError:
-                    continue
-                if not data:
-                    break
-                buf.write(data)
-            return buf.getvalue()
-        else:
-            # Read until size bytes or EOF seen, whichever comes first
-            buf_len = buf.tell()
-            if buf_len >= size:
-                # Already have size bytes in our buffer?  Extract and return.
-                buf.seek(0)
-                rv = buf.read(size)
-                self._rbuf = StringIO()
-                self._rbuf.write(buf.read())
-                return rv
-
-            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
-            while True:
-                left = size - buf_len
-                # recv() will malloc the amount of memory given as its
-                # parameter even though it often returns much less data
-                # than that.  The returned data string is short lived
-                # as we copy it into a StringIO and free it.  This avoids
-                # fragmentation issues on many platforms.
-                try:
-                    data = self._sock.recv(left)
-                except OpenSSL.SSL.WantReadError:
-                    continue
-                if not data:
-                    break
-                n = len(data)
-                if n == size and not buf_len:
-                    # Shortcut.  Avoid buffer data copies when:
-                    # - We have no data in our buffer.
-                    # AND
-                    # - Our call to recv returned exactly the
-                    #   number of bytes we were asked to read.
-                    return data
-                if n == left:
-                    buf.write(data)
-                    del data  # explicit free
-                    break
-                assert n <= left, "recv(%d) returned %d bytes" % (left, n)
-                buf.write(data)
-                buf_len += n
-                del data  # explicit free
-                #assert buf_len == buf.tell()
-            return buf.getvalue()
-
-    def readline(self, size=-1):
-        buf = self._rbuf
-        buf.seek(0, 2)  # seek end
-        if buf.tell() > 0:
-            # check if we already have it in our buffer
-            buf.seek(0)
-            bline = buf.readline(size)
-            if bline.endswith('\n') or len(bline) == size:
-                self._rbuf = StringIO()
-                self._rbuf.write(buf.read())
-                return bline
-            del bline
-        if size < 0:
-            # Read until \n or EOF, whichever comes first
-            if self._rbufsize <= 1:
-                # Speed up unbuffered case
-                buf.seek(0)
-                buffers = [buf.read()]
-                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
-                data = None
-                recv = self._sock.recv
-                while True:
-                    try:
-                        while data != "\n":
-                            data = recv(1)
-                            if not data:
-                                break
-                            buffers.append(data)
-                    except OpenSSL.SSL.WantReadError:
-                        continue
-                    break
-                return "".join(buffers)
-
-            buf.seek(0, 2)  # seek end
-            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
-            while True:
-                try:
-                    data = self._sock.recv(self._rbufsize)
-                except OpenSSL.SSL.WantReadError:
-                    continue
-                if not data:
-                    break
-                nl = data.find('\n')
-                if nl >= 0:
-                    nl += 1
-                    buf.write(data[:nl])
-                    self._rbuf.write(data[nl:])
-                    del data
-                    break
-                buf.write(data)
-            return buf.getvalue()
-        else:
-            # Read until size bytes or \n or EOF seen, whichever comes first
-            buf.seek(0, 2)  # seek end
-            buf_len = buf.tell()
-            if buf_len >= size:
-                buf.seek(0)
-                rv = buf.read(size)
-                self._rbuf = StringIO()
-                self._rbuf.write(buf.read())
-                return rv
-            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
-            while True:
-                try:
-                    data = self._sock.recv(self._rbufsize)
-                except OpenSSL.SSL.WantReadError:
-                        continue
-                if not data:
-                    break
-                left = size - buf_len
-                # did we just receive a newline?
-                nl = data.find('\n', 0, left)
-                if nl >= 0:
-                    nl += 1
-                    # save the excess data to _rbuf
-                    self._rbuf.write(data[nl:])
-                    if buf_len:
-                        buf.write(data[:nl])
-                        break
-                    else:
-                        # Shortcut.  Avoid data copy through buf when returning
-                        # a substring of our first recv().
-                        return data[:nl]
-                n = len(data)
-                if n == size and not buf_len:
-                    # Shortcut.  Avoid data copy through buf when
-                    # returning exactly all of our first recv().
-                    return data
-                if n >= left:
-                    buf.write(data[:left])
-                    self._rbuf.write(data[left:])
-                    break
-                buf.write(data)
-                buf_len += n
-                #assert buf_len == buf.tell()
-            return buf.getvalue()
-
-
 class WrappedSocket(object):
-    '''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
+    '''API-compatibility wrapper for Python OpenSSL's Connection-class.
 
-    def __init__(self, connection, socket):
+    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+    collector of pypy.
+    '''
+
+    def __init__(self, connection, socket, suppress_ragged_eofs=True):
         self.connection = connection
         self.socket = socket
+        self.suppress_ragged_eofs = suppress_ragged_eofs
+        self._makefile_refs = 0
 
     def fileno(self):
         return self.socket.fileno()
 
     def makefile(self, mode, bufsize=-1):
-        return fileobject(self.connection, mode, bufsize)
+        self._makefile_refs += 1
+        return _fileobject(self, mode, bufsize, close=True)
+
+    def recv(self, *args, **kwargs):
+        try:
+            data = self.connection.recv(*args, **kwargs)
+        except OpenSSL.SSL.SysCallError as e:
+            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+                return b''
+            else:
+                raise
+        except OpenSSL.SSL.ZeroReturnError as e:
+            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+                return b''
+            else:
+                raise
+        except OpenSSL.SSL.WantReadError:
+            rd, wd, ed = select.select(
+                [self.socket], [], [], self.socket.gettimeout())
+            if not rd:
+                raise timeout('The read operation timed out')
+            else:
+                return self.recv(*args, **kwargs)
+        else:
+            return data
 
     def settimeout(self, timeout):
         return self.socket.settimeout(timeout)
 
+    def _send_until_done(self, data):
+        while True:
+            try:
+                return self.connection.send(data)
+            except OpenSSL.SSL.WantWriteError:
+                _, wlist, _ = select.select([], [self.socket], [],
+                                            self.socket.gettimeout())
+                if not wlist:
+                    raise timeout()
+                continue
+
     def sendall(self, data):
-        return self.connection.sendall(data)
+        while len(data):
+            sent = self._send_until_done(data)
+            data = data[sent:]
 
     def close(self):
-        return self.connection.shutdown()
+        if self._makefile_refs < 1:
+            return self.connection.shutdown()
+        else:
+            self._makefile_refs -= 1
 
     def getpeercert(self, binary_form=False):
         x509 = self.connection.get_peer_certificate()
@@ -351,6 +252,15 @@ class WrappedSocket(object):
             ]
         }
 
+    def _reuse(self):
+        self._makefile_refs += 1
+
+    def _drop(self):
+        if self._makefile_refs < 1:
+            self.close()
+        else:
+            self._makefile_refs -= 1
+
 
 def _verify_callback(cnx, x509, err_no, err_depth, return_code):
     return err_no == 0
@@ -361,6 +271,7 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                     ssl_version=None):
     ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
     if certfile:
+        keyfile = keyfile or certfile  # Match behaviour of the normal python ssl library
         ctx.use_certificate_file(certfile)
     if keyfile:
         ctx.use_privatekey_file(keyfile)
@@ -371,6 +282,8 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
             ctx.load_verify_locations(ca_certs, None)
         except OpenSSL.SSL.Error as e:
             raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
+    else:
+        ctx.set_default_verify_paths()
 
     # Disable TLS compression to mitigate CRIME attack (issue #309)
     OP_NO_COMPRESSION = 0x20000
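
A sketch of how the two new fallbacks behave for a direct caller; the
``server_hostname`` parameter is assumed from the upstream signature, which
this hunk does not show::

    import socket
    import ssl
    from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket

    sock = socket.create_connection(('example.org', 443))
    # With no ca_certs, verification now uses the system trust store via
    # set_default_verify_paths(); passing certfile without keyfile would
    # likewise reuse certfile as the key, matching the stdlib ssl module.
    wrapped = ssl_wrap_socket(sock, cert_reqs=ssl.CERT_REQUIRED,
                              server_hostname='example.org')
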
diff --git a/autoProcessTV/lib/requests/packages/urllib3/exceptions.py b/autoProcessTV/lib/requests/packages/urllib3/exceptions.py
index 98ef9abc7f054d586dc44b08f21476f54b00e79b..5d52301122e42dcd44f266be1257abceaf36f829 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/exceptions.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/exceptions.py
@@ -1,9 +1,3 @@
-# urllib3/exceptions.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 
 ## Base Exceptions
 
@@ -11,6 +5,11 @@ class HTTPError(Exception):
     "Base exception used by this module."
     pass
 
+class HTTPWarning(Warning):
+    "Base warning used by this module."
+    pass
+
+
 
 class PoolError(HTTPError):
     "Base exception for errors caused within a pool."
@@ -49,19 +48,32 @@ class DecodeError(HTTPError):
     pass
 
 
+class ProtocolError(HTTPError):
+    "Raised when something unexpected happens mid-request/response."
+    pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
 ## Leaf Exceptions
 
 class MaxRetryError(RequestError):
-    "Raised when the maximum number of retries is exceeded."
+    """Raised when the maximum number of retries is exceeded.
+
+    :param pool: The connection pool
+    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+    :param string url: The requested Url
+    :param exceptions.Exception reason: The underlying error
+
+    """
 
     def __init__(self, pool, url, reason=None):
         self.reason = reason
 
-        message = "Max retries exceeded with url: %s" % url
-        if reason:
-            message += " (Caused by %s: %s)" % (type(reason), reason)
-        else:
-            message += " (Caused by redirect)"
+        message = "Max retries exceeded with url: %s (Caused by %r)" % (
+            url, reason)
 
         RequestError.__init__(self, pool, url, message)
 
@@ -111,7 +123,12 @@ class ClosedPoolError(PoolError):
     pass
 
 
-class LocationParseError(ValueError, HTTPError):
+class LocationValueError(ValueError, HTTPError):
+    "Raised when there is something wrong with a given URL input."
+    pass
+
+
+class LocationParseError(LocationValueError):
     "Raised when get_host or similar fails to parse the URL input."
 
     def __init__(self, location):
@@ -119,3 +136,29 @@ class LocationParseError(ValueError, HTTPError):
         HTTPError.__init__(self, message)
 
         self.location = location
+
+
+class ResponseError(HTTPError):
+    "Used as a container for an error reason supplied in a MaxRetryError."
+    GENERIC_ERROR = 'too many error responses'
+    SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+    "Warned when perfoming security reducing actions"
+    pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+    "Warned when making an unverified HTTPS request."
+    pass
+
+
+class SystemTimeWarning(SecurityWarning):
+    "Warned when system time is suspected to be wrong"
+    pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+    "Warned when certain SSL configuration is not available on a platform."
+    pass
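
Seen from the catching side, the reworked hierarchy reads roughly like this
sketch; port 1 is just an always-refused placeholder::

    from requests.packages.urllib3 import PoolManager
    from requests.packages.urllib3.exceptions import MaxRetryError

    http = PoolManager()
    try:
        http.request('GET', 'http://127.0.0.1:1/', retries=2)
    except MaxRetryError as e:
        # The underlying cause now always rides along in .reason,
        # e.g. a ProtocolError for a refused connection.
        print(e.reason)
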
diff --git a/autoProcessTV/lib/requests/packages/urllib3/fields.py b/autoProcessTV/lib/requests/packages/urllib3/fields.py
index ed017657a29baae4193aa15ebe5d35c9185f084e..c853f8d56b5b2b5cb0d81dbb15e5e7245a1806d2 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/fields.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/fields.py
@@ -1,9 +1,3 @@
-# urllib3/fields.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 import email.utils
 import mimetypes
 
@@ -15,7 +9,7 @@ def guess_content_type(filename, default='application/octet-stream'):
     Guess the "Content-Type" of a file.
 
     :param filename:
-        The filename to guess the "Content-Type" of using :mod:`mimetimes`.
+        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
     :param default:
         If no "Content-Type" can be guessed, default to `default`.
     """
@@ -78,9 +72,10 @@ class RequestField(object):
         """
         A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
 
-        Supports constructing :class:`~urllib3.fields.RequestField` from parameter
-        of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
-        tuple where the MIME type is optional. For example: ::
+        Supports constructing :class:`~urllib3.fields.RequestField` from
+        parameter of key/value strings AND key/filetuple. A filetuple is a
+        (filename, data, MIME type) tuple where the MIME type is optional.
+        For example::
 
             'foo': 'bar',
             'fakefile': ('foofile.txt', 'contents of foofile'),
@@ -125,8 +120,8 @@ class RequestField(object):
         'Content-Disposition' fields.
 
         :param header_parts:
-            A sequence of (k, v) typles or a :class:`dict` of (k, v) to format as
-            `k1="v1"; k2="v2"; ...`.
+            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+            as `k1="v1"; k2="v2"; ...`.
         """
         parts = []
         iterable = header_parts
@@ -158,7 +153,8 @@ class RequestField(object):
         lines.append('\r\n')
         return '\r\n'.join(lines)
 
-    def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
+    def make_multipart(self, content_disposition=None, content_type=None,
+                       content_location=None):
         """
         Makes this request field into a multipart request field.
 
@@ -172,6 +168,10 @@ class RequestField(object):
 
         """
         self.headers['Content-Disposition'] = content_disposition or 'form-data'
-        self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))])
+        self.headers['Content-Disposition'] += '; '.join([
+            '', self._render_parts(
+                (('name', self._name), ('filename', self._filename))
+            )
+        ])
         self.headers['Content-Type'] = content_type
         self.headers['Content-Location'] = content_location
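
For reference, a small sketch of ``make_multipart`` on a field built by hand
(``render_headers`` is the existing method shown above)::

    from requests.packages.urllib3.fields import RequestField

    rf = RequestField('fakefile', 'contents of foofile',
                      filename='foofile.txt')
    rf.make_multipart(content_type='text/plain')
    print(rf.render_headers())
    # Content-Disposition: form-data; name="fakefile"; filename="foofile.txt"
    # Content-Type: text/plain
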
diff --git a/autoProcessTV/lib/requests/packages/urllib3/filepost.py b/autoProcessTV/lib/requests/packages/urllib3/filepost.py
index e8b30bddf2e631e3d349854a44434e45ebf0a2a7..0fbf488dfeeb92abdf5b2923f82566fd8ee0216a 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/filepost.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/filepost.py
@@ -1,11 +1,4 @@
-# urllib3/filepost.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 import codecs
-import mimetypes
 
 from uuid import uuid4
 from io import BytesIO
@@ -38,10 +31,10 @@ def iter_field_objects(fields):
         i = iter(fields)
 
     for field in i:
-      if isinstance(field, RequestField):
-        yield field
-      else:
-        yield RequestField.from_tuples(*field)
+        if isinstance(field, RequestField):
+            yield field
+        else:
+            yield RequestField.from_tuples(*field)
 
 
 def iter_fields(fields):
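
The re-indented generator above feeds ``encode_multipart_formdata``, which
accepts a dict, a list of tuples, or ready-made ``RequestField`` objects; a
quick sketch::

    from requests.packages.urllib3.filepost import encode_multipart_formdata

    body, content_type = encode_multipart_formdata({
        'foo': 'bar',
        'fakefile': ('foofile.txt', 'contents of foofile'),
    })
    # content_type is 'multipart/form-data; boundary=...' with a uuid4 hex
    # boundary unless one is passed explicitly.
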
diff --git a/autoProcessTV/lib/requests/packages/urllib3/packages/ordered_dict.py b/autoProcessTV/lib/requests/packages/urllib3/packages/ordered_dict.py
index 7f8ee15436061292aa8227e691f91903c6210a04..4479363cc45b5c7e0588072ccc19cab03fecce28 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/packages/ordered_dict.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/packages/ordered_dict.py
@@ -2,7 +2,6 @@
 # Passes Python2.7's test suite and incorporates all the latest updates.
 # Copyright 2009 Raymond Hettinger, released under the MIT License.
 # http://code.activestate.com/recipes/576693/
-
 try:
     from thread import get_ident as _get_ident
 except ImportError:
diff --git a/autoProcessTV/lib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py b/autoProcessTV/lib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
index 3aa5b2e19024c6e247e6a55f7aefb643c357b3f7..dd59a75fd305dda8a588bc9a6c98471edafbf88c 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
@@ -7,7 +7,7 @@ except ImportError:
         from backports.ssl_match_hostname import CertificateError, match_hostname
     except ImportError:
         # Our vendored copy
-        from _implementation import CertificateError, match_hostname
+        from ._implementation import CertificateError, match_hostname
 
 # Not needed, but documenting what we provide.
 __all__ = ('CertificateError', 'match_hostname')
diff --git a/autoProcessTV/lib/requests/packages/urllib3/poolmanager.py b/autoProcessTV/lib/requests/packages/urllib3/poolmanager.py
index f18ff2bb7ef2a6407322c5db011957600bccfcab..b8d1e745d14997f084d44edf0c17fc646ca8d7bc 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/poolmanager.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/poolmanager.py
@@ -1,9 +1,3 @@
-# urllib3/poolmanager.py
-# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 import logging
 
 try:  # Python 3
@@ -14,8 +8,10 @@ except ImportError:
 from ._collections import RecentlyUsedContainer
 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
 from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError, MaxRetryError
 from .request import RequestMethods
-from .util import parse_url
+from .util.url import parse_url
+from .util.retry import Retry
 
 
 __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
@@ -49,7 +45,7 @@ class PoolManager(RequestMethods):
         Additional parameters are used to create fresh
         :class:`urllib3.connectionpool.ConnectionPool` instances.
 
-    Example: ::
+    Example::
 
         >>> manager = PoolManager(num_pools=2)
         >>> r = manager.request('GET', 'http://google.com/')
@@ -68,6 +64,14 @@ class PoolManager(RequestMethods):
         self.pools = RecentlyUsedContainer(num_pools,
                                            dispose_func=lambda p: p.close())
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.clear()
+        # Return False to re-raise any potential exceptions
+        return False
+
     def _new_pool(self, scheme, host, port):
         """
         Create a new :class:`ConnectionPool` based on host, port and scheme.
@@ -102,10 +106,11 @@ class PoolManager(RequestMethods):
         ``urllib3.connectionpool.port_by_scheme``.
         """
 
-        scheme = scheme or 'http'
+        if not host:
+            raise LocationValueError("No host specified.")
 
+        scheme = scheme or 'http'
         port = port or port_by_scheme.get(scheme, 80)
-
         pool_key = (scheme, host, port)
 
         with self.pools.lock:
@@ -118,6 +123,7 @@ class PoolManager(RequestMethods):
             # Make a fresh ConnectionPool of the desired type
             pool = self._new_pool(scheme, host, port)
             self.pools[pool_key] = pool
+
         return pool
 
     def connection_from_url(self, url):
@@ -161,13 +167,25 @@ class PoolManager(RequestMethods):
         # Support relative URLs for redirecting.
         redirect_location = urljoin(url, redirect_location)
 
-        # RFC 2616, Section 10.3.4
+        # RFC 7231, Section 6.4.4
         if response.status == 303:
             method = 'GET'
 
-        log.info("Redirecting %s -> %s" % (url, redirect_location))
-        kw['retries'] = kw.get('retries', 3) - 1  # Persist retries countdown
+        retries = kw.get('retries')
+        if not isinstance(retries, Retry):
+            retries = Retry.from_int(retries, redirect=redirect)
+
+        try:
+            retries = retries.increment(method, url, response=response, _pool=conn)
+        except MaxRetryError:
+            if retries.raise_on_redirect:
+                raise
+            return response
+
+        kw['retries'] = retries
         kw['redirect'] = redirect
+
+        log.info("Redirecting %s -> %s" % (url, redirect_location))
         return self.urlopen(method, redirect_location, **kw)
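
Redirects are now charged against the same ``Retry`` budget, so callers can
tune or disable the old fixed countdown; a sketch, with httpbin.org as a
stand-in::

    from requests.packages.urllib3 import PoolManager
    from requests.packages.urllib3.util.retry import Retry

    http = PoolManager()
    # Follow at most two redirects, then hand back the last 3xx response
    # instead of raising (raise_on_redirect=False takes the early-return
    # branch above).
    r = http.request('GET', 'http://httpbin.org/redirect/6',
                     retries=Retry(redirect=2, raise_on_redirect=False))
    print(r.status)  # 302
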
 
 
@@ -208,12 +226,16 @@ class ProxyManager(PoolManager):
         if not proxy.port:
             port = port_by_scheme.get(proxy.scheme, 80)
             proxy = proxy._replace(port=port)
+
+        assert proxy.scheme in ("http", "https"), \
+            'Unsupported proxy scheme %s' % proxy.scheme
+
         self.proxy = proxy
         self.proxy_headers = proxy_headers or {}
-        assert self.proxy.scheme in ("http", "https"), \
-            'Not supported proxy scheme %s' % self.proxy.scheme
+
         connection_pool_kw['_proxy'] = self.proxy
         connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
         super(ProxyManager, self).__init__(
             num_pools, headers, **connection_pool_kw)
 
@@ -248,10 +270,10 @@ class ProxyManager(PoolManager):
             # For proxied HTTPS requests, httplib sets the necessary headers
             # on the CONNECT to the proxy. For HTTP, we'll definitely
             # need to set 'Host' at the very least.
-            kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
-                                                                self.headers))
+            headers = kw.get('headers', self.headers)
+            kw['headers'] = self._set_proxy_headers(url, headers)
 
-        return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
+        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
 
 
 def proxy_from_url(url, **kw):
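
``proxy_from_url`` is the usual way to build the manager above; a sketch
covering both schemes, with localhost:3128 as a placeholder proxy::

    from requests.packages.urllib3 import proxy_from_url

    proxy = proxy_from_url('http://localhost:3128/')
    # HTTP goes to the proxy with an explicit Host header (see
    # _set_proxy_headers above); HTTPS is tunnelled via CONNECT.
    proxy.request('GET', 'http://example.org/')
    proxy.request('GET', 'https://example.org/')
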
diff --git a/autoProcessTV/lib/requests/packages/urllib3/request.py b/autoProcessTV/lib/requests/packages/urllib3/request.py
index 2a92cc208343fcf2110ab59d654afaabeaeaab59..b08d6c92746a0d9952b6d9e3875dd125b7416af8 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/request.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/request.py
@@ -1,9 +1,3 @@
-# urllib3/request.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 try:
     from urllib.parse import urlencode
 except ImportError:
@@ -26,8 +20,8 @@ class RequestMethods(object):
 
     Specifically,
 
-    :meth:`.request_encode_url` is for sending requests whose fields are encoded
-    in the URL (such as GET, HEAD, DELETE).
+    :meth:`.request_encode_url` is for sending requests whose fields are
+    encoded in the URL (such as GET, HEAD, DELETE).
 
     :meth:`.request_encode_body` is for sending requests whose fields are
     encoded in the *body* of the request using multipart or www-form-urlencoded
@@ -51,7 +45,7 @@ class RequestMethods(object):
 
     def urlopen(self, method, url, body=None, headers=None,
                 encode_multipart=True, multipart_boundary=None,
-                **kw): # Abstract
+                **kw):  # Abstract
         raise NotImplementedError("Classes extending RequestMethods must implement "
                              "their own ``urlopen`` method.")
 
@@ -61,8 +55,8 @@ class RequestMethods(object):
         ``fields`` based on the ``method`` used.
 
         This is a convenience method that requires the least amount of manual
-        effort. It can be used in most situations, while still having the option
-        to drop down to more specific methods when necessary, such as
+        effort. It can be used in most situations, while still having the
+        option to drop down to more specific methods when necessary, such as
         :meth:`request_encode_url`, :meth:`request_encode_body`,
         or even the lowest level :meth:`urlopen`.
         """
@@ -70,12 +64,12 @@ class RequestMethods(object):
 
         if method in self._encode_url_methods:
             return self.request_encode_url(method, url, fields=fields,
-                                            headers=headers,
-                                            **urlopen_kw)
+                                           headers=headers,
+                                           **urlopen_kw)
         else:
             return self.request_encode_body(method, url, fields=fields,
-                                             headers=headers,
-                                             **urlopen_kw)
+                                            headers=headers,
+                                            **urlopen_kw)
 
     def request_encode_url(self, method, url, fields=None, **urlopen_kw):
         """
@@ -94,18 +88,18 @@ class RequestMethods(object):
         the body. This is useful for request methods like POST, PUT, PATCH, etc.
 
         When ``encode_multipart=True`` (default), then
-        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
-        payload with the appropriate content type. Otherwise
+        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+        the payload with the appropriate content type. Otherwise
         :meth:`urllib.urlencode` is used with the
         'application/x-www-form-urlencoded' content type.
 
         Multipart encoding must be used when posting files, and it's reasonably
-        safe to use it in other times too. However, it may break request signing,
-        such as with OAuth.
+        safe to use at other times too. However, it may break request
+        signing, such as with OAuth.
 
         Supports an optional ``fields`` parameter of key/value strings AND
         key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
-        the MIME type is optional. For example: ::
+        the MIME type is optional. For example::
 
             fields = {
                 'foo': 'bar',
@@ -119,23 +113,29 @@ class RequestMethods(object):
         When uploading a file, providing a filename (the first parameter of the
         tuple) is optional but recommended to best mimic the behavior of browsers.
 
-        Note that if ``headers`` are supplied, the 'Content-Type' header will be
-        overwritten because it depends on the dynamic random boundary string
+        Note that if ``headers`` are supplied, the 'Content-Type' header will
+        be overwritten because it depends on the dynamic random boundary string
         which is used to compose the body of the request. The random boundary
         string can be explicitly set with the ``multipart_boundary`` parameter.
         """
-        if encode_multipart:
-            body, content_type = encode_multipart_formdata(fields or {},
-                                    boundary=multipart_boundary)
-        else:
-            body, content_type = (urlencode(fields or {}),
-                                    'application/x-www-form-urlencoded')
-
         if headers is None:
             headers = self.headers
 
-        headers_ = {'Content-Type': content_type}
-        headers_.update(headers)
+        extra_kw = {'headers': {}}
+
+        if fields:
+            if 'body' in urlopen_kw:
+                raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.')
+
+            if encode_multipart:
+                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+            else:
+                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+            extra_kw['body'] = body
+            extra_kw['headers'] = {'Content-Type': content_type}
+
+        extra_kw['headers'].update(headers)
+        extra_kw.update(urlopen_kw)
 
-        return self.urlopen(method, url, body=body, headers=headers_,
-                            **urlopen_kw)
+        return self.urlopen(method, url, **extra_kw)
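
With the rework above, ``fields`` and ``body`` are mutually exclusive and
Content-Type is only forced when the body is generated here; a sketch::

    from requests.packages.urllib3 import PoolManager

    http = PoolManager()
    # Generated body: multipart by default, Content-Type set automatically.
    http.request('POST', 'http://httpbin.org/post', fields={'foo': 'bar'})
    # Caller-supplied body: passed through untouched.
    http.request('POST', 'http://httpbin.org/post', body='raw payload',
                 headers={'Content-Type': 'text/plain'})
    # Passing both fields and body now raises TypeError.
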
diff --git a/autoProcessTV/lib/requests/packages/urllib3/response.py b/autoProcessTV/lib/requests/packages/urllib3/response.py
index 6a1fe1a77ccc5c3a2702d978cc70cb74b0cbbc85..34cd3d7057fc71855db223b4e5b8fb939211a700 100644
--- a/autoProcessTV/lib/requests/packages/urllib3/response.py
+++ b/autoProcessTV/lib/requests/packages/urllib3/response.py
@@ -1,20 +1,12 @@
-# urllib3/response.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-import logging
 import zlib
 import io
+from socket import timeout as SocketTimeout
 
-from .exceptions import DecodeError
-from .packages.six import string_types as basestring, binary_type
-from .util import is_fp_closed
-
-
-log = logging.getLogger(__name__)
+from ._collections import HTTPHeaderDict
+from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
+from .packages.six import string_types as basestring, binary_type, PY3
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed
 
 
 class DeflateDecoder(object):
@@ -28,6 +20,9 @@ class DeflateDecoder(object):
         return getattr(self._obj, name)
 
     def decompress(self, data):
+        if not data:
+            return data
+
         if not self._first_try:
             return self._obj.decompress(data)
 
@@ -43,9 +38,23 @@ class DeflateDecoder(object):
                 self._data = None
 
 
+class GzipDecoder(object):
+
+    def __init__(self):
+        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+    def __getattr__(self, name):
+        return getattr(self._obj, name)
+
+    def decompress(self, data):
+        if not data:
+            return data
+        return self._obj.decompress(data)
+
+
 def _get_decoder(mode):
     if mode == 'gzip':
-        return zlib.decompressobj(16 + zlib.MAX_WBITS)
+        return GzipDecoder()
 
     return DeflateDecoder()
 
@@ -55,7 +64,10 @@ class HTTPResponse(io.IOBase):
     HTTP Response container.
 
     Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
-    loaded and decoded on-demand when the ``data`` property is accessed.
+    loaded and decoded on-demand when the ``data`` property is accessed.  This
+    class is also compatible with the Python standard library's :mod:`io`
+    module, and can hence be treated as a readable object in the context of that
+    framework.
 
     Extra parameters for behaviour not present in httplib.HTTPResponse:
 
@@ -79,7 +91,11 @@ class HTTPResponse(io.IOBase):
     def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                  strict=0, preload_content=True, decode_content=True,
                  original_response=None, pool=None, connection=None):
-        self.headers = headers or {}
+
+        if isinstance(headers, HTTPHeaderDict):
+            self.headers = headers
+        else:
+            self.headers = HTTPHeaderDict(headers)
         self.status = status
         self.version = version
         self.reason = reason
@@ -87,11 +103,14 @@ class HTTPResponse(io.IOBase):
         self.decode_content = decode_content
 
         self._decoder = None
-        self._body = body if body and isinstance(body, basestring) else None
+        self._body = None
         self._fp = None
         self._original_response = original_response
         self._fp_bytes_read = 0
 
+        if body and isinstance(body, (basestring, binary_type)):
+            self._body = body
+
         self._pool = pool
         self._connection = connection
 
@@ -159,8 +178,8 @@ class HTTPResponse(io.IOBase):
             after having ``.read()`` the file object. (Overridden if ``amt`` is
             set.)
         """
-        # Note: content-encoding value should be case-insensitive, per RFC 2616
-        # Section 3.5
+        # Note: content-encoding value should be case-insensitive, per RFC 7230
+        # Section 3.2
         content_encoding = self.headers.get('content-encoding', '').lower()
         if self._decoder is None:
             if content_encoding in self.CONTENT_DECODERS:
@@ -174,23 +193,42 @@ class HTTPResponse(io.IOBase):
         flush_decoder = False
 
         try:
-            if amt is None:
-                # cStringIO doesn't like amt=None
-                data = self._fp.read()
-                flush_decoder = True
-            else:
-                cache_content = False
-                data = self._fp.read(amt)
-                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
-                    # Close the connection when no data is returned
-                    #
-                    # This is redundant to what httplib/http.client _should_
-                    # already do.  However, versions of python released before
-                    # December 15, 2012 (http://bugs.python.org/issue16298) do not
-                    # properly close the connection in all cases. There is no harm
-                    # in redundantly calling close.
-                    self._fp.close()
+            try:
+                if amt is None:
+                    # cStringIO doesn't like amt=None
+                    data = self._fp.read()
                     flush_decoder = True
+                else:
+                    cache_content = False
+                    data = self._fp.read(amt)
+                    if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
+                        # Close the connection when no data is returned
+                        #
+                        # This is redundant to what httplib/http.client _should_
+                        # already do.  However, versions of python released before
+                        # December 15, 2012 (http://bugs.python.org/issue16298) do
+                        # not properly close the connection in all cases. There is
+                        # no harm in redundantly calling close.
+                        self._fp.close()
+                        flush_decoder = True
+
+            except SocketTimeout:
+                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+                # there is yet no clean way to get at it from this context.
+                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+            except BaseSSLError as e:
+                # FIXME: Is there a better way to differentiate between SSLErrors?
+                if 'read operation timed out' not in str(e):  # Defensive:
+                    # This shouldn't happen but just in case we're missing an edge
+                    # case, let's avoid swallowing SSL errors.
+                    raise
+
+                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+            except HTTPException as e:
+                # This includes IncompleteRead.
+                raise ProtocolError('Connection broken: %r' % e, e)
 
             self._fp_bytes_read += len(data)
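
The translation above means streaming callers deal in urllib3's own
exceptions rather than raw socket/httplib ones; a sketch, using httpbin's
drip endpoint for illustration::

    from requests.packages.urllib3 import PoolManager
    from requests.packages.urllib3.exceptions import (ProtocolError,
                                                      ReadTimeoutError)

    http = PoolManager()
    r = http.request('GET', 'http://httpbin.org/drip?duration=10',
                     preload_content=False, timeout=1.0)
    try:
        for chunk in r.stream(64):
            pass
    except ReadTimeoutError:
        pass  # socket and SSL read timeouts both land here now
    except ProtocolError:
        pass  # e.g. httplib.IncompleteRead mid-body
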
 
@@ -200,8 +238,7 @@ class HTTPResponse(io.IOBase):
             except (IOError, zlib.error) as e:
                 raise DecodeError(
                     "Received response with content-encoding: %s, but "
-                    "failed to decode it." % content_encoding,
-                    e)
+                    "failed to decode it." % content_encoding, e)
 
             if flush_decoder and decode_content and self._decoder:
                 buf = self._decoder.decompress(binary_type())
@@ -238,7 +275,6 @@ class HTTPResponse(io.IOBase):
             if data:
                 yield data
 
-
     @classmethod
     def from_httplib(ResponseCls, r, **response_kw):
         """
@@ -248,22 +284,16 @@ class HTTPResponse(io.IOBase):
         Remaining parameters are passed to the HTTPResponse constructor, along
         with ``original_response=r``.
         """
-
-        # Normalize headers between different versions of Python
-        headers = {}
-        for k, v in r.getheaders():
-            # Python 3: Header keys are returned capitalised
-            k = k.lower()
-
-            has_value = headers.get(k)
-            if has_value: # Python 3: Repeating header keys are unmerged.
-                v = ', '.join([has_value, v])
-
-            headers[k] = v
+        headers = r.msg
+        if not isinstance(headers, HTTPHeaderDict):
+            if PY3: # Python 3
+                headers = HTTPHeaderDict(headers.items())
+            else: # Python 2
+                headers = HTTPHeaderDict.from_httplib(headers)
 
         # HTTPResponse objects in Python 3 don't have a .strict attribute
         strict = getattr(r, 'strict', 0)
-        return ResponseCls(body=r,
+        resp = ResponseCls(body=r,
                            headers=headers,
                            status=r.status,
                            version=r.version,
@@ -271,6 +301,7 @@ class HTTPResponse(io.IOBase):
                            strict=strict,
                            original_response=r,
                            **response_kw)
+        return resp
 
     # Backwards-compatibility methods for httplib.HTTPResponse
     def getheaders(self):
@@ -301,7 +332,7 @@ class HTTPResponse(io.IOBase):
         elif hasattr(self._fp, "fileno"):
             return self._fp.fileno()
         else:
-            raise IOError("The file-like object  this HTTPResponse is wrapped "
+            raise IOError("The file-like object this HTTPResponse is wrapped "
                           "around has no file descriptor")
 
     def flush(self):
@@ -309,4 +340,14 @@ class HTTPResponse(io.IOBase):
             return self._fp.flush()
 
     def readable(self):
+        # This method is required for `io` module compatibility.
         return True
+
+    def readinto(self, b):
+        # This method is required for `io` module compatibility.
+        temp = self.read(len(b))
+        if len(temp) == 0:
+            return 0
+        else:
+            b[:len(temp)] = temp
+            return len(temp)
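
``readable()`` plus ``readinto()`` is the contract :class:`io.BufferedReader`
expects, so a response can now sit directly in the standard io stack; a
sketch::

    import io
    from requests.packages.urllib3 import PoolManager

    http = PoolManager()
    r = http.request('GET', 'http://httpbin.org/bytes/1024',
                     preload_content=False)
    reader = io.BufferedReader(r, 8)  # refills via r.readinto()
    first10 = reader.read(10)
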
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util.py b/autoProcessTV/lib/requests/packages/urllib3/util.py
deleted file mode 100644
index bd266317ff5e89535df7d101eff489b7ac3e9a90..0000000000000000000000000000000000000000
--- a/autoProcessTV/lib/requests/packages/urllib3/util.py
+++ /dev/null
@@ -1,648 +0,0 @@
-# urllib3/util.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-from base64 import b64encode
-from binascii import hexlify, unhexlify
-from collections import namedtuple
-from hashlib import md5, sha1
-from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT
-import time
-
-try:
-    from select import poll, POLLIN
-except ImportError:  # `poll` doesn't exist on OSX and other platforms
-    poll = False
-    try:
-        from select import select
-    except ImportError:  # `select` doesn't exist on AppEngine.
-        select = False
-
-try:  # Test for SSL features
-    SSLContext = None
-    HAS_SNI = False
-
-    import ssl
-    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
-    from ssl import SSLContext  # Modern SSL?
-    from ssl import HAS_SNI  # Has SNI?
-except ImportError:
-    pass
-
-from .packages import six
-from .exceptions import LocationParseError, SSLError, TimeoutStateError
-
-
-_Default = object()
-# The default timeout to use for socket connections. This is the attribute used
-# by httplib to define the default timeout
-
-
-def current_time():
-    """
-    Retrieve the current time, this function is mocked out in unit testing.
-    """
-    return time.time()
-
-
-class Timeout(object):
-    """
-    Utility object for storing timeout values.
-
-    Example usage:
-
-    .. code-block:: python
-
-        timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
-        pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
-        pool.request(...) # Etc, etc
-
-    :param connect:
-        The maximum amount of time to wait for a connection attempt to a server
-        to succeed. Omitting the parameter will default the connect timeout to
-        the system default, probably `the global default timeout in socket.py
-        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
-        None will set an infinite timeout for connection attempts.
-
-    :type connect: integer, float, or None
-
-    :param read:
-        The maximum amount of time to wait between consecutive
-        read operations for a response from the server. Omitting
-        the parameter will default the read timeout to the system
-        default, probably `the global default timeout in socket.py
-        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
-        None will set an infinite timeout.
-
-    :type read: integer, float, or None
-
-    :param total:
-        This combines the connect and read timeouts into one; the read timeout
-        will be set to the time leftover from the connect attempt. In the
-        event that both a connect timeout and a total are specified, or a read
-        timeout and a total are specified, the shorter timeout will be applied.
-
-        Defaults to None.
-
-    :type total: integer, float, or None
-
-    .. note::
-
-        Many factors can affect the total amount of time for urllib3 to return
-        an HTTP response. Specifically, Python's DNS resolver does not obey the
-        timeout specified on the socket. Other factors that can affect total
-        request time include high CPU load, high swap, the program running at a
-        low priority level, or other behaviors. The observed running time for
-        urllib3 to return a response may be greater than the value passed to
-        `total`.
-
-        In addition, the read and total timeouts only measure the time between
-        read operations on the socket connecting the client and the server,
-        not the total amount of time for the request to return a complete
-        response. For most requests, the timeout is raised because the server
-        has not sent the first byte in the specified time. This is not always
-        the case; if a server streams one byte every fifteen seconds, a timeout
-        of 20 seconds will not ever trigger, even though the request will
-        take several minutes to complete.
-
-        If your goal is to cut off any request after a set amount of wall clock
-        time, consider having a second "watcher" thread to cut off a slow
-        request.
-    """
-
-    #: A sentinel object representing the default timeout value
-    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
-
-    def __init__(self, total=None, connect=_Default, read=_Default):
-        self._connect = self._validate_timeout(connect, 'connect')
-        self._read = self._validate_timeout(read, 'read')
-        self.total = self._validate_timeout(total, 'total')
-        self._start_connect = None
-
-    def __str__(self):
-        return '%s(connect=%r, read=%r, total=%r)' % (
-            type(self).__name__, self._connect, self._read, self.total)
-
-
-    @classmethod
-    def _validate_timeout(cls, value, name):
-        """ Check that a timeout attribute is valid
-
-        :param value: The timeout value to validate
-        :param name: The name of the timeout attribute to validate. This is used
-            for clear error messages
-        :return: the value
-        :raises ValueError: if the type is not an integer or a float, or if it
-            is a numeric value less than zero
-        """
-        if value is _Default:
-            return cls.DEFAULT_TIMEOUT
-
-        if value is None or value is cls.DEFAULT_TIMEOUT:
-            return value
-
-        try:
-            float(value)
-        except (TypeError, ValueError):
-            raise ValueError("Timeout value %s was %s, but it must be an "
-                             "int or float." % (name, value))
-
-        try:
-            if value < 0:
-                raise ValueError("Attempted to set %s timeout to %s, but the "
-                                 "timeout cannot be set to a value less "
-                                 "than 0." % (name, value))
-        except TypeError: # Python 3
-            raise ValueError("Timeout value %s was %s, but it must be an "
-                             "int or float." % (name, value))
-
-        return value
-
-    @classmethod
-    def from_float(cls, timeout):
-        """ Create a new Timeout from a legacy timeout value.
-
-        The timeout value used by httplib.py sets the same timeout on the
-        connect(), and recv() socket requests. This creates a :class:`Timeout`
-        object that sets the individual timeouts to the ``timeout`` value passed
-        to this function.
-
-        :param timeout: The legacy timeout value
-        :type timeout: integer, float, sentinel default object, or None
-        :return: a Timeout object
-        :rtype: :class:`Timeout`
-        """
-        return Timeout(read=timeout, connect=timeout)
-
-    def clone(self):
-        """ Create a copy of the timeout object
-
-        Timeout properties are stored per-pool but each request needs a fresh
-        Timeout object to ensure each one has its own start/stop configured.
-
-        :return: a copy of the timeout object
-        :rtype: :class:`Timeout`
-        """
-        # We can't use copy.deepcopy because that will also create a new object
-        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
-        # detect the user default.
-        return Timeout(connect=self._connect, read=self._read,
-                       total=self.total)
-
-    def start_connect(self):
-        """ Start the timeout clock, used during a connect() attempt
-
-        :raises urllib3.exceptions.TimeoutStateError: if you attempt
-            to start a timer that has been started already.
-        """
-        if self._start_connect is not None:
-            raise TimeoutStateError("Timeout timer has already been started.")
-        self._start_connect = current_time()
-        return self._start_connect
-
-    def get_connect_duration(self):
-        """ Gets the time elapsed since the call to :meth:`start_connect`.
-
-        :return: the elapsed time
-        :rtype: float
-        :raises urllib3.exceptions.TimeoutStateError: if you attempt
-            to get duration for a timer that hasn't been started.
-        """
-        if self._start_connect is None:
-            raise TimeoutStateError("Can't get connect duration for timer "
-                                    "that has not started.")
-        return current_time() - self._start_connect
-
-    @property
-    def connect_timeout(self):
-        """ Get the value to use when setting a connection timeout.
-
-        This will be a positive float or integer, the value None
-        (never timeout), or the default system timeout.
-
-        :return: the connect timeout
-        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
-        """
-        if self.total is None:
-            return self._connect
-
-        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
-            return self.total
-
-        return min(self._connect, self.total)
-
-    @property
-    def read_timeout(self):
-        """ Get the value for the read timeout.
-
-        This assumes some time has elapsed in the connection timeout and
-        computes the read timeout appropriately.
-
-        If self.total is set, the read timeout is dependent on the amount of
-        time taken by the connect timeout. If the connection time has not been
-        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
-        raised.
-
-        :return: the value to use for the read timeout
-        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
-        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
-            has not yet been called on this object.
-        """
-        if (self.total is not None and
-            self.total is not self.DEFAULT_TIMEOUT and
-            self._read is not None and
-            self._read is not self.DEFAULT_TIMEOUT):
-            # in case the connect timeout has not yet been established.
-            if self._start_connect is None:
-                return self._read
-            return max(0, min(self.total - self.get_connect_duration(),
-                              self._read))
-        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
-            return max(0, self.total - self.get_connect_duration())
-        else:
-            return self._read
-
-
-class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
-    """
-    Datastructure for representing an HTTP URL. Used as a return value for
-    :func:`parse_url`.
-    """
-    slots = ()
-
-    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
-        return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
-
-    @property
-    def hostname(self):
-        """For backwards-compatibility with urlparse. We're nice like that."""
-        return self.host
-
-    @property
-    def request_uri(self):
-        """Absolute path including the query string."""
-        uri = self.path or '/'
-
-        if self.query is not None:
-            uri += '?' + self.query
-
-        return uri
-
-    @property
-    def netloc(self):
-        """Network location including host and port"""
-        if self.port:
-            return '%s:%d' % (self.host, self.port)
-        return self.host
-
-
-def split_first(s, delims):
-    """
-    Given a string and an iterable of delimiters, split on the first found
-    delimiter. Return two split parts and the matched delimiter.
-
-    If not found, then the first part is the full input string.
-
-    Example: ::
-
-        >>> split_first('foo/bar?baz', '?/=')
-        ('foo', 'bar?baz', '/')
-        >>> split_first('foo/bar?baz', '123')
-        ('foo/bar?baz', '', None)
-
-    Scales linearly with number of delims. Not ideal for large number of delims.
-    """
-    min_idx = None
-    min_delim = None
-    for d in delims:
-        idx = s.find(d)
-        if idx < 0:
-            continue
-
-        if min_idx is None or idx < min_idx:
-            min_idx = idx
-            min_delim = d
-
-    if min_idx is None or min_idx < 0:
-        return s, '', None
-
-    return s[:min_idx], s[min_idx+1:], min_delim
-
-
-def parse_url(url):
-    """
-    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
-    performed to parse incomplete urls. Fields not provided will be None.
-
-    Partly backwards-compatible with :mod:`urlparse`.
-
-    Example: ::
-
-        >>> parse_url('http://google.com/mail/')
-        Url(scheme='http', host='google.com', port=None, path='/', ...)
-        >>> parse_url('google.com:80')
-        Url(scheme=None, host='google.com', port=80, path=None, ...)
-        >>> parse_url('/foo?bar')
-        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
-    """
-
-    # While this code has overlap with stdlib's urlparse, it is much
-    # simplified for our needs and less annoying.
-    # Additionally, this implementations does silly things to be optimal
-    # on CPython.
-
-    scheme = None
-    auth = None
-    host = None
-    port = None
-    path = None
-    fragment = None
-    query = None
-
-    # Scheme
-    if '://' in url:
-        scheme, url = url.split('://', 1)
-
-    # Find the earliest Authority Terminator
-    # (http://tools.ietf.org/html/rfc3986#section-3.2)
-    url, path_, delim = split_first(url, ['/', '?', '#'])
-
-    if delim:
-        # Reassemble the path
-        path = delim + path_
-
-    # Auth
-    if '@' in url:
-        # Last '@' denotes end of auth part
-        auth, url = url.rsplit('@', 1)
-
-    # IPv6
-    if url and url[0] == '[':
-        host, url = url.split(']', 1)
-        host += ']'
-
-    # Port
-    if ':' in url:
-        _host, port = url.split(':', 1)
-
-        if not host:
-            host = _host
-
-        if port:
-            # If given, ports must be integers.
-            if not port.isdigit():
-                raise LocationParseError("Failed to parse: %s" % url)
-            port = int(port)
-        else:
-            # Blank ports are cool, too. (rfc3986#section-3.2.3)
-            port = None
-
-    elif not host and url:
-        host = url
-
-    if not path:
-        return Url(scheme, auth, host, port, path, query, fragment)
-
-    # Fragment
-    if '#' in path:
-        path, fragment = path.split('#', 1)
-
-    # Query
-    if '?' in path:
-        path, query = path.split('?', 1)
-
-    return Url(scheme, auth, host, port, path, query, fragment)
-
-
-def get_host(url):
-    """
-    Deprecated. Use :func:`.parse_url` instead.
-    """
-    p = parse_url(url)
-    return p.scheme or 'http', p.hostname, p.port
-
-
-def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
-                 basic_auth=None, proxy_basic_auth=None):
-    """
-    Shortcuts for generating request headers.
-
-    :param keep_alive:
-        If ``True``, adds 'connection: keep-alive' header.
-
-    :param accept_encoding:
-        Can be a boolean, list, or string.
-        ``True`` translates to 'gzip,deflate'.
-        List will get joined by comma.
-        String will be used as provided.
-
-    :param user_agent:
-        String representing the user-agent you want, such as
-        "python-urllib3/0.6"
-
-    :param basic_auth:
-        Colon-separated username:password string for 'authorization: basic ...'
-        auth header.
-
-    :param proxy_basic_auth:
-        Colon-separated username:password string for 'proxy-authorization: basic ...'
-        auth header.
-
-    Example: ::
-
-        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
-        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
-        >>> make_headers(accept_encoding=True)
-        {'accept-encoding': 'gzip,deflate'}
-    """
-    headers = {}
-    if accept_encoding:
-        if isinstance(accept_encoding, str):
-            pass
-        elif isinstance(accept_encoding, list):
-            accept_encoding = ','.join(accept_encoding)
-        else:
-            accept_encoding = 'gzip,deflate'
-        headers['accept-encoding'] = accept_encoding
-
-    if user_agent:
-        headers['user-agent'] = user_agent
-
-    if keep_alive:
-        headers['connection'] = 'keep-alive'
-
-    if basic_auth:
-        headers['authorization'] = 'Basic ' + \
-            b64encode(six.b(basic_auth)).decode('utf-8')
-
-    if proxy_basic_auth:
-        headers['proxy-authorization'] = 'Basic ' + \
-            b64encode(six.b(proxy_basic_auth)).decode('utf-8')
-
-    return headers
-
-
-def is_connection_dropped(conn):  # Platform-specific
-    """
-    Returns True if the connection is dropped and should be closed.
-
-    :param conn:
-        :class:`httplib.HTTPConnection` object.
-
-    Note: For platforms like AppEngine, this will always return ``False`` to
-    let the platform handle connection recycling transparently for us.
-    """
-    sock = getattr(conn, 'sock', False)
-    if not sock: # Platform-specific: AppEngine
-        return False
-
-    if not poll:
-        if not select: # Platform-specific: AppEngine
-            return False
-
-        try:
-            return select([sock], [], [], 0.0)[0]
-        except SocketError:
-            return True
-
-    # This version is better on platforms that support it.
-    p = poll()
-    p.register(sock, POLLIN)
-    for (fno, ev) in p.poll(0.0):
-        if fno == sock.fileno():
-            # Either data is buffered (bad), or the connection is dropped.
-            return True
-
-
-def resolve_cert_reqs(candidate):
-    """
-    Resolves the argument to a numeric constant, which can be passed to
-    the wrap_socket function/method from the ssl module.
-    Defaults to :data:`ssl.CERT_NONE`.
-    If given a string it is assumed to be the name of the constant in the
-    :mod:`ssl` module or its abbrevation.
-    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
-    If it's neither `None` nor a string we assume it is already the numeric
-    constant which can directly be passed to wrap_socket.
-    """
-    if candidate is None:
-        return CERT_NONE
-
-    if isinstance(candidate, str):
-        res = getattr(ssl, candidate, None)
-        if res is None:
-            res = getattr(ssl, 'CERT_' + candidate)
-        return res
-
-    return candidate
-
-
-def resolve_ssl_version(candidate):
-    """
-    like resolve_cert_reqs
-    """
-    if candidate is None:
-        return PROTOCOL_SSLv23
-
-    if isinstance(candidate, str):
-        res = getattr(ssl, candidate, None)
-        if res is None:
-            res = getattr(ssl, 'PROTOCOL_' + candidate)
-        return res
-
-    return candidate
-
-
-def assert_fingerprint(cert, fingerprint):
-    """
-    Checks if given fingerprint matches the supplied certificate.
-
-    :param cert:
-        Certificate as bytes object.
-    :param fingerprint:
-        Fingerprint as string of hexdigits, can be interspersed by colons.
-    """
-
-    # Maps the length of a digest to a possible hash function producing
-    # this digest.
-    hashfunc_map = {
-        16: md5,
-        20: sha1
-    }
-
-    fingerprint = fingerprint.replace(':', '').lower()
-
-    digest_length, rest = divmod(len(fingerprint), 2)
-
-    if rest or digest_length not in hashfunc_map:
-        raise SSLError('Fingerprint is of invalid length.')
-
-    # We need encode() here for py32; works on py2 and p33.
-    fingerprint_bytes = unhexlify(fingerprint.encode())
-
-    hashfunc = hashfunc_map[digest_length]
-
-    cert_digest = hashfunc(cert).digest()
-
-    if not cert_digest == fingerprint_bytes:
-        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
-                       .format(hexlify(fingerprint_bytes),
-                               hexlify(cert_digest)))
-
-def is_fp_closed(obj):
-    """
-    Checks whether a given file-like object is closed.
-
-    :param obj:
-        The file-like object to check.
-    """
-    if hasattr(obj, 'fp'):
-        # Object is a container for another file-like object that gets released
-        # on exhaustion (e.g. HTTPResponse)
-        return obj.fp is None
-
-    return obj.closed
-
-
-if SSLContext is not None:  # Python 3.2+
-    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
-                        ca_certs=None, server_hostname=None,
-                        ssl_version=None):
-        """
-        All arguments except `server_hostname` have the same meaning as for
-        :func:`ssl.wrap_socket`
-
-        :param server_hostname:
-            Hostname of the expected certificate
-        """
-        context = SSLContext(ssl_version)
-        context.verify_mode = cert_reqs
-
-        # Disable TLS compression to migitate CRIME attack (issue #309)
-        OP_NO_COMPRESSION = 0x20000
-        context.options |= OP_NO_COMPRESSION
-
-        if ca_certs:
-            try:
-                context.load_verify_locations(ca_certs)
-            # Py32 raises IOError
-            # Py33 raises FileNotFoundError
-            except Exception as e:  # Reraise as SSLError
-                raise SSLError(e)
-        if certfile:
-            # FIXME: This block needs a test.
-            context.load_cert_chain(certfile, keyfile)
-        if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
-            return context.wrap_socket(sock, server_hostname=server_hostname)
-        return context.wrap_socket(sock)
-
-else:  # Python 3.1 and earlier
-    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
-                        ca_certs=None, server_hostname=None,
-                        ssl_version=None):
-        return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
-                           ca_certs=ca_certs, cert_reqs=cert_reqs,
-                           ssl_version=ssl_version)
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/__init__.py b/autoProcessTV/lib/requests/packages/urllib3/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8becc81433b79c81d2206de64d99443625aff300
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/__init__.py
@@ -0,0 +1,24 @@
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import make_headers
+from .response import is_fp_closed
+from .ssl_ import (
+    SSLContext,
+    HAS_SNI,
+    assert_fingerprint,
+    resolve_cert_reqs,
+    resolve_ssl_version,
+    ssl_wrap_socket,
+)
+from .timeout import (
+    current_time,
+    Timeout,
+)
+
+from .retry import Retry
+from .url import (
+    get_host,
+    parse_url,
+    split_first,
+    Url,
+)
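
Since this module only re-exports names that used to live in the flat util.py, callers written against the old layout keep working. A minimal sketch (assuming the vendored lib directory is on sys.path, so the package imports as requests.packages.urllib3):

    from requests.packages.urllib3.util import Timeout, parse_url
    from requests.packages.urllib3.util.timeout import Timeout as _Timeout

    assert Timeout is _Timeout  # the shim and the submodule expose the same class
    print(parse_url('http://example.com/mail/'))
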
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/connection.py b/autoProcessTV/lib/requests/packages/urllib3/util/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..859aec6ee6be0f6e2e59753ad7fb41c9370a3073
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/connection.py
@@ -0,0 +1,98 @@
+import socket
+try:
+    from select import poll, POLLIN
+except ImportError:  # `poll` doesn't exist on OSX and other platforms
+    poll = False
+    try:
+        from select import select
+    except ImportError:  # `select` doesn't exist on AppEngine.
+        select = False
+
+
+def is_connection_dropped(conn):  # Platform-specific
+    """
+    Returns True if the connection is dropped and should be closed.
+
+    :param conn:
+        :class:`httplib.HTTPConnection` object.
+
+    Note: For platforms like AppEngine, this will always return ``False`` to
+    let the platform handle connection recycling transparently for us.
+    """
+    sock = getattr(conn, 'sock', False)
+    if sock is False:  # Platform-specific: AppEngine
+        return False
+    if sock is None:  # Connection already closed (such as by httplib).
+        return True
+
+    if not poll:
+        if not select:  # Platform-specific: AppEngine
+            return False
+
+        try:
+            return select([sock], [], [], 0.0)[0]
+        except socket.error:
+            return True
+
+    # This version is better on platforms that support it.
+    p = poll()
+    p.register(sock, POLLIN)
+    for (fno, ev) in p.poll(0.0):
+        if fno == sock.fileno():
+            # Either data is buffered (bad), or the connection is dropped.
+            return True
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. Added to its signature is only `socket_options`.
+def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+                      source_address=None, socket_options=None):
+    """Connect to *address* and return the socket object.
+
+    Convenience function.  Connect to *address* (a 2-tuple ``(host,
+    port)``) and return the socket object.  Passing the optional
+    *timeout* parameter will set the timeout on the socket instance
+    before attempting to connect.  If no *timeout* is supplied, the
+    global default timeout setting returned by :func:`getdefaulttimeout`
+    is used.  If *source_address* is set it must be a tuple of (host, port)
+    for the socket to bind as a source address before making the connection.
+    A host of '' or port 0 tells the OS to use the default.
+    """
+
+    host, port = address
+    err = None
+    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+        af, socktype, proto, canonname, sa = res
+        sock = None
+        try:
+            sock = socket.socket(af, socktype, proto)
+
+            # If provided, set socket level options before connecting.
+            # This is the only addition urllib3 makes to this function.
+            _set_socket_options(sock, socket_options)
+
+            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+                sock.settimeout(timeout)
+            if source_address:
+                sock.bind(source_address)
+            sock.connect(sa)
+            return sock
+
+        except socket.error as _:
+            err = _
+            if sock is not None:
+                sock.close()
+                sock = None
+
+    if err is not None:
+        raise err
+    else:
+        raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+    if options is None:
+        return
+
+    for opt in options:
+        sock.setsockopt(*opt)
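
The only departure from the stdlib helper is the socket_options argument, a sequence of setsockopt() tuples applied to the socket before connect(). A minimal sketch (the host and port are placeholders and assume network access):

    import socket
    from requests.packages.urllib3.util.connection import create_connection

    # Disable Nagle's algorithm before the socket connects.
    sock = create_connection(
        ('example.com', 80), timeout=5.0,
        socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])
    sock.close()
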
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/request.py b/autoProcessTV/lib/requests/packages/urllib3/util/request.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc64f6b1fb6440f90e4f2fbb3220a82f9899816f
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/request.py
@@ -0,0 +1,71 @@
+from base64 import b64encode
+
+from ..packages.six import b
+
+ACCEPT_ENCODING = 'gzip,deflate'
+
+
+def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
+                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
+    """
+    Shortcuts for generating request headers.
+
+    :param keep_alive:
+        If ``True``, adds 'connection: keep-alive' header.
+
+    :param accept_encoding:
+        Can be a boolean, list, or string.
+        ``True`` translates to 'gzip,deflate'.
+        List will get joined by comma.
+        String will be used as provided.
+
+    :param user_agent:
+        String representing the user-agent you want, such as
+        "python-urllib3/0.6"
+
+    :param basic_auth:
+        Colon-separated username:password string for 'authorization: basic ...'
+        auth header.
+
+    :param proxy_basic_auth:
+        Colon-separated username:password string for 'proxy-authorization: basic ...'
+        auth header.
+
+    :param disable_cache:
+        If ``True``, adds 'cache-control: no-cache' header.
+
+    Example::
+
+        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+        >>> make_headers(accept_encoding=True)
+        {'accept-encoding': 'gzip,deflate'}
+    """
+    headers = {}
+    if accept_encoding:
+        if isinstance(accept_encoding, str):
+            pass
+        elif isinstance(accept_encoding, list):
+            accept_encoding = ','.join(accept_encoding)
+        else:
+            accept_encoding = ACCEPT_ENCODING
+        headers['accept-encoding'] = accept_encoding
+
+    if user_agent:
+        headers['user-agent'] = user_agent
+
+    if keep_alive:
+        headers['connection'] = 'keep-alive'
+
+    if basic_auth:
+        headers['authorization'] = 'Basic ' + \
+            b64encode(b(basic_auth)).decode('utf-8')
+
+    if proxy_basic_auth:
+        headers['proxy-authorization'] = 'Basic ' + \
+            b64encode(b(proxy_basic_auth)).decode('utf-8')
+
+    if disable_cache:
+        headers['cache-control'] = 'no-cache'
+
+    return headers
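
A minimal sketch of the shortcuts above; the credentials are placeholders:

    from requests.packages.urllib3.util.request import make_headers

    # 'user:pass' is base64-encoded into a Basic Authorization header.
    headers = make_headers(basic_auth='user:pass', disable_cache=True)
    assert headers == {'authorization': 'Basic dXNlcjpwYXNz',
                       'cache-control': 'no-cache'}
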
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/response.py b/autoProcessTV/lib/requests/packages/urllib3/util/response.py
new file mode 100644
index 0000000000000000000000000000000000000000..45fff55246e591cc3337f731d989c37133847850
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/response.py
@@ -0,0 +1,22 @@
+def is_fp_closed(obj):
+    """
+    Checks whether a given file-like object is closed.
+
+    :param obj:
+        The file-like object to check.
+    """
+
+    try:
+        # Check via the official file-like-object way.
+        return obj.closed
+    except AttributeError:
+        pass
+
+    try:
+        # Check if the object is a container for another file-like object that
+        # gets released on exhaustion (e.g. HTTPResponse).
+        return obj.fp is None
+    except AttributeError:
+        pass
+
+    raise ValueError("Unable to determine whether fp is closed.")
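
Any object exposing either a closed or an fp attribute is accepted; anything else raises ValueError. A minimal sketch with a plain in-memory buffer:

    import io
    from requests.packages.urllib3.util.response import is_fp_closed

    buf = io.BytesIO(b'body')
    assert is_fp_closed(buf) is False  # resolved via the official .closed attribute
    buf.close()
    assert is_fp_closed(buf) is True
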
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/retry.py b/autoProcessTV/lib/requests/packages/urllib3/util/retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e0959df37c1be566c2ff83bf1bbf84a11467a45
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/retry.py
@@ -0,0 +1,285 @@
+import time
+import logging
+
+from ..exceptions import (
+    ConnectTimeoutError,
+    MaxRetryError,
+    ProtocolError,
+    ReadTimeoutError,
+    ResponseError,
+)
+from ..packages import six
+
+
+log = logging.getLogger(__name__)
+
+
+class Retry(object):
+    """ Retry configuration.
+
+    Each retry attempt will create a new Retry object with updated values, so
+    they can be safely reused.
+
+    Retries can be defined as a default for a pool::
+
+        retries = Retry(connect=5, read=2, redirect=5)
+        http = PoolManager(retries=retries)
+        response = http.request('GET', 'http://example.com/')
+
+    Or per-request (which overrides the default for the pool)::
+
+        response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+    Retries can be disabled by passing ``False``::
+
+        response = http.request('GET', 'http://example.com/', retries=False)
+
+    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+    retries are disabled, in which case the causing exception will be raised.
+
+    :param int total:
+        Total number of retries to allow. Takes precedence over other counts.
+
+        Set to ``None`` to remove this constraint and fall back on other
+        counts. It's a good idea to set this to some sensibly-high value to
+        account for unexpected edge cases and avoid infinite retry loops.
+
+        Set to ``0`` to fail on the first retry.
+
+        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+    :param int connect:
+        How many connection-related errors to retry on.
+
+        These are errors raised before the request is sent to the remote
+        server, so we assume the server has not begun processing the request.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param int read:
+        How many times to retry on read errors.
+
+        These errors are raised after the request was sent to the server, so the
+        request may have side-effects.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param int redirect:
+        How many redirects to perform. Limit this to avoid infinite redirect
+        loops.
+
+        A redirect is an HTTP response with a status code of 301, 302, 303, 307 or
+        308.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+    :param iterable method_whitelist:
+        Set of uppercased HTTP method verbs that we should retry on.
+
+        By default, we only retry on methods which are considered to be
+        idempotent (multiple requests with the same parameters end with the
+        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
+
+    :param iterable status_forcelist:
+        A set of HTTP status codes that we should force a retry on.
+
+        By default, this is disabled with ``None``.
+
+    :param float backoff_factor:
+        A backoff factor to apply between attempts. urllib3 will sleep for::
+
+            {backoff factor} * (2 ^ ({number of total retries} - 1))
+
+        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+        for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+        than :attr:`Retry.BACKOFF_MAX`.
+
+        By default, backoff is disabled (set to 0).
+
+    :param bool raise_on_redirect: Whether, if the number of redirects is
+        exhausted, to raise a MaxRetryError, or to return a response with a
+        response code in the 3xx range.
+    """
+
+    DEFAULT_METHOD_WHITELIST = frozenset([
+        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
+
+    #: Maximum backoff time.
+    BACKOFF_MAX = 120
+
+    def __init__(self, total=10, connect=None, read=None, redirect=None,
+                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
+                 backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
+
+        self.total = total
+        self.connect = connect
+        self.read = read
+
+        if redirect is False or total is False:
+            redirect = 0
+            raise_on_redirect = False
+
+        self.redirect = redirect
+        self.status_forcelist = status_forcelist or set()
+        self.method_whitelist = method_whitelist
+        self.backoff_factor = backoff_factor
+        self.raise_on_redirect = raise_on_redirect
+        self._observed_errors = _observed_errors  # TODO: use .history instead?
+
+    def new(self, **kw):
+        params = dict(
+            total=self.total,
+            connect=self.connect, read=self.read, redirect=self.redirect,
+            method_whitelist=self.method_whitelist,
+            status_forcelist=self.status_forcelist,
+            backoff_factor=self.backoff_factor,
+            raise_on_redirect=self.raise_on_redirect,
+            _observed_errors=self._observed_errors,
+        )
+        params.update(kw)
+        return type(self)(**params)
+
+    @classmethod
+    def from_int(cls, retries, redirect=True, default=None):
+        """ Backwards-compatibility for the old retries format."""
+        if retries is None:
+            retries = default if default is not None else cls.DEFAULT
+
+        if isinstance(retries, Retry):
+            return retries
+
+        redirect = bool(redirect) and None
+        new_retries = cls(retries, redirect=redirect)
+        log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
+        return new_retries
+
+    def get_backoff_time(self):
+        """ Formula for computing the current backoff
+
+        :rtype: float
+        """
+        if self._observed_errors <= 1:
+            return 0
+
+        backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
+        return min(self.BACKOFF_MAX, backoff_value)
+
+    def sleep(self):
+        """ Sleep between retry attempts using an exponential backoff.
+
+        By default, the backoff factor is 0 and this method will return
+        immediately.
+        """
+        backoff = self.get_backoff_time()
+        if backoff <= 0:
+            return
+        time.sleep(backoff)
+
+    def _is_connection_error(self, err):
+        """ Errors when we're fairly sure that the server did not receive the
+        request, so it should be safe to retry.
+        """
+        return isinstance(err, ConnectTimeoutError)
+
+    def _is_read_error(self, err):
+        """ Errors that occur after the request has been started, so we should
+        assume that the server began processing it.
+        """
+        return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+    def is_forced_retry(self, method, status_code):
+        """ Is this method/status code retryable? (Based on method/codes whitelists)
+        """
+        if self.method_whitelist and method.upper() not in self.method_whitelist:
+            return False
+
+        return self.status_forcelist and status_code in self.status_forcelist
+
+    def is_exhausted(self):
+        """ Are we out of retries? """
+        retry_counts = (self.total, self.connect, self.read, self.redirect)
+        retry_counts = list(filter(None, retry_counts))
+        if not retry_counts:
+            return False
+
+        return min(retry_counts) < 0
+
+    def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
+        """ Return a new Retry object with incremented retry counters.
+
+        :param response: A response object, or None, if the server did not
+            return a response.
+        :type response: :class:`~urllib3.response.HTTPResponse`
+        :param Exception error: An error encountered during the request, or
+            None if the response was received successfully.
+
+        :return: A new ``Retry`` object.
+        """
+        if self.total is False and error:
+            # Disabled, indicate to re-raise the error.
+            raise six.reraise(type(error), error, _stacktrace)
+
+        total = self.total
+        if total is not None:
+            total -= 1
+
+        _observed_errors = self._observed_errors
+        connect = self.connect
+        read = self.read
+        redirect = self.redirect
+        cause = 'unknown'
+
+        if error and self._is_connection_error(error):
+            # Connect retry?
+            if connect is False:
+                raise six.reraise(type(error), error, _stacktrace)
+            elif connect is not None:
+                connect -= 1
+            _observed_errors += 1
+
+        elif error and self._is_read_error(error):
+            # Read retry?
+            if read is False:
+                raise six.reraise(type(error), error, _stacktrace)
+            elif read is not None:
+                read -= 1
+            _observed_errors += 1
+
+        elif response and response.get_redirect_location():
+            # Redirect retry?
+            if redirect is not None:
+                redirect -= 1
+            cause = 'too many redirects'
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            _observed_errors += 1
+            cause = ResponseError.GENERIC_ERROR
+            if response and response.status:
+                cause = ResponseError.SPECIFIC_ERROR.format(
+                    status_code=response.status)
+
+        new_retry = self.new(
+            total=total,
+            connect=connect, read=read, redirect=redirect,
+            _observed_errors=_observed_errors)
+
+        if new_retry.is_exhausted():
+            raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+        log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
+
+        return new_retry
+
+    def __repr__(self):
+        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+                'read={self.read}, redirect={self.redirect})').format(
+                    cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
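
A minimal sketch of the counter and backoff mechanics: each simulated failure yields a fresh Retry via increment(), and get_backoff_time() stays 0 until a second error has been observed:

    from requests.packages.urllib3.util.retry import Retry

    retry = Retry(total=5, backoff_factor=0.1)
    for _ in range(4):
        retry = retry.increment(method='GET', url='/')  # returns a new Retry
        print(retry.total, retry.get_backoff_time())
    # -> 4 0, 3 0.2, 2 0.4, 1 0.8 (always capped at Retry.BACKOFF_MAX)
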
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/ssl_.py b/autoProcessTV/lib/requests/packages/urllib3/util/ssl_.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7e7dfae1881d7303e584fb61fbbd1fc62300ae8
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/ssl_.py
@@ -0,0 +1,266 @@
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import SSLError, InsecurePlatformWarning
+
+
+SSLContext = None
+HAS_SNI = False
+create_default_context = None
+
+import errno
+import ssl
+import warnings
+
+try:  # Test for SSL features
+    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
+    from ssl import HAS_SNI  # Has SNI?
+except ImportError:
+    pass
+
+
+try:
+    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
+except ImportError:
+    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+    OP_NO_COMPRESSION = 0x20000
+
+try:
+    from ssl import _DEFAULT_CIPHERS
+except ImportError:
+    _DEFAULT_CIPHERS = (
+        'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
+        'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
+        '!eNULL:!MD5'
+    )
+
+try:
+    from ssl import SSLContext  # Modern SSL?
+except ImportError:
+    import sys
+
+    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
+        supports_set_ciphers = sys.version_info >= (2, 7)
+
+        def __init__(self, protocol_version):
+            self.protocol = protocol_version
+            # Use default values from a real SSLContext
+            self.check_hostname = False
+            self.verify_mode = ssl.CERT_NONE
+            self.ca_certs = None
+            self.options = 0
+            self.certfile = None
+            self.keyfile = None
+            self.ciphers = None
+
+        def load_cert_chain(self, certfile, keyfile):
+            self.certfile = certfile
+            self.keyfile = keyfile
+
+        def load_verify_locations(self, location):
+            self.ca_certs = location
+
+        def set_ciphers(self, cipher_suite):
+            if not self.supports_set_ciphers:
+                raise TypeError(
+                    'Your version of Python does not support setting '
+                    'a custom cipher suite. Please upgrade to Python '
+                    '2.7, 3.2, or later if you need this functionality.'
+                )
+            self.ciphers = cipher_suite
+
+        def wrap_socket(self, socket, server_hostname=None):
+            warnings.warn(
+                'A true SSLContext object is not available. This prevents '
+                'urllib3 from configuring SSL appropriately and may cause '
+                'certain SSL connections to fail. For more information, see '
+                'https://urllib3.readthedocs.org/en/latest/security.html'
+                '#insecureplatformwarning.',
+                InsecurePlatformWarning
+            )
+            kwargs = {
+                'keyfile': self.keyfile,
+                'certfile': self.certfile,
+                'ca_certs': self.ca_certs,
+                'cert_reqs': self.verify_mode,
+                'ssl_version': self.protocol,
+            }
+            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
+                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+            else:  # Platform-specific: Python 2.6
+                return wrap_socket(socket, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+    """
+    Checks if given fingerprint matches the supplied certificate.
+
+    :param cert:
+        Certificate as bytes object.
+    :param fingerprint:
+        Fingerprint as string of hexdigits, can be interspersed by colons.
+    """
+
+    # Maps the length of a digest to a possible hash function producing
+    # this digest.
+    hashfunc_map = {
+        16: md5,
+        20: sha1,
+        32: sha256,
+    }
+
+    fingerprint = fingerprint.replace(':', '').lower()
+    digest_length, odd = divmod(len(fingerprint), 2)
+
+    if odd or digest_length not in hashfunc_map:
+        raise SSLError('Fingerprint is of invalid length.')
+
+    # We need encode() here for py32; works on py2 and py33.
+    fingerprint_bytes = unhexlify(fingerprint.encode())
+
+    hashfunc = hashfunc_map[digest_length]
+
+    cert_digest = hashfunc(cert).digest()
+
+    if not cert_digest == fingerprint_bytes:
+        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+                       .format(hexlify(fingerprint_bytes),
+                               hexlify(cert_digest)))
+
+
+def resolve_cert_reqs(candidate):
+    """
+    Resolves the argument to a numeric constant, which can be passed to
+    the wrap_socket function/method from the ssl module.
+    Defaults to :data:`ssl.CERT_NONE`.
+    If given a string it is assumed to be the name of the constant in the
+    :mod:`ssl` module or its abbreviation.
+    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+    If it's neither `None` nor a string we assume it is already the numeric
+    constant which can directly be passed to wrap_socket.
+    """
+    if candidate is None:
+        return CERT_NONE
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, 'CERT_' + candidate)
+        return res
+
+    return candidate
+
+
+def resolve_ssl_version(candidate):
+    """
+    Like resolve_cert_reqs, but resolves the candidate to an SSL protocol
+    version; defaults to :data:`ssl.PROTOCOL_SSLv23`.
+    """
+    if candidate is None:
+        return PROTOCOL_SSLv23
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, 'PROTOCOL_' + candidate)
+        return res
+
+    return candidate
+
+
+def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
+                           options=None, ciphers=None):
+    """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+    By default, this function does a lot of the same work that
+    ``ssl.create_default_context`` does on Python 3.4+. It:
+
+    - Disables SSLv2, SSLv3, and compression
+    - Sets a restricted set of server ciphers
+
+    If you wish to enable SSLv3, you can do::
+
+        from urllib3.util import ssl_
+        context = ssl_.create_urllib3_context()
+        context.options &= ~ssl_.OP_NO_SSLv3
+
+    You can do the same to enable compression (substituting ``COMPRESSION``
+    for ``SSLv3`` in the last line above).
+
+    :param ssl_version:
+        The desired protocol version to use. This will default to
+        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+        the server and your installation of OpenSSL support.
+    :param cert_reqs:
+        Whether to require the certificate verification. This defaults to
+        ``ssl.CERT_REQUIRED``.
+    :param options:
+        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
+    :param ciphers:
+        Which cipher suites to allow the server to select.
+    :returns:
+        Constructed SSLContext object with specified options
+    :rtype: SSLContext
+    """
+    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+
+    if options is None:
+        options = 0
+        # SSLv2 is easily broken and is considered harmful and dangerous
+        options |= OP_NO_SSLv2
+        # SSLv3 has several problems and is now dangerous
+        options |= OP_NO_SSLv3
+        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+        # (issue #309)
+        options |= OP_NO_COMPRESSION
+
+    context.options |= options
+
+    if getattr(context, 'supports_set_ciphers', True):  # Platform-specific: Python 2.6
+        context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
+
+    context.verify_mode = cert_reqs
+    if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
+        # We do our own verification, including fingerprints and alternative
+        # hostnames. So disable it here
+        context.check_hostname = False
+    return context
+
+
+def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+                    ca_certs=None, server_hostname=None,
+                    ssl_version=None, ciphers=None, ssl_context=None):
+    """
+    All arguments except for server_hostname and ssl_context have the same
+    meaning as they do when using :func:`ssl.wrap_socket`.
+
+    :param server_hostname:
+        When SNI is supported, the expected hostname of the certificate
+    :param ssl_context:
+        A pre-made :class:`SSLContext` object. If none is provided, one will
+        be created using :func:`create_urllib3_context`.
+    :param ciphers:
+        A string of ciphers we wish the client to support. This is not
+        supported on Python 2.6 as the ssl module does not support it.
+    """
+    context = ssl_context
+    if context is None:
+        context = create_urllib3_context(ssl_version, cert_reqs,
+                                         ciphers=ciphers)
+
+    if ca_certs:
+        try:
+            context.load_verify_locations(ca_certs)
+        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
+            raise SSLError(e)
+        # Py33 raises FileNotFoundError which subclasses OSError
+        # These are not equivalent unless we check the errno attribute
+        except OSError as e:  # Platform-specific: Python 3.3 and beyond
+            if e.errno == errno.ENOENT:
+                raise SSLError(e)
+            raise
+    if certfile:
+        context.load_cert_chain(certfile, keyfile)
+    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
+        return context.wrap_socket(sock, server_hostname=server_hostname)
+    return context.wrap_socket(sock)
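
A minimal sketch of fingerprint pinning with assert_fingerprint(); the certificate bytes are a stand-in, since the check simply hashes whatever bytes it is given:

    from hashlib import sha256
    from requests.packages.urllib3.util.ssl_ import assert_fingerprint

    der_cert = b'stand-in certificate bytes'
    fp = sha256(der_cert).hexdigest()
    fp = ':'.join(fp[i:i + 2] for i in range(0, len(fp), 2))  # colons are tolerated
    assert_fingerprint(der_cert, fp)  # returns silently; raises SSLError on mismatch
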
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/timeout.py b/autoProcessTV/lib/requests/packages/urllib3/util/timeout.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea7027f3f5f531aef55069ff848612df266821ee
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/timeout.py
@@ -0,0 +1,240 @@
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+def current_time():
+    """
+    Retrieve the current time. This function is mocked out in unit testing.
+    """
+    return time.time()
+
+
+class Timeout(object):
+    """ Timeout configuration.
+
+    Timeouts can be defined as a default for a pool::
+
+        timeout = Timeout(connect=2.0, read=7.0)
+        http = PoolManager(timeout=timeout)
+        response = http.request('GET', 'http://example.com/')
+
+    Or per-request (which overrides the default for the pool)::
+
+        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+    Timeouts can be disabled by setting all the parameters to ``None``::
+
+        no_timeout = Timeout(connect=None, read=None)
+        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+    :param total:
+        This combines the connect and read timeouts into one; the read timeout
+        will be set to the time leftover from the connect attempt. In the
+        event that both a connect timeout and a total are specified, or a read
+        timeout and a total are specified, the shorter timeout will be applied.
+
+        Defaults to None.
+
+    :type total: integer, float, or None
+
+    :param connect:
+        The maximum amount of time to wait for a connection attempt to a server
+        to succeed. Omitting the parameter will default the connect timeout to
+        the system default, probably `the global default timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout for connection attempts.
+
+    :type connect: integer, float, or None
+
+    :param read:
+        The maximum amount of time to wait between consecutive
+        read operations for a response from the server. Omitting
+        the parameter will default the read timeout to the system
+        default, probably `the global default timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout.
+
+    :type read: integer, float, or None
+
+    .. note::
+
+        Many factors can affect the total amount of time for urllib3 to return
+        an HTTP response.
+
+        For example, Python's DNS resolver does not obey the timeout specified
+        on the socket. Other factors that can affect total request time include
+        high CPU load, high swap, the program running at a low priority level,
+        or other behaviors.
+
+        In addition, the read and total timeouts only measure the time between
+        read operations on the socket connecting the client and the server,
+        not the total amount of time for the request to return a complete
+        response. For most requests, the timeout is raised because the server
+        has not sent the first byte in the specified time. This is not always
+        the case; if a server streams one byte every fifteen seconds, a timeout
+        of 20 seconds will not trigger, even though the request will take
+        several minutes to complete.
+
+        If your goal is to cut off any request after a set amount of wall clock
+        time, consider having a second "watcher" thread to cut off a slow
+        request.
+    """
+
+    #: A sentinel object representing the default timeout value
+    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+    def __init__(self, total=None, connect=_Default, read=_Default):
+        self._connect = self._validate_timeout(connect, 'connect')
+        self._read = self._validate_timeout(read, 'read')
+        self.total = self._validate_timeout(total, 'total')
+        self._start_connect = None
+
+    def __str__(self):
+        return '%s(connect=%r, read=%r, total=%r)' % (
+            type(self).__name__, self._connect, self._read, self.total)
+
+    @classmethod
+    def _validate_timeout(cls, value, name):
+        """ Check that a timeout attribute is valid.
+
+        :param value: The timeout value to validate
+        :param name: The name of the timeout attribute to validate. This is
+            used to specify in error messages.
+        :return: The validated and casted version of the given value.
+        :raises ValueError: If the type is not an integer or a float, or if it
+            is a numeric value less than zero.
+        """
+        if value is _Default:
+            return cls.DEFAULT_TIMEOUT
+
+        if value is None or value is cls.DEFAULT_TIMEOUT:
+            return value
+
+        try:
+            float(value)
+        except (TypeError, ValueError):
+            raise ValueError("Timeout value %s was %s, but it must be an "
+                             "int or float." % (name, value))
+
+        try:
+            if value < 0:
+                raise ValueError("Attempted to set %s timeout to %s, but the "
+                                 "timeout cannot be set to a value less "
+                                 "than 0." % (name, value))
+        except TypeError:  # Python 3
+            raise ValueError("Timeout value %s was %s, but it must be an "
+                             "int or float." % (name, value))
+
+        return value
+
+    @classmethod
+    def from_float(cls, timeout):
+        """ Create a new Timeout from a legacy timeout value.
+
+        The timeout value used by httplib.py sets the same timeout on the
+        connect(), and recv() socket requests. This creates a :class:`Timeout`
+        object that sets the individual timeouts to the ``timeout`` value
+        passed to this function.
+
+        :param timeout: The legacy timeout value.
+        :type timeout: integer, float, sentinel default object, or None
+        :return: Timeout object
+        :rtype: :class:`Timeout`
+        """
+        return Timeout(read=timeout, connect=timeout)
+
+    def clone(self):
+        """ Create a copy of the timeout object
+
+        Timeout properties are stored per-pool but each request needs a fresh
+        Timeout object to ensure each one has its own start/stop configured.
+
+        :return: a copy of the timeout object
+        :rtype: :class:`Timeout`
+        """
+        # We can't use copy.deepcopy because that will also create a new object
+        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+        # detect the user default.
+        return Timeout(connect=self._connect, read=self._read,
+                       total=self.total)
+
+    def start_connect(self):
+        """ Start the timeout clock, used during a connect() attempt
+
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to start a timer that has been started already.
+        """
+        if self._start_connect is not None:
+            raise TimeoutStateError("Timeout timer has already been started.")
+        self._start_connect = current_time()
+        return self._start_connect
+
+    def get_connect_duration(self):
+        """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+        :return: Elapsed time.
+        :rtype: float
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to get duration for a timer that hasn't been started.
+        """
+        if self._start_connect is None:
+            raise TimeoutStateError("Can't get connect duration for timer "
+                                    "that has not started.")
+        return current_time() - self._start_connect
+
+    @property
+    def connect_timeout(self):
+        """ Get the value to use when setting a connection timeout.
+
+        This will be a positive float or integer, the value None
+        (never timeout), or the default system timeout.
+
+        :return: Connect timeout.
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        """
+        if self.total is None:
+            return self._connect
+
+        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+            return self.total
+
+        return min(self._connect, self.total)
+
+    @property
+    def read_timeout(self):
+        """ Get the value for the read timeout.
+
+        This assumes some time has elapsed in the connection timeout and
+        computes the read timeout appropriately.
+
+        If self.total is set, the read timeout is dependent on the amount of
+        time taken by the connect timeout. If the connection time has not been
+        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+        raised.
+
+        :return: Value to use for the read timeout.
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+            has not yet been called on this object.
+        """
+        if (self.total is not None and
+            self.total is not self.DEFAULT_TIMEOUT and
+            self._read is not None and
+            self._read is not self.DEFAULT_TIMEOUT):
+            # In case the connect timeout has not yet been established.
+            if self._start_connect is None:
+                return self._read
+            return max(0, min(self.total - self.get_connect_duration(),
+                              self._read))
+        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+            return max(0, self.total - self.get_connect_duration())
+        else:
+            return self._read
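
A minimal sketch of how the three values interact: connect_timeout is clamped by total, and read_timeout is whatever budget remains once the clock has started:

    from requests.packages.urllib3.util.timeout import Timeout

    t = Timeout(total=10.0, connect=3.0, read=8.0)
    assert t.connect_timeout == 3.0  # min(connect, total)
    t.start_connect()                # start the clock before connecting
    print(t.read_timeout)            # min(total - elapsed, read), so ~8.0
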
diff --git a/autoProcessTV/lib/requests/packages/urllib3/util/url.py b/autoProcessTV/lib/requests/packages/urllib3/util/url.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2ec834fe721a55195c25d4495b48c1bdaefcd5f
--- /dev/null
+++ b/autoProcessTV/lib/requests/packages/urllib3/util/url.py
@@ -0,0 +1,212 @@
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
+
+
+class Url(namedtuple('Url', url_attrs)):
+    """
+    Datastructure for representing an HTTP URL. Used as a return value for
+    :func:`parse_url`.
+    """
+    __slots__ = ()
+
+    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
+                query=None, fragment=None):
+        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
+                                       query, fragment)
+
+    @property
+    def hostname(self):
+        """For backwards-compatibility with urlparse. We're nice like that."""
+        return self.host
+
+    @property
+    def request_uri(self):
+        """Absolute path including the query string."""
+        uri = self.path or '/'
+
+        if self.query is not None:
+            uri += '?' + self.query
+
+        return uri
+
+    @property
+    def netloc(self):
+        """Network location including host and port"""
+        if self.port:
+            return '%s:%d' % (self.host, self.port)
+        return self.host
+
+    @property
+    def url(self):
+        """
+        Convert self into a url
+
+        This function should more or less round-trip with :func:`.parse_url`. The
+        returned url may not be exactly the same as the url passed to
+        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+        with a blank port will have the ':' removed).
+
+        Example: ::
+
+            >>> U = parse_url('http://google.com/mail/')
+            >>> U.url
+            'http://google.com/mail/'
+            >>> Url('http', 'username:password', 'host.com', 80,
+            ... '/path', 'query', 'fragment').url
+            'http://username:password@host.com:80/path?query#fragment'
+        """
+        scheme, auth, host, port, path, query, fragment = self
+        url = ''
+
+        # We use "is not None" we want things to happen with empty strings (or 0 port)
+        if scheme is not None:
+            url += scheme + '://'
+        if auth is not None:
+            url += auth + '@'
+        if host is not None:
+            url += host
+        if port is not None:
+            url += ':' + str(port)
+        if path is not None:
+            url += path
+        if query is not None:
+            url += '?' + query
+        if fragment is not None:
+            url += '#' + fragment
+
+        return url
+
+    def __str__(self):
+        return self.url
+
+def split_first(s, delims):
+    """
+    Given a string and an iterable of delimiters, split on the first found
+    delimiter. Return two split parts and the matched delimiter.
+
+    If not found, then the first part is the full input string.
+
+    Example::
+
+        >>> split_first('foo/bar?baz', '?/=')
+        ('foo', 'bar?baz', '/')
+        >>> split_first('foo/bar?baz', '123')
+        ('foo/bar?baz', '', None)
+
+    Scales linearly with number of delims. Not ideal for large number of delims.
+    """
+    min_idx = None
+    min_delim = None
+    for d in delims:
+        idx = s.find(d)
+        if idx < 0:
+            continue
+
+        if min_idx is None or idx < min_idx:
+            min_idx = idx
+            min_delim = d
+
+    if min_idx is None or min_idx < 0:
+        return s, '', None
+
+    return s[:min_idx], s[min_idx+1:], min_delim
+
+
+def parse_url(url):
+    """
+    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+    performed to parse incomplete urls. Fields not provided will be None.
+
+    Partly backwards-compatible with :mod:`urlparse`.
+
+    Example::
+
+        >>> parse_url('http://google.com/mail/')
+        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+        >>> parse_url('google.com:80')
+        Url(scheme=None, host='google.com', port=80, path=None, ...)
+        >>> parse_url('/foo?bar')
+        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+    """
+
+    # While this code has overlap with stdlib's urlparse, it is much
+    # simplified for our needs and less annoying.
+    # Additionally, this implementation does silly things to be optimal
+    # on CPython.
+
+    if not url:
+        # Empty
+        return Url()
+
+    scheme = None
+    auth = None
+    host = None
+    port = None
+    path = None
+    fragment = None
+    query = None
+
+    # Scheme
+    if '://' in url:
+        scheme, url = url.split('://', 1)
+
+    # Find the earliest Authority Terminator
+    # (http://tools.ietf.org/html/rfc3986#section-3.2)
+    url, path_, delim = split_first(url, ['/', '?', '#'])
+
+    if delim:
+        # Reassemble the path
+        path = delim + path_
+
+    # Auth
+    if '@' in url:
+        # Last '@' denotes end of auth part
+        auth, url = url.rsplit('@', 1)
+
+    # IPv6
+    if url and url[0] == '[':
+        host, url = url.split(']', 1)
+        host += ']'
+
+    # Port
+    if ':' in url:
+        _host, port = url.split(':', 1)
+
+        if not host:
+            host = _host
+
+        if port:
+            # If given, ports must be integers.
+            if not port.isdigit():
+                raise LocationParseError(url)
+            port = int(port)
+        else:
+            # Blank ports are cool, too. (rfc3986#section-3.2.3)
+            port = None
+
+    elif not host and url:
+        host = url
+
+    if not path:
+        return Url(scheme, auth, host, port, path, query, fragment)
+
+    # Fragment
+    if '#' in path:
+        path, fragment = path.split('#', 1)
+
+    # Query
+    if '?' in path:
+        path, query = path.split('?', 1)
+
+    return Url(scheme, auth, host, port, path, query, fragment)
+
+
+def get_host(url):
+    """
+    Deprecated. Use :func:`.parse_url` instead.
+    """
+    p = parse_url(url)
+    return p.scheme or 'http', p.hostname, p.port
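
The parser and the ``url`` property are designed to round-trip. A doctest-style
sketch of the behaviour, assuming the vendored import path of this tree:

    >>> from requests.packages.urllib3.util.url import parse_url
    >>> u = parse_url('http://user:pw@host.com:80/path?q#frag')
    >>> (u.scheme, u.auth, u.host, u.port, u.path, u.query, u.fragment)
    ('http', 'user:pw', 'host.com', 80, '/path', 'q', 'frag')
    >>> u.url
    'http://user:pw@host.com:80/path?q#frag'
    >>> parse_url('http://host.com:/path').port is None  # blank port is dropped
    True
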
diff --git a/autoProcessTV/lib/requests/sessions.py b/autoProcessTV/lib/requests/sessions.py
index 425db22ca6fbe6e422b1e6dd61f9185915adf372..ef3f22bc5c5b7075301d5a162de00061041f2417 100644
--- a/autoProcessTV/lib/requests/sessions.py
+++ b/autoProcessTV/lib/requests/sessions.py
@@ -12,24 +12,32 @@ import os
 from collections import Mapping
 from datetime import datetime
 
-from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str
+from .auth import _basic_auth_str
+from .compat import cookielib, OrderedDict, urljoin, urlparse
 from .cookies import (
     cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
 from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
 from .hooks import default_hooks, dispatch_hook
 from .utils import to_key_val_list, default_headers, to_native_string
-from .exceptions import TooManyRedirects, InvalidSchema
+from .exceptions import (
+    TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
+from .packages.urllib3._collections import RecentlyUsedContainer
 from .structures import CaseInsensitiveDict
 
 from .adapters import HTTPAdapter
 
-from .utils import requote_uri, get_environ_proxies, get_netrc_auth
+from .utils import (
+    requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
+    get_auth_from_url
+)
 
 from .status_codes import codes
 
 # formerly defined here, reexposed here for backward compatibility
 from .models import REDIRECT_STATI
 
+REDIRECT_CACHE_SIZE = 1000
+
 
 def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
     """
@@ -86,11 +94,21 @@ class SessionRedirectMixin(object):
         """Receives a Response. Returns a generator of Responses."""
 
         i = 0
+        hist = []  # keep track of history
 
         while resp.is_redirect:
             prepared_request = req.copy()
 
-            resp.content  # Consume socket so it can be released
+            if i > 0:
+                # Update history and keep track of redirects.
+                hist.append(resp)
+                new_hist = list(hist)
+                resp.history = new_hist
+
+            try:
+                resp.content  # Consume socket so it can be released
+            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
+                resp.raw.read(decode_content=False)
 
             if i >= self.max_redirects:
                 raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
@@ -110,17 +128,20 @@ class SessionRedirectMixin(object):
             parsed = urlparse(url)
             url = parsed.geturl()
 
-            # Facilitate non-RFC2616-compliant 'location' headers
+            # Facilitate relative 'location' headers, as allowed by RFC 7231.
             # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
             # Compliant with RFC3986, we percent encode the url.
-            if not urlparse(url).netloc:
+            if not parsed.netloc:
                 url = urljoin(resp.url, requote_uri(url))
             else:
                 url = requote_uri(url)
 
             prepared_request.url = to_native_string(url)
+            # Cache the url, unless it redirects to itself.
+            if resp.is_permanent_redirect and req.url != prepared_request.url:
+                self.redirect_cache[req.url] = prepared_request.url
 
-            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
+            # http://tools.ietf.org/html/rfc7231#section-6.4.4
             if (resp.status_code == codes.see_other and
                     method != 'HEAD'):
                 method = 'GET'
@@ -138,7 +159,7 @@ class SessionRedirectMixin(object):
             prepared_request.method = method
 
             # https://github.com/kennethreitz/requests/issues/1084
-            if resp.status_code not in (codes.temporary, codes.resume):
+            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                 if 'Content-Length' in prepared_request.headers:
                     del prepared_request.headers['Content-Length']
 
@@ -150,26 +171,22 @@ class SessionRedirectMixin(object):
             except KeyError:
                 pass
 
-            extract_cookies_to_jar(prepared_request._cookies, prepared_request, resp.raw)
+            # Extract any cookies sent on the response to the cookiejar
+            # in the new request. Because we've mutated our copied prepared
+            # request, use the old one that we haven't yet touched.
+            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
             prepared_request._cookies.update(self.cookies)
             prepared_request.prepare_cookies(prepared_request._cookies)
 
-            if 'Authorization' in headers:
-                # If we get redirected to a new host, we should strip out any
-                # authentication headers.
-                original_parsed = urlparse(resp.request.url)
-                redirect_parsed = urlparse(url)
-
-                if (original_parsed.hostname != redirect_parsed.hostname):
-                    del headers['Authorization']
+            # Rebuild auth and proxy information.
+            proxies = self.rebuild_proxies(prepared_request, proxies)
+            self.rebuild_auth(prepared_request, resp)
 
-            # .netrc might have more auth for us.
-            new_auth = get_netrc_auth(url) if self.trust_env else None
-            if new_auth is not None:
-                prepared_request.prepare_auth(new_auth)
+            # Override the original request.
+            req = prepared_request
 
             resp = self.send(
-                prepared_request,
+                req,
                 stream=stream,
                 timeout=timeout,
                 verify=verify,
@@ -183,6 +200,68 @@ class SessionRedirectMixin(object):
             i += 1
             yield resp
 
+    def rebuild_auth(self, prepared_request, response):
+        """
+        When being redirected we may want to strip authentication from the
+        request to avoid leaking credentials. This method intelligently removes
+        and reapplies authentication where possible to avoid credential loss.
+        """
+        headers = prepared_request.headers
+        url = prepared_request.url
+
+        if 'Authorization' in headers:
+            # If we get redirected to a new host, we should strip out any
+            # authentication headers.
+            original_parsed = urlparse(response.request.url)
+            redirect_parsed = urlparse(url)
+
+            if (original_parsed.hostname != redirect_parsed.hostname):
+                del headers['Authorization']
+
+        # .netrc might have more auth for us on our new host.
+        new_auth = get_netrc_auth(url) if self.trust_env else None
+        if new_auth is not None:
+            prepared_request.prepare_auth(new_auth)
+
+        return
+
+    def rebuild_proxies(self, prepared_request, proxies):
+        """
+        This method re-evaluates the proxy configuration by considering the
+        environment variables. If we are redirected to a URL covered by
+        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
+        proxy keys for this URL (in case they were stripped by a previous
+        redirect).
+
+        This method also replaces the Proxy-Authorization header where
+        necessary.
+        """
+        headers = prepared_request.headers
+        url = prepared_request.url
+        scheme = urlparse(url).scheme
+        new_proxies = proxies.copy() if proxies is not None else {}
+
+        if self.trust_env and not should_bypass_proxies(url):
+            environ_proxies = get_environ_proxies(url)
+
+            proxy = environ_proxies.get(scheme)
+
+            if proxy:
+                new_proxies.setdefault(scheme, environ_proxies[scheme])
+
+        if 'Proxy-Authorization' in headers:
+            del headers['Proxy-Authorization']
+
+        try:
+            username, password = get_auth_from_url(new_proxies[scheme])
+        except KeyError:
+            username, password = None, None
+
+        if username and password:
+            headers['Proxy-Authorization'] = _basic_auth_str(username, password)
+
+        return new_proxies
+
 
 class Session(SessionRedirectMixin):
     """A Requests session.
@@ -198,9 +277,10 @@ class Session(SessionRedirectMixin):
     """
 
     __attrs__ = [
-        'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
-        'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream',
-        'trust_env', 'max_redirects']
+        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
+        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
+        'max_redirects',
+    ]
 
     def __init__(self):
 
@@ -253,6 +333,9 @@ class Session(SessionRedirectMixin):
         self.mount('https://', HTTPAdapter())
         self.mount('http://', HTTPAdapter())
 
+        # Only store REDIRECT_CACHE_SIZE redirects to keep memory use bounded
+        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
+
     def __enter__(self):
         return self
 
@@ -290,6 +373,7 @@ class Session(SessionRedirectMixin):
             url=request.url,
             files=request.files,
             data=request.data,
+            json=request.json,
             headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
             params=merge_setting(request.params, self.params),
             auth=merge_setting(auth, self.auth),
@@ -311,7 +395,8 @@ class Session(SessionRedirectMixin):
         hooks=None,
         stream=None,
         verify=None,
-        cert=None):
+        cert=None,
+        json=None):
         """Constructs a :class:`Request <Request>`, prepares it and sends it.
         Returns :class:`Response <Response>` object.
 
@@ -321,17 +406,22 @@ class Session(SessionRedirectMixin):
             string for the :class:`Request`.
         :param data: (optional) Dictionary or bytes to send in the body of the
             :class:`Request`.
+        :param json: (optional) json to send in the body of the
+            :class:`Request`.
         :param headers: (optional) Dictionary of HTTP Headers to send with the
             :class:`Request`.
         :param cookies: (optional) Dict or CookieJar object to send with the
             :class:`Request`.
-        :param files: (optional) Dictionary of 'filename': file-like-objects
+        :param files: (optional) Dictionary of ``'filename': file-like-objects``
             for multipart encoding upload.
         :param auth: (optional) Auth tuple or callable to enable
             Basic/Digest/Custom HTTP Auth.
-        :param timeout: (optional) Float describing the timeout of the
-            request in seconds.
-        :param allow_redirects: (optional) Boolean. Set to True by default.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a (`connect timeout, read
+            timeout <user/advanced.html#timeouts>`_) tuple.
+        :type timeout: float or tuple
+        :param allow_redirects: (optional) Set to True by default.
+        :type allow_redirects: bool
         :param proxies: (optional) Dictionary mapping protocol to the URL of
             the proxy.
         :param stream: (optional) whether to immediately download the response
@@ -342,7 +432,7 @@ class Session(SessionRedirectMixin):
             If Tuple, ('cert', 'key') pair.
         """
 
-        method = builtin_str(method)
+        method = to_native_string(method)
 
         # Create the Request.
         req = Request(
@@ -351,6 +441,7 @@ class Session(SessionRedirectMixin):
             headers = headers,
             files = files,
             data = data or {},
+            json = json,
             params = params or {},
             auth = auth,
             cookies = cookies,
@@ -360,36 +451,16 @@ class Session(SessionRedirectMixin):
 
         proxies = proxies or {}
 
-        # Gather clues from the surrounding environment.
-        if self.trust_env:
-            # Set environment's proxies.
-            env_proxies = get_environ_proxies(url) or {}
-            for (k, v) in env_proxies.items():
-                proxies.setdefault(k, v)
-
-            # Look for configuration.
-            if not verify and verify is not False:
-                verify = os.environ.get('REQUESTS_CA_BUNDLE')
-
-            # Curl compatibility.
-            if not verify and verify is not False:
-                verify = os.environ.get('CURL_CA_BUNDLE')
-
-        # Merge all the kwargs.
-        proxies = merge_setting(proxies, self.proxies)
-        stream = merge_setting(stream, self.stream)
-        verify = merge_setting(verify, self.verify)
-        cert = merge_setting(cert, self.cert)
+        settings = self.merge_environment_settings(
+            prep.url, proxies, stream, verify, cert
+        )
 
         # Send the request.
         send_kwargs = {
-            'stream': stream,
             'timeout': timeout,
-            'verify': verify,
-            'cert': cert,
-            'proxies': proxies,
             'allow_redirects': allow_redirects,
         }
+        send_kwargs.update(settings)
         resp = self.send(prep, **send_kwargs)
 
         return resp
@@ -424,15 +495,16 @@ class Session(SessionRedirectMixin):
         kwargs.setdefault('allow_redirects', False)
         return self.request('HEAD', url, **kwargs)
 
-    def post(self, url, data=None, **kwargs):
+    def post(self, url, data=None, json=None, **kwargs):
         """Sends a POST request. Returns :class:`Response` object.
 
         :param url: URL for the new :class:`Request` object.
         :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the :class:`Request`.
         :param \*\*kwargs: Optional arguments that ``request`` takes.
         """
 
-        return self.request('POST', url, data=data, **kwargs)
+        return self.request('POST', url, data=data, json=json, **kwargs)
 
     def put(self, url, data=None, **kwargs):
         """Sends a PUT request. Returns :class:`Response` object.
@@ -477,6 +549,14 @@ class Session(SessionRedirectMixin):
         if not isinstance(request, PreparedRequest):
             raise ValueError('You can only send PreparedRequests.')
 
+        checked_urls = set()
+        while request.url in self.redirect_cache:
+            checked_urls.add(request.url)
+            new_url = self.redirect_cache.get(request.url)
+            if new_url in checked_urls:
+                break
+            request.url = new_url
+
         # Set up variables needed for resolve_redirects and dispatching of hooks
         allow_redirects = kwargs.pop('allow_redirects', True)
         stream = kwargs.get('stream')
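
The checked_urls set is a cycle guard: cached permanent redirects are followed
eagerly, but the walk stops as soon as a URL repeats. The same loop in plain
Python, with a hypothetical two-entry cycle:

    redirect_cache = {'http://a/': 'http://b/', 'http://b/': 'http://a/'}

    url = 'http://a/'
    checked_urls = set()
    while url in redirect_cache:
        checked_urls.add(url)
        new_url = redirect_cache[url]
        if new_url in checked_urls:
            break
        url = new_url

    print(url)  # 'http://b/' -- halted before looping back to 'http://a/'
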
@@ -527,10 +607,37 @@ class Session(SessionRedirectMixin):
             history.insert(0, r)
             # Get the last request made
             r = history.pop()
-            r.history = tuple(history)
+            r.history = history
+
+        if not stream:
+            r.content
 
         return r
 
+    def merge_environment_settings(self, url, proxies, stream, verify, cert):
+        """Check the environment and merge it with some settings."""
+        # Gather clues from the surrounding environment.
+        if self.trust_env:
+            # Set environment's proxies.
+            env_proxies = get_environ_proxies(url) or {}
+            for (k, v) in env_proxies.items():
+                proxies.setdefault(k, v)
+
+            # Look for requests environment configuration and be compatible
+            # with cURL.
+            if verify is True or verify is None:
+                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
+                          os.environ.get('CURL_CA_BUNDLE'))
+
+        # Merge all the kwargs.
+        proxies = merge_setting(proxies, self.proxies)
+        stream = merge_setting(stream, self.stream)
+        verify = merge_setting(verify, self.verify)
+        cert = merge_setting(cert, self.cert)
+
+        return {'verify': verify, 'proxies': proxies, 'stream': stream,
+                'cert': cert}
+
     def get_adapter(self, url):
         """Returns the appropriate connnection adapter for the given URL."""
         for (prefix, adapter) in self.adapters.items():
@@ -558,12 +665,19 @@ class Session(SessionRedirectMixin):
             self.adapters[key] = self.adapters.pop(key)
 
     def __getstate__(self):
-        return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
+        state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
+        state['redirect_cache'] = dict(self.redirect_cache)
+        return state
 
     def __setstate__(self, state):
+        redirect_cache = state.pop('redirect_cache', {})
         for attr, value in state.items():
             setattr(self, attr, value)
 
+        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
+        for redirect, to in redirect_cache.items():
+            self.redirect_cache[redirect] = to
+
 
 def session():
     """Returns a :class:`Session` for context-management."""
diff --git a/autoProcessTV/lib/requests/status_codes.py b/autoProcessTV/lib/requests/status_codes.py
index ed7a8660a6f6085861ba3fea10a50487dfb5d351..e0887f210a716d620bde38056db40fe987af9bbe 100644
--- a/autoProcessTV/lib/requests/status_codes.py
+++ b/autoProcessTV/lib/requests/status_codes.py
@@ -30,7 +30,8 @@ _codes = {
     305: ('use_proxy',),
     306: ('switch_proxy',),
     307: ('temporary_redirect', 'temporary_moved', 'temporary'),
-    308: ('resume_incomplete', 'resume'),
+    308: ('permanent_redirect',
+          'resume_incomplete', 'resume',),  # These two aliases are to be removed in 3.0
 
     # Client Error.
     400: ('bad_request', 'bad'),
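
With the alias in place, 308 can be addressed by its RFC name while the old
resumable-upload names keep working. A doctest-style check:

    >>> from requests.status_codes import codes
    >>> codes.permanent_redirect
    308
    >>> codes.resume  # legacy alias, kept until 3.0
    308
    >>> codes.temporary_redirect
    307
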
diff --git a/autoProcessTV/lib/requests/structures.py b/autoProcessTV/lib/requests/structures.py
index a1759137aa0e30e91f428b6129d6712f6e06f643..3e5f2faa2ecab02bd720bbff2179d80077af8e55 100644
--- a/autoProcessTV/lib/requests/structures.py
+++ b/autoProcessTV/lib/requests/structures.py
@@ -8,30 +8,7 @@ Data structures that power Requests.
 
 """
 
-import os
 import collections
-from itertools import islice
-
-
-class IteratorProxy(object):
-    """docstring for IteratorProxy"""
-    def __init__(self, i):
-        self.i = i
-        # self.i = chain.from_iterable(i)
-
-    def __iter__(self):
-        return self.i
-
-    def __len__(self):
-        if hasattr(self.i, '__len__'):
-            return len(self.i)
-        if hasattr(self.i, 'len'):
-            return self.i.len
-        if hasattr(self.i, 'fileno'):
-            return os.fstat(self.i.fileno()).st_size
-
-    def read(self, n):
-        return "".join(islice(self.i, None, n))
 
 
 class CaseInsensitiveDict(collections.MutableMapping):
@@ -46,7 +23,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
     case of the last key to be set, and ``iter(instance)``,
     ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
     will contain case-sensitive keys. However, querying and contains
-    testing is case insensitive:
+    testing is case insensitive::
 
         cid = CaseInsensitiveDict()
         cid['Accept'] = 'application/json'
@@ -106,8 +83,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
         return CaseInsensitiveDict(self._store.values())
 
     def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
-
+        return str(dict(self.items()))
+
 
 class LookupDict(dict):
     """Dictionary lookup object."""
diff --git a/autoProcessTV/lib/requests/utils.py b/autoProcessTV/lib/requests/utils.py
index 4d648bc5fa92a56d3995ac5d560545b90529c348..8fba62dd82aec65e9339f8f25ea19e68a403d7b5 100644
--- a/autoProcessTV/lib/requests/utils.py
+++ b/autoProcessTV/lib/requests/utils.py
@@ -19,15 +19,17 @@ import re
 import sys
 import socket
 import struct
+import warnings
 
 from . import __version__
 from . import certs
 from .compat import parse_http_list as _parse_list_header
 from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
-                     builtin_str, getproxies, proxy_bypass)
+                     builtin_str, getproxies, proxy_bypass, urlunparse,
+                     basestring)
 from .cookies import RequestsCookieJar, cookiejar_from_dict
 from .structures import CaseInsensitiveDict
-from .exceptions import MissingSchema, InvalidURL
+from .exceptions import InvalidURL
 
 _hush_pyflakes = (RequestsCookieJar,)
 
@@ -61,7 +63,7 @@ def super_len(o):
             return os.fstat(fileno).st_size
 
     if hasattr(o, 'getvalue'):
-        # e.g. BytesIO, cStringIO.StringI
+        # e.g. BytesIO, cStringIO.StringIO
         return len(o.getvalue())
 
 
@@ -114,7 +116,8 @@ def get_netrc_auth(url):
 def guess_filename(obj):
     """Tries to guess the filename of the given object."""
     name = getattr(obj, 'name', None)
-    if name and name[0] != '<' and name[-1] != '>':
+    if (name and isinstance(name, basestring) and name[0] != '<' and
+            name[-1] != '>'):
         return os.path.basename(name)
 
 
@@ -287,6 +290,11 @@ def get_encodings_from_content(content):
 
     :param content: bytestring to extract encodings from.
     """
+    warnings.warn((
+        'In requests 3.0, get_encodings_from_content will be removed. For '
+        'more information, please see the discussion on issue #2266. (This'
+        ' warning should only appear once.)'),
+        DeprecationWarning)
 
     charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
     pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
@@ -351,12 +359,14 @@ def get_unicode_from_response(r):
     Tried:
 
     1. charset from content-type
-
-    2. every encodings from ``<meta ... charset=XXX>``
-
-    3. fall back and replace all unicode characters
+    2. fall back and replace all unicode characters
 
     """
+    warnings.warn((
+        'In requests 3.0, get_unicode_from_response will be removed. For '
+        'more information, please see the discussion on issue #2266. (This'
+        ' warning should only appear once.)'),
+        DeprecationWarning)
 
     tried_encodings = []
 
@@ -410,10 +420,18 @@ def requote_uri(uri):
     This function passes the given URI through an unquote/quote cycle to
     ensure that it is fully and consistently quoted.
     """
-    # Unquote only the unreserved characters
-    # Then quote only illegal characters (do not quote reserved, unreserved,
-    # or '%')
-    return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
+    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+    try:
+        # Unquote only the unreserved characters
+        # Then quote only illegal characters (do not quote reserved,
+        # unreserved, or '%')
+        return quote(unquote_unreserved(uri), safe=safe_with_percent)
+    except InvalidURL:
+        # We couldn't unquote the given URI, so let's try quoting it, but
+        # there may be unquoted '%'s in the URI. We need to make sure they're
+        # properly quoted so they do not cause issues elsewhere.
+        return quote(uri, safe=safe_without_percent)
 
 
 def address_in_network(ip, net):
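
The split between safe_with_percent and safe_without_percent is what makes the
fallback safe. A sketch of the three interesting inputs (the last contains an
invalid escape, which previously raised InvalidURL out of this function):

    >>> from requests.utils import requote_uri
    >>> requote_uri('http://example.com/a b')    # illegal character gets quoted
    'http://example.com/a%20b'
    >>> requote_uri('http://example.com/a%20b')  # already-quoted input is stable
    'http://example.com/a%20b'
    >>> requote_uri('http://example.com/a%zzb')  # bad escape: the '%' itself is quoted
    'http://example.com/a%25zzb'
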
@@ -466,9 +484,10 @@ def is_valid_cidr(string_network):
     return True
 
 
-def get_environ_proxies(url):
-    """Return a dict of environment proxies."""
-
+def should_bypass_proxies(url):
+    """
+    Returns whether we should bypass proxies or not.
+    """
     get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
 
     # First check whether no_proxy is defined. If it is, check that the URL
@@ -486,13 +505,13 @@ def get_environ_proxies(url):
             for proxy_ip in no_proxy:
                 if is_valid_cidr(proxy_ip):
                     if address_in_network(ip, proxy_ip):
-                        return {}
+                        return True
         else:
             for host in no_proxy:
                 if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                     # The URL does match something in no_proxy, so we don't want
                     # to apply the proxies on this URL.
-                    return {}
+                    return True
 
     # If the system proxy settings indicate that this URL should be bypassed,
     # don't proxy.
@@ -506,12 +525,16 @@ def get_environ_proxies(url):
         bypass = False
 
     if bypass:
-        return {}
+        return True
 
-    # If we get here, we either didn't have no_proxy set or we're not going
-    # anywhere that no_proxy applies to, and the system settings don't require
-    # bypassing the proxy for the current URL.
-    return getproxies()
+    return False
+
+
+def get_environ_proxies(url):
+    """Return a dict of environment proxies."""
+    if should_bypass_proxies(url):
+        return {}
+    else:
+        return getproxies()
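
Splitting the bypass check out of get_environ_proxies lets rebuild_proxies ask
the NO_PROXY question directly. A sketch with hypothetical hosts:

    import os
    from requests.utils import should_bypass_proxies, get_environ_proxies

    os.environ['no_proxy'] = '192.168.1.0/24,internal.example.com'  # hypothetical

    print(should_bypass_proxies('http://internal.example.com/x'))  # True (suffix match)
    print(should_bypass_proxies('http://192.168.1.7/x'))           # True (CIDR match)
    print(get_environ_proxies('http://internal.example.com/x'))    # {}
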
 
 
 def default_user_agent(name="python-requests"):
@@ -549,7 +572,8 @@ def default_headers():
     return CaseInsensitiveDict({
         'User-Agent': default_user_agent(),
         'Accept-Encoding': ', '.join(('gzip', 'deflate')),
-        'Accept': '*/*'
+        'Accept': '*/*',
+        'Connection': 'keep-alive',
     })
 
 
@@ -564,7 +588,7 @@ def parse_header_links(value):
 
     replace_chars = " '\""
 
-    for val in value.split(","):
+    for val in re.split(", *<", value):
         try:
             url, params = val.split(";", 1)
         except ValueError:
@@ -622,13 +646,18 @@ def guess_json_utf(data):
     return None
 
 
-def except_on_missing_scheme(url):
-    """Given a URL, raise a MissingSchema exception if the scheme is missing.
-    """
-    scheme, netloc, path, params, query, fragment = urlparse(url)
+def prepend_scheme_if_needed(url, new_scheme):
+    '''Given a URL that may or may not have a scheme, prepend the given scheme.
+    Does not replace a present scheme with the one provided as an argument.'''
+    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
 
-    if not scheme:
-        raise MissingSchema('Proxy URLs must have explicit schemes.')
+    # urlparse is a finicky beast, and sometimes decides that there isn't a
+    # netloc present. Assume that it's being over-cautious, and switch netloc
+    # and path if urlparse decided there was no netloc.
+    if not netloc:
+        netloc, path = path, netloc
+
+    return urlunparse((scheme, netloc, path, params, query, fragment))
 
 
 def get_auth_from_url(url):
@@ -661,3 +690,18 @@ def to_native_string(string, encoding='ascii'):
             out = string.decode(encoding)
 
     return out
+
+
+def urldefragauth(url):
+    """
+    Given a url, remove the fragment and the authentication part.
+    """
+    scheme, netloc, path, params, query, fragment = urlparse(url)
+
+    # see :func:`prepend_scheme_if_needed`
+    if not netloc:
+        netloc, path = path, netloc
+
+    netloc = netloc.rsplit('@', 1)[-1]
+
+    return urlunparse((scheme, netloc, path, params, query, ''))
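
Doctest-style sketches of the two new URL helpers, following the code above:

    >>> from requests.utils import prepend_scheme_if_needed, urldefragauth
    >>> prepend_scheme_if_needed('example.com/pub', 'http')
    'http://example.com/pub'
    >>> prepend_scheme_if_needed('https://example.com/pub', 'http')  # present scheme wins
    'https://example.com/pub'
    >>> urldefragauth('http://user:pass@example.com/path#frag')
    'http://example.com/path'
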
diff --git a/gui/slick/interfaces/default/config_notifications.tmpl b/gui/slick/interfaces/default/config_notifications.tmpl
index 9c989c1b6ece2238f097d5a3f81df789cf0c78e7..eb392ee48353c8e26150ff84f7b883ec56fcb07c 100644
--- a/gui/slick/interfaces/default/config_notifications.tmpl
+++ b/gui/slick/interfaces/default/config_notifications.tmpl
@@ -1448,15 +1448,18 @@
                                     <span class="component-desc">username of your Trakt account.</span>
                                 </p>
                             </div>
+                            <input type="hidden" id="trakt_pin_url" value="$sickbeard.TRAKT_PIN_URL">
+                            <input type="button" #if $sickbeard.TRAKT_ACCESS_TOKEN then "class=\"btn hide\"" else "class=\"btn\""# value="Get Trakt PIN" id="TraktGetPin" />
                             <div class="field-pair">
-                                <label for="trakt_password">
-                                    <span class="component-title">Trakt password</span>
-                                    <input type="password" name="trakt_password" id="trakt_password" value="$sickbeard.TRAKT_PASSWORD" class="form-control input-sm input250" />
+                                <label for="trakt_pin">
+                                    <span class="component-title">Trakt PIN</span>
+                                    <input type="text" name="trakt_pin" id="trakt_pin" value="" class="form-control input-sm input250" />
                                 </label>
                                 <p>
-                                    <span class="component-desc">password of your Trakt account.</span>
+                                    <span class="component-desc">PIN code to authorize SickRage to access Trakt on your behalf.</span>
                                 </p>
                             </div>
+                            <input type="button" class="btn hide" value="Authorize SickRage" id="authTrakt" />
                             <div class="field-pair">
                                 <label for="trakt_disable_ssl_verify">
                                     <span class="component-title">Disable SSL Verification:</span>
@@ -1494,7 +1497,7 @@
                                     <span class="component-title">Sync libraries:</span>
                                     <span class="component-desc">
                                         <input type="checkbox" class="enabler" name="trakt_sync" id="trakt_sync" #if $sickbeard.TRAKT_SYNC then "checked=\"checked\"" else ""# />
-                                        <p>Sync your SickRage show Library with your Trakt Show Collection.</p>
+                                        <p>Sync your SickRage show library with your Trakt show library.</p>
                                     </span>
                                 </label>
                             </div>
diff --git a/gui/slick/interfaces/default/inc_top.tmpl b/gui/slick/interfaces/default/inc_top.tmpl
index 748554c99a10c30cd38d7bfceece86a378d92ff4..f46ad2d63d7d2eb66fd1eacb81262034c3cb4cef 100644
--- a/gui/slick/interfaces/default/inc_top.tmpl
+++ b/gui/slick/interfaces/default/inc_top.tmpl
@@ -232,6 +232,7 @@
                                 <li><a href="$sbRoot/home/status/"><i class="menu-icon-help"></i>&nbsp;Server Status</a></li>
 							</ul>
 						</li>
+			            <li id="donate"><a href="http://sr-upgrade.appspot.com" rel="noreferrer" onclick="window.open('${sickbeard.ANON_REDIRECT}' + this.href); return false;"><img src="$sbRoot/images/donate.jpg" alt="[donate]" class="navbaricon hidden-xs" /></a></li>
 					</ul>
 			#end if
 				</div><!-- /.navbar-collapse -->
diff --git a/gui/slick/js/configNotifications.js b/gui/slick/js/configNotifications.js
index 00713117d29f1f13caf76b270ba182c60a7eb85d..801d73df3b243de22516a2cb4abd18ac8253a459 100644
--- a/gui/slick/js/configNotifications.js
+++ b/gui/slick/js/configNotifications.js
@@ -316,23 +316,63 @@ $(document).ready(function(){
             });
     });
 
+    $('#TraktGetPin').click(function () {
+        var trakt_pin_url = $('#trakt_pin_url').val();
+        window.open(trakt_pin_url, "popUp", "toolbar=no, scrollbars=no, resizable=no, top=200, left=200, width=650, height=550");
+        $('#trakt_pin').removeClass('hide');
+    });
+
+    // Toggle between the "Get Trakt PIN" and "Authorize SickRage" buttons
+    // whenever the PIN field changes, by typing or by paste.
+    $('#trakt_pin').on('change keyup', function () {
+        var trakt_pin = $('#trakt_pin').val();
+
+        if (trakt_pin.length !== 0) {
+            $('#TraktGetPin').addClass('hide');
+            $('#authTrakt').removeClass('hide');
+        } else {
+            $('#TraktGetPin').removeClass('hide');
+            $('#authTrakt').addClass('hide');
+        }
+    });
+
+    $('#authTrakt').click(function () {
+        var trakt_pin = $('#trakt_pin').val();
+        if (trakt_pin.length !== 0) {
+            $.get(sbRoot + '/home/getTraktToken', {"trakt_pin": trakt_pin})
+                .done(function (data) {
+                    $('#testTrakt-result').html(data);
+                    $('#authTrakt').addClass('hide');
+                    $('#trakt_pin').addClass('hide');
+                    $('#TraktGetPin').addClass('hide');
+                });
+        }
+    });
+
     $('#testTrakt').click(function () {
         var trakt_username = $.trim($('#trakt_username').val());
-        var trakt_password = $.trim($('#trakt_password').val());
         var trakt_trending_blacklist = $.trim($('#trakt_blacklist_name').val());
         var trakt_disable_ssl_verify = $('#trakt_disable_ssl_verify').is(':checked');
-        if (!trakt_username || !trakt_password) {
+        if (!trakt_username) {
             $('#testTrakt-result').html('Please fill out the necessary fields above.');
 			if (!trakt_username) {
 				$('#trakt_username').addClass('warning');
 			} else {
 				$('#trakt_username').removeClass('warning');
 			}
-			if (!trakt_password) {
-				$('#trakt_password').addClass('warning');
-			} else {
-				$('#trakt_password').removeClass('warning');
-			}
             return;
         }
 
@@ -341,11 +381,11 @@ $(document).ready(function(){
 	    $('#trakt_blacklist_name').addClass('warning');
             return;
         }
-		$('#trakt_username,#trakt_password').removeClass('warning');
+		$('#trakt_username').removeClass('warning');
 	        $('#trakt_blacklist_name').removeClass('warning');
         $(this).prop('disabled', true);
         $('#testTrakt-result').html(loading);
-        $.get(sbRoot + '/home/testTrakt', {'username': trakt_username, 'password': trakt_password, 'disable_ssl': trakt_disable_ssl_verify, 'blacklist_name': trakt_trending_blacklist})
+        $.get(sbRoot + '/home/testTrakt', {'username': trakt_username, 'disable_ssl': trakt_disable_ssl_verify, 'blacklist_name': trakt_trending_blacklist})
             .done(function (data) {
                 $('#testTrakt-result').html(data);
                 $('#testTrakt').prop('disabled', false);
diff --git a/gui/slick/js/fuzzyMoment.js b/gui/slick/js/fuzzyMoment.js
index dcf64b6ff6771172303d5cb0e5213ea2d89890a8..60c83bb825bcc7c9e2eaffa2ed399d6d995d6b81 100644
--- a/gui/slick/js/fuzzyMoment.js
+++ b/gui/slick/js/fuzzyMoment.js
@@ -98,12 +98,12 @@
         moment.lang('en', {
             calendar: {
                 lastDay:dateA + 'Yesterday' + timeA, sameDay:dateA + 'Today' + timeA, nextDay:dateA + 'Tomorrow' + timeA,
-                lastWeek:dateA + 'last] ddd' + timeB, nextWeek:dateA + 'on] ddd' + timeB,
+                lastWeek:dateA + 'Last] ddd' + timeB, nextWeek:dateA + 'On] ddd' + timeB,
                 sameElse:dateA + ']ddd, MMM D YYYY[' + timeA
             },
             relativeTime: {
-                future:'in %s', past:'%s ago', s:'seconds', m:'a minute', mm:'%d minutes', h:'an hour', hh:'%d hours',
-                d:'a day', dd:'%d days', M:'a month', MM:'%d months', y:'a year', yy:'%d years'
+                future:'In %s', past:'%s ago', s:'seconds', m:'A minute', mm:'%d minutes', h:'An hour', hh:'%d hours',
+                d:'A day', dd:'%d days', M:'A month', MM:'%d months', y:'A year', yy:'%d years'
             }
         });
 
@@ -126,11 +126,11 @@
             if (fuzzer)
                 result = result.replace(/\bOn\b/i, 'Next');
 
-        } else if (! /\b((yester|to)day\b|tomo|last\b)/i.test(result)) {
+        } else if (! /\b((Yester|To)day\b|Tomo|Last\b)/i.test(result)) {
             if (14 > day)
                 result = airdate.from(today) + (dateWithTime ? dtGlue + airdatetime.format(timeToken) : '');
             else if (4 > week) {
-                result = (isPast ? '' : 'in ') + (1 == week ? 'a' : week) + ' week' + (1 == week ? '' : 's') + (isPast ? ' ago' : '');
+                result = (isPast ? '' : 'In ') + (1 == week ? 'A' : week) + ' week' + (1 == week ? '' : 's') + (isPast ? ' ago' : '');
                 qTipTime = true;
             } else {
                 result = airdate.from(today);
@@ -141,14 +141,15 @@
             }
             titleThis = true;
         }
+        result = day < 7 ? result.replace(/\bLast \b/i, '') : result;
 
         var n = false; // disable for prod
         $(this).html(result);
-        if (dateWithTime && /(yester|to)day/i.test(result))
+        if (dateWithTime && /(Yester|To)day/i.test(result))
             $(this).find('.fd').attr('title',(n?'1) ':'') + moment.duration(airdatetime.diff(moment(),'seconds'),'seconds').humanize(true)).each(addQTip);
         else if (dateWithTime)
             $(this).find('.fd').attr('title',(n?'2) ':'') + airdate.from(today)).each(addQTip);
-        else if (! /today/i.test(result))
+        else if (! /Today/i.test(result))
             $(this).find('.fd').attr('title',(n?'3) ':'') + airdate.from(today)).each(addQTip);
         else
             titleThis = false;
diff --git a/lib/pynma/pynma.py b/lib/pynma/pynma.py
index fc7d8de2eeb4cf90ec5e6ceae4fb60c6925fd3ac..8da9b740248863548c0b772b0a19924f7ebefe28 100644
--- a/lib/pynma/pynma.py
+++ b/lib/pynma/pynma.py
@@ -6,7 +6,7 @@ from urllib import urlencode
 
 __version__ = "0.1"
 
-API_SERVER = 'nma.usk.bz'
+API_SERVER = 'www.notifymyandroid.com'
 ADD_PATH   = '/publicapi/notify'
 
 USER_AGENT="PyNMA/v%s"%__version__
diff --git a/lib/requests/__init__.py b/lib/requests/__init__.py
index 0ec356603c48601fc1d30e9e61d430473a427063..446500bfd4fc15eb24144a37d565df34cb1f34ab 100644
--- a/lib/requests/__init__.py
+++ b/lib/requests/__init__.py
@@ -42,8 +42,8 @@ is at <http://python-requests.org>.
 """
 
 __title__ = 'requests'
-__version__ = '2.5.1'
-__build__ = 0x020501
+__version__ = '2.6.0'
+__build__ = 0x020503
 __author__ = 'Kenneth Reitz'
 __license__ = 'Apache 2.0'
 __copyright__ = 'Copyright 2015 Kenneth Reitz'
diff --git a/lib/requests/adapters.py b/lib/requests/adapters.py
index c892853b295c339005c846ca3777a1f1479d1dee..02e0dd1f1d169bb57d5b3b8996b1868108d93523 100644
--- a/lib/requests/adapters.py
+++ b/lib/requests/adapters.py
@@ -11,10 +11,10 @@ and maintain connections.
 import socket
 
 from .models import Response
-from .packages.urllib3 import Retry
 from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
 from .packages.urllib3.response import HTTPResponse
 from .packages.urllib3.util import Timeout as TimeoutSauce
+from .packages.urllib3.util.retry import Retry
 from .compat import urlparse, basestring
 from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
                     prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
diff --git a/lib/requests/api.py b/lib/requests/api.py
index 1469b05c4997c5347ee8b25d4598f0d4e06b58b6..98c92298eb02122e6be3aa132a9d06106aa6d657 100644
--- a/lib/requests/api.py
+++ b/lib/requests/api.py
@@ -16,7 +16,6 @@ from . import sessions
 
 def request(method, url, **kwargs):
     """Constructs and sends a :class:`Request <Request>`.
-    Returns :class:`Response <Response>` object.
 
     :param method: method for the new :class:`Request` object.
     :param url: URL for the new :class:`Request` object.
@@ -37,6 +36,8 @@ def request(method, url, **kwargs):
     :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
     :param stream: (optional) if ``False``, the response content will be immediately downloaded.
     :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
 
     Usage::
 
@@ -55,10 +56,12 @@ def request(method, url, **kwargs):
 
 
 def get(url, **kwargs):
-    """Sends a GET request. Returns :class:`Response` object.
+    """Sends a GET request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     kwargs.setdefault('allow_redirects', True)
@@ -66,10 +69,12 @@ def get(url, **kwargs):
 
 
 def options(url, **kwargs):
-    """Sends a OPTIONS request. Returns :class:`Response` object.
+    """Sends a OPTIONS request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     kwargs.setdefault('allow_redirects', True)
@@ -77,10 +82,12 @@ def options(url, **kwargs):
 
 
 def head(url, **kwargs):
-    """Sends a HEAD request. Returns :class:`Response` object.
+    """Sends a HEAD request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     kwargs.setdefault('allow_redirects', False)
@@ -88,44 +95,52 @@ def head(url, **kwargs):
 
 
 def post(url, data=None, json=None, **kwargs):
-    """Sends a POST request. Returns :class:`Response` object.
+    """Sends a POST request.
 
     :param url: URL for the new :class:`Request` object.
     :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
     :param json: (optional) json data to send in the body of the :class:`Request`.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     return request('post', url, data=data, json=json, **kwargs)
 
 
 def put(url, data=None, **kwargs):
-    """Sends a PUT request. Returns :class:`Response` object.
+    """Sends a PUT request.
 
     :param url: URL for the new :class:`Request` object.
     :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     return request('put', url, data=data, **kwargs)
 
 
 def patch(url, data=None, **kwargs):
-    """Sends a PATCH request. Returns :class:`Response` object.
+    """Sends a PATCH request.
 
     :param url: URL for the new :class:`Request` object.
     :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     return request('patch', url, data=data, **kwargs)
 
 
 def delete(url, **kwargs):
-    """Sends a DELETE request. Returns :class:`Response` object.
+    """Sends a DELETE request.
 
     :param url: URL for the new :class:`Request` object.
     :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
     """
 
     return request('delete', url, **kwargs)
diff --git a/lib/requests/auth.py b/lib/requests/auth.py
index b950181d9e4839a7c1ace822db7f401438317e66..d1c482517667e511a16cfb0208b98a3260caf95e 100644
--- a/lib/requests/auth.py
+++ b/lib/requests/auth.py
@@ -124,13 +124,15 @@ class HTTPDigestAuth(AuthBase):
         s += os.urandom(8)
 
         cnonce = (hashlib.sha1(s).hexdigest()[:16])
-        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
         if _algorithm == 'MD5-SESS':
             HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
 
         if qop is None:
             respdig = KD(HA1, "%s:%s" % (nonce, HA2))
         elif qop == 'auth' or 'auth' in qop.split(','):
+            noncebit = "%s:%s:%s:%s:%s" % (
+                nonce, ncvalue, cnonce, 'auth', HA2
+                )
             respdig = KD(HA1, noncebit)
         else:
             # XXX handle auth-int.
diff --git a/lib/requests/compat.py b/lib/requests/compat.py
index c07726ee45ddd8140c6037ff9d493b283e8070b2..70edff784951c903d89c48697ac77e08759a3098 100644
--- a/lib/requests/compat.py
+++ b/lib/requests/compat.py
@@ -21,58 +21,6 @@ is_py2 = (_ver[0] == 2)
 #: Python 3.x?
 is_py3 = (_ver[0] == 3)
 
-#: Python 3.0.x
-is_py30 = (is_py3 and _ver[1] == 0)
-
-#: Python 3.1.x
-is_py31 = (is_py3 and _ver[1] == 1)
-
-#: Python 3.2.x
-is_py32 = (is_py3 and _ver[1] == 2)
-
-#: Python 3.3.x
-is_py33 = (is_py3 and _ver[1] == 3)
-
-#: Python 3.4.x
-is_py34 = (is_py3 and _ver[1] == 4)
-
-#: Python 2.7.x
-is_py27 = (is_py2 and _ver[1] == 7)
-
-#: Python 2.6.x
-is_py26 = (is_py2 and _ver[1] == 6)
-
-#: Python 2.5.x
-is_py25 = (is_py2 and _ver[1] == 5)
-
-#: Python 2.4.x
-is_py24 = (is_py2 and _ver[1] == 4)   # I'm assuming this is not by choice.
-
-
-# ---------
-# Platforms
-# ---------
-
-
-# Syntax sugar.
-_ver = sys.version.lower()
-
-is_pypy = ('pypy' in _ver)
-is_jython = ('jython' in _ver)
-is_ironpython = ('iron' in _ver)
-
-# Assume CPython, if nothing else.
-is_cpython = not any((is_pypy, is_jython, is_ironpython))
-
-# Windows-based system.
-is_windows = 'win32' in str(sys.platform).lower()
-
-# Standard Linux 2+ system.
-is_linux = ('linux' in str(sys.platform).lower())
-is_osx = ('darwin' in str(sys.platform).lower())
-is_hpux = ('hpux' in str(sys.platform).lower())   # Complete guess.
-is_solaris = ('solar==' in str(sys.platform).lower())   # Complete guess.
-
 try:
     import simplejson as json
 except (ImportError, SyntaxError):
@@ -99,7 +47,6 @@ if is_py2:
     basestring = basestring
     numeric_types = (int, long, float)
 
-
 elif is_py3:
     from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
     from urllib.request import parse_http_list, getproxies, proxy_bypass
diff --git a/lib/requests/models.py b/lib/requests/models.py
index b728c84e41b2e547ab12e37caa235f94b756f630..419cf0a8b5ee8bd7cbec48daddaf928943974968 100644
--- a/lib/requests/models.py
+++ b/lib/requests/models.py
@@ -143,12 +143,13 @@ class RequestEncodingMixin(object):
             else:
                 fn = guess_filename(v) or k
                 fp = v
-            if isinstance(fp, str):
-                fp = StringIO(fp)
-            if isinstance(fp, bytes):
-                fp = BytesIO(fp)
 
-            rf = RequestField(name=k, data=fp.read(),
+            if isinstance(fp, (str, bytes, bytearray)):
+                fdata = fp
+            else:
+                fdata = fp.read()
+
+            rf = RequestField(name=k, data=fdata,
                               filename=fn, headers=fh)
             rf.make_multipart(content_type=ft)
             new_fields.append(rf)
@@ -688,6 +689,8 @@ class Response(object):
         """Iterates over the response data, one line at a time.  When
         stream=True is set on the request, this avoids reading the
         content at once into memory for large responses.
+
+        .. note:: This method is not reentrant.
         """
 
         pending = None
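
With the _encode_files change above, string and bytes file bodies go straight
into the multipart encoder instead of taking a detour through StringIO/BytesIO.
A minimal sketch against httpbin:

    import requests

    r = requests.post('http://httpbin.org/post',
                      files={'report': ('report.csv', b'a,b\n1,2\n', 'text/csv')})
    print(r.json()['files']['report'])  # 'a,b\n1,2\n'
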
diff --git a/lib/requests/packages/README.rst b/lib/requests/packages/README.rst
deleted file mode 100644
index c42f376b944c9332ec95593d2796d78ecc336c32..0000000000000000000000000000000000000000
--- a/lib/requests/packages/README.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-If you are planning to submit a pull request to requests with any changes in 
-this library do not go any further. These are independent libraries which we 
-vendor into requests. Any changes necessary to these libraries must be made in 
-them and submitted as separate pull requests to those libraries.
-
-urllib3 pull requests go here: https://github.com/shazow/urllib3
-
-chardet pull requests go here: https://github.com/chardet/chardet
diff --git a/lib/requests/packages/__init__.py b/lib/requests/packages/__init__.py
index ec6a9e0646d6c1122bbd98858889ce6523f1f9f0..4dcf870f3ba7de37b6c3ac328ac5e06f83b781e8 100644
--- a/lib/requests/packages/__init__.py
+++ b/lib/requests/packages/__init__.py
@@ -27,9 +27,13 @@ import sys
 
 class VendorAlias(object):
 
-    def __init__(self):
+    def __init__(self, package_names):
+        self._package_names = package_names
         self._vendor_name = __name__
         self._vendor_pkg = self._vendor_name + "."
+        self._vendor_pkgs = [
+            self._vendor_pkg + name for name in self._package_names
+        ]
 
     def find_module(self, fullname, path=None):
         if fullname.startswith(self._vendor_pkg):
@@ -44,6 +48,14 @@ class VendorAlias(object):
                 )
             )
 
+        if not (name == self._vendor_name or
+                any(name.startswith(pkg) for pkg in self._vendor_pkgs)):
+            raise ImportError(
+                "Cannot import %s, must be one of %s." % (
+                    name, self._vendor_pkgs
+                )
+            )
+
         # Check to see if we already have this item in sys.modules, if we do
         # then simply return that.
         if name in sys.modules:
@@ -92,4 +104,4 @@ class VendorAlias(object):
         return module
 
 
-sys.meta_path.append(VendorAlias())
+sys.meta_path.append(VendorAlias(["urllib3", "chardet"]))
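
The effect of the allow-list is that the meta-path hook now serves only the
vendored packages and refuses anything else under requests.packages. A sketch
(the second module name is hypothetical):

    import requests.packages.urllib3  # served by VendorAlias

    try:
        import requests.packages.not_vendored  # hypothetical name
    except ImportError as exc:
        print(exc)  # Cannot import requests.packages.not_vendored, must be one of [...]
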
diff --git a/lib/requests/packages/chardet/chardetect.py b/lib/requests/packages/chardet/chardetect.py
old mode 100755
new mode 100644
diff --git a/lib/requests/packages/urllib3/__init__.py b/lib/requests/packages/urllib3/__init__.py
index d7592ae7918e86d2a7937f413a96dd89a0f5426d..0660b9c83a593749284f8444db1c6fc726889ea3 100644
--- a/lib/requests/packages/urllib3/__init__.py
+++ b/lib/requests/packages/urllib3/__init__.py
@@ -4,7 +4,7 @@ urllib3 - Thread-safe connection pooling and re-using.
 
 __author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
 __license__ = 'MIT'
-__version__ = 'dev'
+__version__ = '1.10.2'
 
 
 from .connectionpool import (
diff --git a/lib/requests/packages/urllib3/_collections.py b/lib/requests/packages/urllib3/_collections.py
index 784342a4eb5939ef7682c581dad219d3dee67316..cc424de0f45534ff0a5297be39c2e0d096ebebb1 100644
--- a/lib/requests/packages/urllib3/_collections.py
+++ b/lib/requests/packages/urllib3/_collections.py
@@ -1,7 +1,7 @@
 from collections import Mapping, MutableMapping
 try:
     from threading import RLock
-except ImportError: # Platform-specific: No threads available
+except ImportError:  # Platform-specific: No threads available
     class RLock:
         def __enter__(self):
             pass
@@ -10,11 +10,11 @@ except ImportError: # Platform-specific: No threads available
             pass
 
 
-try: # Python 2.7+
+try:  # Python 2.7+
     from collections import OrderedDict
 except ImportError:
     from .packages.ordered_dict import OrderedDict
-from .packages.six import iterkeys, itervalues
+from .packages.six import iterkeys, itervalues, PY3
 
 
 __all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
@@ -97,7 +97,14 @@ class RecentlyUsedContainer(MutableMapping):
             return list(iterkeys(self._container))
 
 
-class HTTPHeaderDict(MutableMapping):
+_dict_setitem = dict.__setitem__
+_dict_getitem = dict.__getitem__
+_dict_delitem = dict.__delitem__
+_dict_contains = dict.__contains__
+_dict_setdefault = dict.setdefault
+
+
+class HTTPHeaderDict(dict):
     """
     :param headers:
         An iterable of field-value pairs. Must not contain multiple field names
@@ -129,25 +136,75 @@ class HTTPHeaderDict(MutableMapping):
     'foo=bar, baz=quxx'
     >>> headers['Content-Length']
     '7'
-
-    If you want to access the raw headers with their original casing
-    for debugging purposes you can access the private ``._data`` attribute
-    which is a normal python ``dict`` that maps the case-insensitive key to a
-    list of tuples stored as (case-sensitive-original-name, value). Using the
-    structure from above as our example:
-
-    >>> headers._data
-    {'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
-    'content-length': [('content-length', '7')]}
     """
 
     def __init__(self, headers=None, **kwargs):
-        self._data = {}
-        if headers is None:
-            headers = {}
-        self.update(headers, **kwargs)
+        dict.__init__(self)
+        if headers is not None:
+            if isinstance(headers, HTTPHeaderDict):
+                self._copy_from(headers)
+            else:
+                self.extend(headers)
+        if kwargs:
+            self.extend(kwargs)
+
+    def __setitem__(self, key, val):
+        return _dict_setitem(self, key.lower(), (key, val))
+
+    def __getitem__(self, key):
+        val = _dict_getitem(self, key.lower())
+        return ', '.join(val[1:])
+
+    def __delitem__(self, key):
+        return _dict_delitem(self, key.lower())
 
-    def add(self, key, value):
+    def __contains__(self, key):
+        return _dict_contains(self, key.lower())
+
+    def __eq__(self, other):
+        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+            return False
+        if not isinstance(other, type(self)):
+            other = type(self)(other)
+        return dict((k1, self[k1]) for k1 in self) == dict((k2, other[k2]) for k2 in other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    values = MutableMapping.values
+    get = MutableMapping.get
+    update = MutableMapping.update
+
+    if not PY3:  # Python 2
+        iterkeys = MutableMapping.iterkeys
+        itervalues = MutableMapping.itervalues
+
+    __marker = object()
+
+    def pop(self, key, default=__marker):
+        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+          If key is not found, d is returned if given, otherwise KeyError is raised.
+        '''
+        # Using the MutableMapping function directly fails due to the private marker.
+        # Using ordinary dict.pop would expose the internal structures.
+        # So let's reinvent the wheel.
+        try:
+            value = self[key]
+        except KeyError:
+            if default is self.__marker:
+                raise
+            return default
+        else:
+            del self[key]
+            return value
+
+    def discard(self, key):
+        try:
+            del self[key]
+        except KeyError:
+            pass
+
+    def add(self, key, val):
         """Adds a (name, value) pair, doesn't overwrite the value if it already
         exists.
 
@@ -156,43 +213,108 @@ class HTTPHeaderDict(MutableMapping):
         >>> headers['foo']
         'bar, baz'
         """
-        self._data.setdefault(key.lower(), []).append((key, value))
+        key_lower = key.lower()
+        new_vals = key, val
+        # Keep the common case (no item present) as fast as possible
+        vals = _dict_setdefault(self, key_lower, new_vals)
+        if new_vals is not vals:
+            # new_vals was not inserted, as there was a previous one
+            if isinstance(vals, list):
+                # If already several items got inserted, we have a list
+                vals.append(val)
+            else:
+                # vals should be a tuple then, i.e. only one item so far
+                # Need to convert the tuple to list for further extension
+                _dict_setitem(self, key_lower, [vals[0], vals[1], val])
+
+    def extend(*args, **kwargs):
+        """Generic import function for any type of header-like object.
+        Adapted version of MutableMapping.update in order to insert items
+        with self.add instead of self.__setitem__
+        """
+        if len(args) > 2:
+            raise TypeError("extend() takes at most 2 positional "
+                            "arguments ({0} given)".format(len(args)))
+        elif not args:
+            raise TypeError("extend() takes at least 1 argument (0 given)")
+        self = args[0]
+        other = args[1] if len(args) >= 2 else ()
+
+        if isinstance(other, Mapping):
+            for key in other:
+                self.add(key, other[key])
+        elif hasattr(other, "keys"):
+            for key in other.keys():
+                self.add(key, other[key])
+        else:
+            for key, value in other:
+                self.add(key, value)
+
+        for key, value in kwargs.items():
+            self.add(key, value)
 
     def getlist(self, key):
         """Returns a list of all the values for the named field. Returns an
         empty list if the key doesn't exist."""
-        return self[key].split(', ') if key in self else []
-
-    def copy(self):
-        h = HTTPHeaderDict()
-        for key in self._data:
-            for rawkey, value in self._data[key]:
-                h.add(rawkey, value)
-        return h
-
-    def __eq__(self, other):
-        if not isinstance(other, Mapping):
-            return False
-        other = HTTPHeaderDict(other)
-        return dict((k1, self[k1]) for k1 in self._data) == \
-                dict((k2, other[k2]) for k2 in other._data)
-
-    def __getitem__(self, key):
-        values = self._data[key.lower()]
-        return ', '.join(value[1] for value in values)
-
-    def __setitem__(self, key, value):
-        self._data[key.lower()] = [(key, value)]
+        try:
+            vals = _dict_getitem(self, key.lower())
+        except KeyError:
+            return []
+        else:
+            if isinstance(vals, tuple):
+                return [vals[1]]
+            else:
+                return vals[1:]
+
+    # Backwards compatibility for httplib
+    getheaders = getlist
+    getallmatchingheaders = getlist
+    iget = getlist
 
-    def __delitem__(self, key):
-        del self._data[key.lower()]
+    def __repr__(self):
+        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
 
-    def __len__(self):
-        return len(self._data)
+    def _copy_from(self, other):
+        for key in other:
+            val = _dict_getitem(other, key)
+            if isinstance(val, list):
+                # Don't need to convert tuples
+                val = list(val)
+            _dict_setitem(self, key, val)
 
-    def __iter__(self):
-        for headers in itervalues(self._data):
-            yield headers[0][0]
-
-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
+    def copy(self):
+        clone = type(self)()
+        clone._copy_from(self)
+        return clone
+
+    def iteritems(self):
+        """Iterate over all header lines, including duplicate ones."""
+        for key in self:
+            vals = _dict_getitem(self, key)
+            for val in vals[1:]:
+                yield vals[0], val
+
+    def itermerged(self):
+        """Iterate over all headers, merging duplicate ones together."""
+        for key in self:
+            val = _dict_getitem(self, key)
+            yield val[0], ', '.join(val[1:])
+
+    def items(self):
+        return list(self.iteritems())
+
+    @classmethod
+    def from_httplib(cls, message, duplicates=('set-cookie',)): # Python 2
+        """Read headers from a Python 2 httplib message object."""
+        ret = cls(message.items())
+        # ret now contains only the last header line for each duplicate.
+        # Importing with all duplicates would be nice, but this would
+        # mean to repeat most of the raw parsing already done, when the
+        # message object was created. Extracting the headers of interest
+        # (the cookies) separately should be faster and requires less
+        # extra code.
+        for key in duplicates:
+            ret.discard(key)
+            for val in message.getheaders(key):
+                ret.add(key, val)
+        return ret
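
A minimal sketch of the new storage scheme, assuming the vendored copy is importable as requests.packages.urllib3: a lone value is stored as a (cased-name, value) tuple and add() promotes it to a list, so __getitem__ joins values with ', ' while getlist() and iteritems() expose them individually.

    from requests.packages.urllib3._collections import HTTPHeaderDict

    headers = HTTPHeaderDict()
    headers.add('Set-Cookie', 'foo=bar')
    headers.add('set-cookie', 'baz=quxx')   # same field, case-insensitive
    headers['Content-Length'] = '7'

    print(headers['set-cookie'])            # 'foo=bar, baz=quxx'
    print(headers.getlist('set-cookie'))    # ['foo=bar', 'baz=quxx']
    print(list(headers.iteritems()))        # one (name, value) per header line
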
diff --git a/lib/requests/packages/urllib3/connectionpool.py b/lib/requests/packages/urllib3/connectionpool.py
index 8bdf228ffe1fcc308e7f9c3a98cac7bc19b17f3f..0085345c43d0f84b63f08c643d44b4e6e70c2093 100644
--- a/lib/requests/packages/urllib3/connectionpool.py
+++ b/lib/requests/packages/urllib3/connectionpool.py
@@ -72,6 +72,21 @@ class ConnectionPool(object):
         return '%s(host=%r, port=%r)' % (type(self).__name__,
                                          self.host, self.port)
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+        # Return False to re-raise any potential exceptions
+        return False
+
+    def close(self):
+        """
+        Close all pooled connections and disable the pool.
+        """
+        pass
+
+
 # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
 _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
 
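
The context-manager protocol added above makes pool cleanup deterministic; a short sketch, assuming the vendored import path:

    from requests.packages.urllib3 import HTTPConnectionPool

    with HTTPConnectionPool('httpbin.org', maxsize=1) as pool:
        r = pool.request('GET', '/get')
        print(r.status)
    # __exit__ calls close() and returns False, so exceptions still propagate
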
@@ -353,7 +368,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
 
         # Receive the response from the server
         try:
-            try:  # Python 2.7+, use buffering of HTTP responses
+            try:  # Python 2.7, use buffering of HTTP responses
                 httplib_response = conn.getresponse(buffering=True)
             except TypeError:  # Python 2.6 and older
                 httplib_response = conn.getresponse()
@@ -558,6 +573,14 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 conn = None
             raise SSLError(e)
 
+        except SSLError:
+            # Treat SSLError separately from BaseSSLError to preserve
+            # traceback.
+            if conn:
+                conn.close()
+                conn = None
+            raise
+
         except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
             if conn:
                 # Discard the connection for these exceptions. It will be
@@ -565,14 +588,13 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 conn.close()
                 conn = None
 
-            stacktrace = sys.exc_info()[2]
             if isinstance(e, SocketError) and self.proxy:
                 e = ProxyError('Cannot connect to proxy.', e)
             elif isinstance(e, (SocketError, HTTPException)):
                 e = ProtocolError('Connection aborted.', e)
 
-            retries = retries.increment(method, url, error=e,
-                                        _pool=self, _stacktrace=stacktrace)
+            retries = retries.increment(method, url, error=e, _pool=self,
+                                        _stacktrace=sys.exc_info()[2])
             retries.sleep()
 
             # Keep track of the error for the retry warning.
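
With the reworked except clause, socket and httplib failures are wrapped as ProxyError/ProtocolError and handed to Retry.increment, which raises MaxRetryError once the budget is exhausted. A sketch of opting into that machinery, assuming the vendored import path:

    from requests.packages.urllib3 import HTTPConnectionPool
    from requests.packages.urllib3.util.retry import Retry

    pool = HTTPConnectionPool('httpbin.org')
    # Allow up to 3 retries with backoff; a persistent connection failure
    # now surfaces as MaxRetryError instead of a raw socket error.
    r = pool.urlopen('GET', '/get', retries=Retry(total=3, backoff_factor=0.2))
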
diff --git a/lib/requests/packages/urllib3/exceptions.py b/lib/requests/packages/urllib3/exceptions.py
index 0c6fd3c51b7486ebf105b26d9799e6208f00ad18..5d52301122e42dcd44f266be1257abceaf36f829 100644
--- a/lib/requests/packages/urllib3/exceptions.py
+++ b/lib/requests/packages/urllib3/exceptions.py
@@ -157,3 +157,8 @@ class InsecureRequestWarning(SecurityWarning):
 class SystemTimeWarning(SecurityWarning):
     "Warned when system time is suspected to be wrong"
     pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+    "Warned when certain SSL configuration is not available on a platform."
+    pass
diff --git a/lib/requests/packages/urllib3/poolmanager.py b/lib/requests/packages/urllib3/poolmanager.py
index 515dc96219b187c20272456f1b9f1d5f325d021f..b8d1e745d14997f084d44edf0c17fc646ca8d7bc 100644
--- a/lib/requests/packages/urllib3/poolmanager.py
+++ b/lib/requests/packages/urllib3/poolmanager.py
@@ -8,7 +8,7 @@ except ImportError:
 from ._collections import RecentlyUsedContainer
 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
 from .connectionpool import port_by_scheme
-from .exceptions import LocationValueError
+from .exceptions import LocationValueError, MaxRetryError
 from .request import RequestMethods
 from .util.url import parse_url
 from .util.retry import Retry
@@ -64,6 +64,14 @@ class PoolManager(RequestMethods):
         self.pools = RecentlyUsedContainer(num_pools,
                                            dispose_func=lambda p: p.close())
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.clear()
+        # Return False to re-raise any potential exceptions
+        return False
+
     def _new_pool(self, scheme, host, port):
         """
         Create a new :class:`ConnectionPool` based on host, port and scheme.
@@ -167,7 +175,14 @@ class PoolManager(RequestMethods):
         if not isinstance(retries, Retry):
             retries = Retry.from_int(retries, redirect=redirect)
 
-        kw['retries'] = retries.increment(method, redirect_location)
+        try:
+            retries = retries.increment(method, url, response=response, _pool=conn)
+        except MaxRetryError:
+            if retries.raise_on_redirect:
+                raise
+            return response
+
+        kw['retries'] = retries
         kw['redirect'] = redirect
 
         log.info("Redirecting %s -> %s" % (url, redirect_location))
diff --git a/lib/requests/packages/urllib3/response.py b/lib/requests/packages/urllib3/response.py
index e69de95733ad2cdcc2fc5fb019116e09f0ec4a86..34cd3d7057fc71855db223b4e5b8fb939211a700 100644
--- a/lib/requests/packages/urllib3/response.py
+++ b/lib/requests/packages/urllib3/response.py
@@ -4,12 +4,11 @@ from socket import timeout as SocketTimeout
 
 from ._collections import HTTPHeaderDict
 from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
-from .packages.six import string_types as basestring, binary_type
+from .packages.six import string_types as basestring, binary_type, PY3
 from .connection import HTTPException, BaseSSLError
 from .util.response import is_fp_closed
 
 
-
 class DeflateDecoder(object):
 
     def __init__(self):
@@ -21,6 +20,9 @@ class DeflateDecoder(object):
         return getattr(self._obj, name)
 
     def decompress(self, data):
+        if not data:
+            return data
+
         if not self._first_try:
             return self._obj.decompress(data)
 
@@ -36,9 +38,23 @@ class DeflateDecoder(object):
                 self._data = None
 
 
+class GzipDecoder(object):
+
+    def __init__(self):
+        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+    def __getattr__(self, name):
+        return getattr(self._obj, name)
+
+    def decompress(self, data):
+        if not data:
+            return data
+        return self._obj.decompress(data)
+
+
 def _get_decoder(mode):
     if mode == 'gzip':
-        return zlib.decompressobj(16 + zlib.MAX_WBITS)
+        return GzipDecoder()
 
     return DeflateDecoder()
 
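
GzipDecoder gives gzip responses the same incremental interface DeflateDecoder already had; the 16 + MAX_WBITS window value tells zlib to expect a gzip header and trailer. A sketch, assuming the vendored import path:

    import zlib
    from requests.packages.urllib3.response import GzipDecoder

    # Build a gzip-framed payload, then feed it through the decoder
    co = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
    gz = co.compress(b'hello world') + co.flush()

    dec = GzipDecoder()
    assert dec.decompress(b'') == b''            # empty chunks short-circuit
    assert dec.decompress(gz) == b'hello world'
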
@@ -76,9 +92,10 @@ class HTTPResponse(io.IOBase):
                  strict=0, preload_content=True, decode_content=True,
                  original_response=None, pool=None, connection=None):
 
-        self.headers = HTTPHeaderDict()
-        if headers:
-            self.headers.update(headers)
+        if isinstance(headers, HTTPHeaderDict):
+            self.headers = headers
+        else:
+            self.headers = HTTPHeaderDict(headers)
         self.status = status
         self.version = version
         self.reason = reason
@@ -202,7 +219,7 @@ class HTTPResponse(io.IOBase):
 
             except BaseSSLError as e:
                 # FIXME: Is there a better way to differentiate between SSLErrors?
-                if not 'read operation timed out' in str(e):  # Defensive:
+                if 'read operation timed out' not in str(e):  # Defensive:
                     # This shouldn't happen but just in case we're missing an edge
                     # case, let's avoid swallowing SSL errors.
                     raise
@@ -267,14 +284,16 @@ class HTTPResponse(io.IOBase):
         Remaining parameters are passed to the HTTPResponse constructor, along
         with ``original_response=r``.
         """
-
-        headers = HTTPHeaderDict()
-        for k, v in r.getheaders():
-            headers.add(k, v)
+        headers = r.msg
+        if not isinstance(headers, HTTPHeaderDict):
+            if PY3: # Python 3
+                headers = HTTPHeaderDict(headers.items())
+            else: # Python 2
+                headers = HTTPHeaderDict.from_httplib(headers)
 
         # HTTPResponse objects in Python 3 don't have a .strict attribute
         strict = getattr(r, 'strict', 0)
-        return ResponseCls(body=r,
+        resp = ResponseCls(body=r,
                            headers=headers,
                            status=r.status,
                            version=r.version,
@@ -282,6 +301,7 @@ class HTTPResponse(io.IOBase):
                            strict=strict,
                            original_response=r,
                            **response_kw)
+        return resp
 
     # Backwards-compatibility methods for httplib.HTTPResponse
     def getheaders(self):
diff --git a/lib/requests/packages/urllib3/util/connection.py b/lib/requests/packages/urllib3/util/connection.py
index 2156993a0c413c1e1c0cb5395105eeb4ba9d6a47..859aec6ee6be0f6e2e59753ad7fb41c9370a3073 100644
--- a/lib/requests/packages/urllib3/util/connection.py
+++ b/lib/requests/packages/urllib3/util/connection.py
@@ -82,6 +82,7 @@ def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
             err = _
             if sock is not None:
                 sock.close()
+                sock = None
 
     if err is not None:
         raise err
diff --git a/lib/requests/packages/urllib3/util/ssl_.py b/lib/requests/packages/urllib3/util/ssl_.py
index a788b1b98c63150fe70021d96d2b8e1d45579019..e7e7dfae1881d7303e584fb61fbbd1fc62300ae8 100644
--- a/lib/requests/packages/urllib3/util/ssl_.py
+++ b/lib/requests/packages/urllib3/util/ssl_.py
@@ -1,7 +1,7 @@
 from binascii import hexlify, unhexlify
-from hashlib import md5, sha1
+from hashlib import md5, sha1, sha256
 
-from ..exceptions import SSLError
+from ..exceptions import SSLError, InsecurePlatformWarning
 
 
 SSLContext = None
@@ -10,6 +10,7 @@ create_default_context = None
 
 import errno
 import ssl
+import warnings
 
 try:  # Test for SSL features
     from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
@@ -29,8 +30,8 @@ try:
 except ImportError:
     _DEFAULT_CIPHERS = (
         'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
-        'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
-        'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
+        'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
+        '!eNULL:!MD5'
     )
 
 try:
@@ -69,6 +70,14 @@ except ImportError:
             self.ciphers = cipher_suite
 
         def wrap_socket(self, socket, server_hostname=None):
+            warnings.warn(
+                'A true SSLContext object is not available. This prevents '
+                'urllib3 from configuring SSL appropriately and may cause '
+                'certain SSL connections to fail. For more information, see '
+                'https://urllib3.readthedocs.org/en/latest/security.html'
+                '#insecureplatformwarning.',
+                InsecurePlatformWarning
+            )
             kwargs = {
                 'keyfile': self.keyfile,
                 'certfile': self.certfile,
@@ -96,7 +105,8 @@ def assert_fingerprint(cert, fingerprint):
     # this digest.
     hashfunc_map = {
         16: md5,
-        20: sha1
+        20: sha1,
+        32: sha256,
     }
 
     fingerprint = fingerprint.replace(':', '').lower()
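
With sha256 in hashfunc_map, a 64-hex-digit pin is accepted alongside md5 and sha1 digests. One way to compute such a fingerprint from a live certificate using only the standard library (the host is a placeholder):

    import ssl
    from hashlib import sha256

    pem = ssl.get_server_certificate(('example.com', 443))
    der = ssl.PEM_cert_to_DER_cert(pem)
    digest = sha256(der).hexdigest()    # 64 hex chars -> hashfunc_map[32]
    print(':'.join(digest[i:i + 2] for i in range(0, len(digest), 2)))
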
@@ -211,7 +221,9 @@ def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
 
     context.verify_mode = cert_reqs
     if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
-        context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
+        # We do our own verification, including fingerprints and alternative
+        # hostnames. So disable it here
+        context.check_hostname = False
     return context
 
 
diff --git a/lib/requests/sessions.py b/lib/requests/sessions.py
index 4f3069635332a9fe3284e332eccb7d1e38f62fe4..ef3f22bc5c5b7075301d5a162de00061041f2417 100644
--- a/lib/requests/sessions.py
+++ b/lib/requests/sessions.py
@@ -171,7 +171,10 @@ class SessionRedirectMixin(object):
             except KeyError:
                 pass
 
-            extract_cookies_to_jar(prepared_request._cookies, prepared_request, resp.raw)
+            # Extract any cookies sent on the response to the cookiejar
+            # in the new request. Because we've mutated our copied prepared
+            # request, use the old one that we haven't yet touched.
+            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
             prepared_request._cookies.update(self.cookies)
             prepared_request.prepare_cookies(prepared_request._cookies)
 
diff --git a/lib/requests/utils.py b/lib/requests/utils.py
index 74679414471b02a8bd6e806dc2a6e47f4a9c18d3..8fba62dd82aec65e9339f8f25ea19e68a403d7b5 100644
--- a/lib/requests/utils.py
+++ b/lib/requests/utils.py
@@ -25,7 +25,8 @@ from . import __version__
 from . import certs
 from .compat import parse_http_list as _parse_list_header
 from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
-                     builtin_str, getproxies, proxy_bypass, urlunparse)
+                     builtin_str, getproxies, proxy_bypass, urlunparse,
+                     basestring)
 from .cookies import RequestsCookieJar, cookiejar_from_dict
 from .structures import CaseInsensitiveDict
 from .exceptions import InvalidURL
@@ -115,7 +116,8 @@ def get_netrc_auth(url):
 def guess_filename(obj):
     """Tries to guess the filename of the given object."""
     name = getattr(obj, 'name', None)
-    if name and isinstance(name, builtin_str) and name[0] != '<' and name[-1] != '>':
+    if (name and isinstance(name, basestring) and name[0] != '<' and
+            name[-1] != '>'):
         return os.path.basename(name)
 
 
@@ -418,10 +420,18 @@ def requote_uri(uri):
     This function passes the given URI through an unquote/quote cycle to
     ensure that it is fully and consistently quoted.
     """
-    # Unquote only the unreserved characters
-    # Then quote only illegal characters (do not quote reserved, unreserved,
-    # or '%')
-    return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
+    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+    try:
+        # Unquote only the unreserved characters
+        # Then quote only illegal characters (do not quote reserved,
+        # unreserved, or '%')
+        return quote(unquote_unreserved(uri), safe=safe_with_percent)
+    except InvalidURL:
+        # We couldn't unquote the given URI, so let's try quoting it, but
+        # there may be unquoted '%'s in the URI. We need to make sure they're
+        # properly quoted so they do not cause issues elsewhere.
+        return quote(uri, safe=safe_without_percent)
 
 
 def address_in_network(ip, net):
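
The fallback covers URIs containing a bare '%' that is not part of a valid percent-escape: unquote_unreserved() raises InvalidURL on those, and the URI is then re-quoted with '%' dropped from the safe set so it is encoded as '%25'. For example:

    from requests.utils import requote_uri

    print(requote_uri('http://example.com/a%20b'))  # http://example.com/a%20b
    print(requote_uri('http://example.com/100%'))   # http://example.com/100%25
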
diff --git a/lib/trakt/trakt.py b/lib/trakt/trakt.py
index 8381e22f1f610a46aedd1760629b964792f6ffa8..98c5ed157ffb42594d9a19a18b996d8ee71f2bac 100644
--- a/lib/trakt/trakt.py
+++ b/lib/trakt/trakt.py
@@ -1,67 +1,78 @@
 from lib import requests
 import json
+import sickbeard
+import time
 from sickbeard import logger
 
 from exceptions import traktException, traktAuthException, traktServerBusy
 
 class TraktAPI():
-    def __init__(self, apikey, username=None, password=None, disable_ssl_verify=False, timeout=30):
-        self.username = username
-        self.password = password
+    def __init__(self, disable_ssl_verify=False, timeout=30):
         self.verify = not disable_ssl_verify
         self.timeout = timeout if timeout else None
-        self.api_url = 'https://api-v2launch.trakt.tv/'
+        self.auth_url = sickbeard.TRAKT_OAUTH_URL
+        self.api_url = sickbeard.TRAKT_API_URL
         self.headers = {
           'Content-Type': 'application/json',
           'trakt-api-version': '2',
-          'trakt-api-key': apikey,
+          'trakt-api-key': sickbeard.TRAKT_API_KEY
         }
 
-    def validateAccount(self):
-        if hasattr(self, 'token'):
-            del(self.token)
+    def traktToken(self, trakt_pin=None, refresh=False, count=0):
+        if count > 3:
+            sickbeard.TRAKT_ACCESS_TOKEN = ''
+            return False
+        elif count > 0:
+            time.sleep(2)
+
         data = {
-            'login': self.username,
-            'password': self.password
+            'client_id': sickbeard.TRAKT_API_KEY,
+            'client_secret': sickbeard.TRAKT_API_SECRET,
+            'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob'
         }
-        try:
-            resp = requests.request('POST', self.api_url+"auth/login", headers=self.headers,
-                data=json.dumps(data), timeout=self.timeout, verify=self.verify)
-            resp.raise_for_status()
-            resp = resp.json()
-        except requests.RequestException as e:
-            code = getattr(e.response, 'status_code', None)
-            if not code:
-                # This is pretty much a fatal error if there is no status_code
-                # It means there basically was no response at all
-                raise traktException(e)
-            elif code == 502:
-                # Retry the request, cloudflare had a proxying issue
-                logger.log(u"Retrying trakt api request: auth/login", logger.WARNING)
-                return self.validateAccount()
-            elif code == 401:
-                logger.log(u"Unauthorized. Please check your Trakt settings", logger.WARNING)
-                raise traktAuthException(e)
-            elif code in (500,501,503,504,520,521,522):
-                #http://docs.trakt.apiary.io/#introduction/status-codes
-                logger.log(u"Trakt may have some issues and it's unavailable. Try again later please", logger.WARNING)
-                raise traktServerBusy(e)
-            else:
-                raise traktException(e)
-        if 'token' in resp:
-            self.token = resp['token']
+
+        if refresh:
+            data['grant_type'] = 'refresh_token'
+            data['refresh_token'] = sickbeard.TRAKT_REFRESH_TOKEN
+        else:
+            data['grant_type'] = 'authorization_code'
+            if trakt_pin is not None:
+                data['code'] = trakt_pin
+
+        headers = {
+            'Content-Type': 'application/json'
+        }
+
+        resp = self.traktRequest('oauth/token', data=data, headers=headers, url=self.auth_url, method='POST', count=count)
+
+        if 'access_token' in resp:
+            sickbeard.TRAKT_ACCESS_TOKEN = resp['access_token']
+            if 'refresh_token' in resp:
+                sickbeard.TRAKT_REFRESH_TOKEN = resp['refresh_token']
             return True
         return False
-
-    def traktRequest(self, path, data=None, method='GET'):
-        url = self.api_url + path
-        headers = self.headers
-        if not getattr(self, 'token', None):
-            self.validateAccount()
-        headers['trakt-user-login'] = self.username
-        headers['trakt-user-token'] = self.token
-
-        # request the URL from trakt and parse the result as json
+
+    def validateAccount(self):
+        resp = self.traktRequest('users/settings')
+
+        if 'account' in resp:
+            return True
+        return False
+
+    def traktRequest(self, path, data=None, headers=None, url=None, method='GET', count=0):
+        if url is None:
+            url = self.api_url + path
+        else:
+            url = url + path
+
+        count = count + 1
+
+        if headers is None:
+            headers = self.headers
+
+        if sickbeard.TRAKT_ACCESS_TOKEN is not None:
+            headers['Authorization'] = 'Bearer ' + sickbeard.TRAKT_ACCESS_TOKEN
+
         try:
             resp = requests.request(method, url, headers=headers, timeout=self.timeout,
                 data=json.dumps(data) if data else [], verify=self.verify)
@@ -79,14 +94,16 @@ class TraktAPI():
                 raise traktException(e)
             elif code == 502:
                 # Retry the request, cloudflare had a proxying issue
-                logger.log(u"Retrying trakt api request: %s" % path, logger.WARNING)
-                return self.traktRequest(path, data, method)
+                logger.log(u'Retrying trakt api request: %s' % path, logger.WARNING)
+                return self.traktRequest(path, data, headers, method=method)
             elif code == 401:
-                logger.log(u"Unauthorized. Please check your Trakt settings", logger.WARNING)
+                logger.log(u'Unauthorized. Please check your Trakt settings', logger.WARNING)
+                if self.traktToken(refresh=True, count=count):
+                    return self.traktRequest(path, data, headers, method=method)
                 raise traktAuthException(e)
             elif code in (500,501,503,504,520,521,522):
                 #http://docs.trakt.apiary.io/#introduction/status-codes
-                logger.log(u"Trakt may have some issues and it's unavailable. Try again later please", logger.WARNING)
+                logger.log(u'Trakt may have some issues and it\'s unavailable. Please try again later', logger.WARNING)
                 raise traktServerBusy(e)
             else:
                 raise traktException(e)
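
A sketch of the resulting PIN flow, assuming an initialized sickbeard module and that the class is importable from lib.trakt (the PIN value is a placeholder the user copies from TRAKT_PIN_URL):

    from lib.trakt.trakt import TraktAPI

    api = TraktAPI(disable_ssl_verify=False, timeout=30)

    # One-time exchange; stores TRAKT_ACCESS_TOKEN/TRAKT_REFRESH_TOKEN on success
    if api.traktToken(trakt_pin='1234567890'):
        # Subsequent requests send 'Authorization: Bearer <access token>'
        print(api.validateAccount())
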
diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py
index e5df48eb4d698ddc85bc8263652dc961a129f565..6f37d89a2b2522c24358e092e8d1270570be96b3 100755
--- a/sickbeard/__init__.py
+++ b/sickbeard/__init__.py
@@ -420,7 +420,8 @@ SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = False
 
 USE_TRAKT = False
 TRAKT_USERNAME = None
-TRAKT_PASSWORD = None
+TRAKT_ACCESS_TOKEN = None
+TRAKT_REFRESH_TOKEN = None
 TRAKT_REMOVE_WATCHLIST = False
 TRAKT_REMOVE_SERIESLIST = False
 TRAKT_SYNC_WATCHLIST = False
@@ -521,7 +522,12 @@ CALENDAR_UNPROTECTED = False
 NO_RESTART = False
 
 TMDB_API_KEY = 'edc5f123313769de83a71e157758030b'
-TRAKT_API_KEY = 'd4161a7a106424551add171e5470112e4afdaf2438e6ef2fe0548edc75924868'
+TRAKT_API_KEY = '5c65f55e11d48c35385d9e8670615763a605fad28374c8ae553a7b7a50651ddd'
+TRAKT_API_SECRET = 'b53e32045ac122a445ef163e6d859403301ffe9b17fb8321d428531b69022a82'
+TRAKT_PIN_URL = 'https://trakt.tv/pin/4562'
+TRAKT_OAUTH_URL = 'https://trakt.tv/'
+TRAKT_API_URL = 'https://api-v2launch.trakt.tv/'
+
 FANART_API_KEY = '9b3afaf26f6241bdb57d6cc6bd798da7'
 
 __INITIALIZED__ = False
@@ -542,7 +550,7 @@ def initialize(consoleLogging=True):
             TORRENT_USERNAME, TORRENT_PASSWORD, TORRENT_HOST, TORRENT_PATH, TORRENT_SEED_TIME, TORRENT_PAUSED, TORRENT_HIGH_BANDWIDTH, TORRENT_LABEL, TORRENT_LABEL_ANIME, TORRENT_VERIFY_CERT, TORRENT_RPCURL, TORRENT_AUTH_TYPE, \
             USE_KODI, KODI_ALWAYS_ON, KODI_NOTIFY_ONSNATCH, KODI_NOTIFY_ONDOWNLOAD, KODI_NOTIFY_ONSUBTITLEDOWNLOAD, KODI_UPDATE_FULL, KODI_UPDATE_ONLYFIRST, \
             KODI_UPDATE_LIBRARY, KODI_HOST, KODI_USERNAME, KODI_PASSWORD, BACKLOG_FREQUENCY, \
-            USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, traktRollingScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_SYNC_REMOVE, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_DISABLE_SSL_VERIFY, TRAKT_TIMEOUT, TRAKT_BLACKLIST_NAME, TRAKT_USE_ROLLING_DOWNLOAD, TRAKT_ROLLING_NUM_EP, TRAKT_ROLLING_ADD_PAUSED, TRAKT_ROLLING_FREQUENCY, \
+            USE_TRAKT, TRAKT_USERNAME, TRAKT_ACCESS_TOKEN, TRAKT_REFRESH_TOKEN, TRAKT_REMOVE_WATCHLIST, TRAKT_SYNC_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, traktRollingScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, TRAKT_SYNC_REMOVE, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_DISABLE_SSL_VERIFY, TRAKT_TIMEOUT, TRAKT_BLACKLIST_NAME, TRAKT_USE_ROLLING_DOWNLOAD, TRAKT_ROLLING_NUM_EP, TRAKT_ROLLING_ADD_PAUSED, TRAKT_ROLLING_FREQUENCY, \
             USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_NOTIFY_ONSUBTITLEDOWNLOAD, PLEX_UPDATE_LIBRARY, USE_PLEX_CLIENT, PLEX_CLIENT_USERNAME, PLEX_CLIENT_PASSWORD, \
             PLEX_SERVER_HOST, PLEX_SERVER_TOKEN, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, DEFAULT_BACKLOG_FREQUENCY, MIN_BACKLOG_FREQUENCY, BACKLOG_STARTUP, SKIP_REMOVED_FILES, \
             showUpdateScheduler, __INITIALIZED__, INDEXER_DEFAULT_LANGUAGE, EP_DEFAULT_DELETED_STATUS, LAUNCH_BROWSER, UPDATE_SHOWS_ON_START, UPDATE_SHOWS_ON_SNATCH, TRASH_REMOVE_SHOW, TRASH_ROTATE_LOGS, SORT_ARTICLE, showList, loadingShowList, \
@@ -1011,7 +1019,8 @@ def initialize(consoleLogging=True):
 
         USE_TRAKT = bool(check_setting_int(CFG, 'Trakt', 'use_trakt', 0))
         TRAKT_USERNAME = check_setting_str(CFG, 'Trakt', 'trakt_username', '', censor_log=True)
-        TRAKT_PASSWORD = check_setting_str(CFG, 'Trakt', 'trakt_password', '', censor_log=True)
+        TRAKT_ACCESS_TOKEN = check_setting_str(CFG, 'Trakt', 'trakt_access_token', '', censor_log=True)
+        TRAKT_REFRESH_TOKEN = check_setting_str(CFG, 'Trakt', 'trakt_refresh_token', '', censor_log=True)
         TRAKT_REMOVE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_watchlist', 0))
         TRAKT_REMOVE_SERIESLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_serieslist', 0))
         TRAKT_SYNC_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync_watchlist', 0))
@@ -1030,7 +1039,7 @@ def initialize(consoleLogging=True):
         TRAKT_ROLLING_FREQUENCY = check_setting_int(CFG, 'Trakt', 'trakt_rolling_frequency', 8)
         if TRAKT_ROLLING_FREQUENCY < 4:
             TRAKT_ROLLING_FREQUENCY = 4
 
         CheckSection(CFG, 'pyTivo')
         USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0))
         PYTIVO_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_onsnatch', 0))
@@ -1968,7 +1977,8 @@ def save_config():
     new_config['Trakt'] = {}
     new_config['Trakt']['use_trakt'] = int(USE_TRAKT)
     new_config['Trakt']['trakt_username'] = TRAKT_USERNAME
-    new_config['Trakt']['trakt_password'] = helpers.encrypt(TRAKT_PASSWORD, ENCRYPTION_VERSION)
+    new_config['Trakt']['trakt_access_token'] = TRAKT_ACCESS_TOKEN
+    new_config['Trakt']['trakt_refresh_token'] = TRAKT_REFRESH_TOKEN
     new_config['Trakt']['trakt_remove_watchlist'] = int(TRAKT_REMOVE_WATCHLIST)
     new_config['Trakt']['trakt_remove_serieslist'] = int(TRAKT_REMOVE_SERIESLIST)
     new_config['Trakt']['trakt_sync_watchlist'] = int(TRAKT_SYNC_WATCHLIST)
diff --git a/sickbeard/db.py b/sickbeard/db.py
index c7be0a979dd08e4f0270a53e96f7954ca4450af7..821b11d9e9e1ef6f5c5c8c6de656ab15de664270 100644
--- a/sickbeard/db.py
+++ b/sickbeard/db.py
@@ -232,7 +232,7 @@ class DBConnection(object):
         try:
             return unicode(x, 'utf-8')
         except:
-            return unicode(x, sickbeard.SYS_ENCODING)
+            return unicode(x, sickbeard.SYS_ENCODING, errors="ignore")
 
     def _dict_factory(self, cursor, row):
         d = {}
diff --git a/sickbeard/notifiers/kodi.py b/sickbeard/notifiers/kodi.py
index f7e3b91689452f78eda1c28309c7026d444c2734..b6739e1fdb4691f925048e590b12e8edd2455a05 100644
--- a/sickbeard/notifiers/kodi.py
+++ b/sickbeard/notifiers/kodi.py
@@ -16,6 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with SickRage.  If not, see <http://www.gnu.org/licenses/>.
 
+import httplib
 import urllib
 import urllib2
 import socket
@@ -385,7 +386,7 @@ class KODINotifier:
 
             try:
                 response = urllib2.urlopen(req)
-            except urllib2.URLError, e:
+            except (httplib.BadStatusLine, urllib2.URLError), e:
                 logger.log(u"Error while trying to retrieve KODI API version for " + host + ": " + ex(e),
                            logger.WARNING)
                 return False
diff --git a/sickbeard/notifiers/trakt.py b/sickbeard/notifiers/trakt.py
index c35f27b86ad3622df1cf022ff176081edf68bc4c..5bf08bc4697068179ffcc070701d29db125ea10e 100644
--- a/sickbeard/notifiers/trakt.py
+++ b/sickbeard/notifiers/trakt.py
@@ -47,7 +47,7 @@ class TraktNotifier:
         """
 
         trakt_id = sickbeard.indexerApi(ep_obj.show.indexer).config['trakt_id']
-        trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
+        trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
 
         if sickbeard.USE_TRAKT:
             try:
@@ -100,7 +100,7 @@ class TraktNotifier:
         update: type o action add or remove
         """
 
-        trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
+        trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
 
         if sickbeard.USE_TRAKT:
 
@@ -205,7 +205,7 @@ class TraktNotifier:
 
         return post_data
 
-    def test_notify(self, username, password, disable_ssl, blacklist_name=None):
+    def test_notify(self, username, disable_ssl, blacklist_name=None):
         """
         Sends a test notification to trakt with the given authentication info and returns a boolean
         representing success.
@@ -218,7 +218,7 @@ class TraktNotifier:
         Returns: True if the request succeeded, False otherwise
         """
         try:
-            trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, username, password, disable_ssl, sickbeard.TRAKT_TIMEOUT)
+            trakt_api = TraktAPI(disable_ssl, sickbeard.TRAKT_TIMEOUT)
             trakt_api.validateAccount()
             if blacklist_name and blacklist_name is not None:
                 trakt_lists = trakt_api.traktRequest("users/" + username + "/lists")
diff --git a/sickbeard/providers/generic.py b/sickbeard/providers/generic.py
index 078b47fe9120e7879792ed8b28bdd86b27d05fd0..47f6ef084d336bc805feff58f01a9bf0e0044360 100644
--- a/sickbeard/providers/generic.py
+++ b/sickbeard/providers/generic.py
@@ -256,6 +256,21 @@ class GenericProvider:
 
         return title, url
 
+    def _get_size(self, item):
+        """Gets the size from the item"""
+        if self.providerType != GenericProvider.NZB:
+            logger.log(u"Torrent Generic providers doesn't have _get_size() implemented yet", logger.DEBUG)
+            return -1
+        else:
+            size = item.get('links')[1].get('length')
+            if size:
+                size = int(size)
+                return size
+            else:
+                logger.log(u"Size was not found in your provider response", logger.DEBUG)
+                return -1
+
+
     def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
 
         self._checkAuth()
@@ -430,6 +445,7 @@ class GenericProvider:
             result.release_group = release_group
             result.version = version
             result.content = None
+            result.size = self._get_size(item)
 
             if len(epObj) == 1:
                 epNum = epObj[0].episode
diff --git a/sickbeard/providers/newznab.py b/sickbeard/providers/newznab.py
index 790b01d1a1a83a4031071d70d7a8fb2f6d5aaeb9..11f6621cc0aca641d5066fe72b495f067f9066ee 100755
--- a/sickbeard/providers/newznab.py
+++ b/sickbeard/providers/newznab.py
@@ -423,7 +423,7 @@ class NewznabCache(tvcache.TVCache):
 
         tvrageid = 0
         for attr in item['newznab_attr'] if isinstance(item['newznab_attr'], list) else [item['newznab_attr']]:
-            if attr['name'] == 'tvrageid':
+            if attr['name'] in ('tvrageid', 'rageid'):
                 tvrageid = int(attr['value'] or 0)
                 break
 
diff --git a/sickbeard/scene_exceptions.py b/sickbeard/scene_exceptions.py
index 48f66490744f8bc1ff3014fe32a6377ef6715456..169a26f3c4894436f57b5e7223d03be53dabf3db 100644
--- a/sickbeard/scene_exceptions.py
+++ b/sickbeard/scene_exceptions.py
@@ -176,7 +176,7 @@ def retrieve_exceptions():
             url_data = helpers.getURL(url)
             if url_data is None:
                 # When urlData is None, trouble connecting to github
-                logger.log(u"Check scene exceptions update failed. Unable to get URL: " + url, logger.ERROR)
+                logger.log(u"Check scene exceptions update failed. Unable to get URL: " + url, logger.WARNING)
                 continue
 
             setLastRefresh(sickbeard.indexerApi(indexer).name)
@@ -322,4 +322,4 @@ def getSceneSeasons(indexer_id):
     """
     myDB = db.DBConnection('cache.db')
     seasons = myDB.select("SELECT DISTINCT season FROM scene_exceptions WHERE indexer_id = ?", [indexer_id])
-    return [cur_exception["season"] for cur_exception in seasons]
\ No newline at end of file
+    return [cur_exception["season"] for cur_exception in seasons]
diff --git a/sickbeard/traktChecker.py b/sickbeard/traktChecker.py
index 4bc4ce825e7564903a13d347ff616c0ae6b80c5c..55ed11e980e16ebc108118a090c5738ef7249d30 100644
--- a/sickbeard/traktChecker.py
+++ b/sickbeard/traktChecker.py
@@ -62,7 +62,7 @@ def setEpisodeToWanted(show, s, e):
 class TraktChecker():
 
     def __init__(self):
-        self.trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
+        self.trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
         self.todoBacklog = []
         self.todoWanted = []        
         self.ShowWatchlist = {}
@@ -623,7 +623,7 @@ class TraktChecker():
 class TraktRolling():
 
     def __init__(self):
-        self.trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
+        self.trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
         self.EpisodeWatched = []
 
     def run(self, force=False):
diff --git a/sickbeard/webapi.py b/sickbeard/webapi.py
index 0238dd343f162f9432b57b49a80d1dc66e9f35e5..9ce20e82af55f667d7101b4d782d65fe57c49325 100644
--- a/sickbeard/webapi.py
+++ b/sickbeard/webapi.py
@@ -1383,6 +1383,7 @@ class CMD_PostProcess(ApiCall):
         self.process_method, args = self.check_params(args, kwargs, "process_method", False, False,
                                                       "string", ["copy", "symlink", "hardlink", "move"])
         self.is_priority, args = self.check_params(args, kwargs, "is_priority", 0, False, "bool", [])
+        self.failed, args = self.check_params(args, kwargs, "failed", 0, False, "bool", [])
         self.type, args = self.check_params(args, kwargs, "type", "auto", None, "string",
                                             ["auto", "manual"])
         # super, missing, help
@@ -1400,7 +1401,7 @@ class CMD_PostProcess(ApiCall):
             self.type = 'manual'
 
         data = processTV.processDir(self.path, process_method=self.process_method, force=self.force_replace,
-                                    is_priority=self.is_priority, failed=False, type=self.type)
+                                    is_priority=self.is_priority, failed=self.failed, type=self.type)
 
         if not self.return_data:
             data = ""
diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py
index 9d26a00268341e90702b1ff92cacda9f94e4e300..c1fff3b24b00db62dbcd30ff527e0f74f6f5d070 100644
--- a/sickbeard/webserve.py
+++ b/sickbeard/webserve.py
@@ -992,13 +992,21 @@ class Home(WebRoot):
             return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {
                 "dbloc": dbloc}
    
+    def getTraktToken(self, trakt_pin=None):
+        trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
+
+        response = trakt_api.traktToken(trakt_pin)
+        if response:
+            return "Trakt Authorized"
+        return "Trakt Not Authorized!"
+
     def testTrakt(self, username=None, password=None, disable_ssl=None, blacklist_name=None):
         # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
         if disable_ssl == 'true':
             disable_ssl = True
         else:
             disable_ssl = False
-        return notifiers.trakt_notifier.test_notify(username, password, disable_ssl, blacklist_name)
+        return notifiers.trakt_notifier.test_notify(username, disable_ssl, blacklist_name)
 
 
     def loadShowNotifyLists(self):
@@ -2372,7 +2380,7 @@ class HomeAddShows(Home):
 
         t.trending_shows = []
         
-        trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
+        trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
 
         try:
             not_liked_show = ""
@@ -2433,7 +2441,7 @@ class HomeAddShows(Home):
 
         t.trending_shows = []
 
-        trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
+        trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
 
         try:
             not_liked_show = ""
@@ -2485,7 +2493,7 @@ class HomeAddShows(Home):
             ]
         }
 
-        trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
+        trakt_api = TraktAPI(sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
 
         result=trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items", data, method='POST')
 
@@ -4626,7 +4634,7 @@ class ConfigNotifications(Config):
                           libnotify_notify_onsubtitledownload=None,
                           use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
                           use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
-                          use_trakt=None, trakt_username=None, trakt_password=None,
+                          use_trakt=None, trakt_username=None, trakt_pin=None,
                           trakt_remove_watchlist=None, trakt_sync_watchlist=None, trakt_method_add=None,
                           trakt_start_paused=None, trakt_use_recommended=None, trakt_sync=None, trakt_sync_remove=None,
                           trakt_default_indexer=None, trakt_remove_serieslist=None, trakt_disable_ssl_verify=None, trakt_timeout=None, trakt_blacklist_name=None,
@@ -4746,7 +4754,6 @@ class ConfigNotifications(Config):
 
         config.change_USE_TRAKT(use_trakt)
         sickbeard.TRAKT_USERNAME = trakt_username
-        sickbeard.TRAKT_PASSWORD = trakt_password
         sickbeard.TRAKT_REMOVE_WATCHLIST = config.checkbox_to_value(trakt_remove_watchlist)
         sickbeard.TRAKT_REMOVE_SERIESLIST = config.checkbox_to_value(trakt_remove_serieslist)
         sickbeard.TRAKT_SYNC_WATCHLIST = config.checkbox_to_value(trakt_sync_watchlist)