diff --git a/BUILD.bazel b/BUILD.bazel
index 087f32e18..9af528cd0 100755
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -29,9 +29,7 @@ py_library(
     ],
 )
 
-load("@subpar//:subpar.bzl", "par_binary")
-
-par_binary(
+py_binary(
     name = "appender",
     srcs = ["tools/docker_appender_.py"],
     main = "tools/docker_appender_.py",
@@ -39,7 +37,7 @@ par_binary(
     deps = [":containerregistry"],
 )
 
-par_binary(
+py_binary(
     name = "puller",
     srcs = ["tools/fast_puller_.py"],
     main = "tools/fast_puller_.py",
@@ -47,7 +45,7 @@ par_binary(
     deps = [":containerregistry"],
 )
 
-par_binary(
+py_binary(
     name = "flatten",
     srcs = ["tools/fast_flatten_.py"],
    main = "tools/fast_flatten_.py",
@@ -55,7 +53,7 @@ par_binary(
     deps = [":containerregistry"],
 )
 
-par_binary(
+py_binary(
     name = "importer",
     srcs = ["tools/fast_importer_.py"],
     main = "tools/fast_importer_.py",
@@ -63,7 +61,7 @@ par_binary(
     deps = [":containerregistry"],
 )
 
-par_binary(
+py_binary(
     name = "pusher",
     srcs = ["tools/fast_pusher_.py"],
     main = "tools/fast_pusher_.py",
@@ -71,7 +69,7 @@ par_binary(
     deps = [":containerregistry"],
 )
 
-par_binary(
+py_binary(
     name = "digester",
     srcs = ["tools/image_digester_.py"],
     main = "tools/image_digester_.py",
@@ -119,3 +117,14 @@ sh_test(
         ":testenv.sh",
     ],
 )
+
+py_test(
+    name = "client_v2_2_unit_tests",
+    size = "large",
+    srcs = ["client_v2_2_unit_tests.py"],
+    main = "client_v2_2_unit_tests.py",
+    deps = [":containerregistry"],
+)
+
diff --git a/client/docker_creds_.py b/client/docker_creds_.py
index b320d04e3..fb375ab3f 100755
--- a/client/docker_creds_.py
+++ b/client/docker_creds_.py
@@ -92,6 +92,29 @@ def suffix(self):
     p = self.password.encode('utf8')
     return base64.b64encode(u + b':' + p).decode('utf8')
 
+
+class IdentityToken(SchemeProvider):
+  """Implementation for providing identity-token credentials."""
+
+  def __init__(self, token):
+    super(IdentityToken, self).__init__('Basic')
+    self._password = token
+
+  @property
+  def username(self):
+    # Azure Container Registry requires this fixed placeholder username when
+    # authenticating with an identity token:
+    # https://learn.microsoft.com/en-us/azure/container-registry/container-registry-authentication?tabs=azure-cli
+    return '00000000-0000-0000-0000-000000000000'
+
+  @property
+  def password(self):
+    return self._password
+
+  @property
+  def suffix(self):
+    u = self.username.encode('utf8')
+    p = self.password.encode('utf8')
+    return base64.b64encode(u + b':' + p).decode('utf8')
+
 
 _USERNAME = '_token'
@@ -278,14 +301,16 @@ def Resolve(self, name):
     for form in _FORMATS:
       if form % name.registry in auths:
         entry = auths[form % name.registry]
-        if 'auth' in entry:
+        if 'identitytoken' in entry:
+          token = entry['identitytoken']
+          return IdentityToken(token)
+        elif 'auth' in entry:
           decoded = base64.b64decode(entry['auth']).decode('utf8')
           username, password = decoded.split(':', 1)
           return Basic(username, password)
         elif 'username' in entry and 'password' in entry:
           return Basic(entry['username'], entry['password'])
         else:
-          # TODO(user): Support identitytoken
           # TODO(user): Support registrytoken
           raise Exception(
               'Unsupported entry in "auth" section of Docker config: ' +
diff --git a/client/v2_2/docker_http_.py b/client/v2_2/docker_http_.py
index b3462c76b..e00626c6f 100755
--- a/client/v2_2/docker_http_.py
+++ b/client/v2_2/docker_http_.py
@@ -335,7 +335,8 @@ def Request(self,
               method = None,
               body = None,
               content_type = None,
-              accepted_mimes = None
+              accepted_mimes = None,
+              additional_headers = None
              ):
     """Wrapper containing much of the boilerplate REST logic for Registry calls.
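A sketch of how a caller supplies the new argument; the chunked blob upload
in docker_session_.py below does exactly this. The URL and range values here
are made up, and transport is a docker_http.Transport:

    resp, unused_content = transport.Request(
        'https://gcr.io/v2/my-project/my-repo/blobs/uploads/some-uuid',
        method='PATCH',
        body=chunk,
        content_type='application/octet-stream',
        additional_headers={'Content-Range': '0-1999999999'},
        accepted_codes=[six.moves.http_client.ACCEPTED])
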
@@ -347,7 +348,8 @@ def Request(self,
       body: the body to pass into the PUT request (or None for GET)
       content_type: the mime-type of the request (or None for JSON).
         content_type is ignored when body is None.
-      accepted_mimes: the list of acceptable mime-types
+      accepted_mimes: the list of acceptable mime-types.
+      additional_headers: optional extra headers to include in the request.
 
     Raises:
       BadStateException: an unexpected internal state has been encountered.
@@ -382,6 +384,8 @@ def Request(self,
     if method in ('POST', 'PUT') and not body:
       headers['content-length'] = '0'
 
+    headers.update(additional_headers or {})
+
     resp, content = self._transport.request(
         url, method, body=body, headers=headers)
 
diff --git a/client/v2_2/docker_image_.py b/client/v2_2/docker_image_.py
index 2025a5036..a56a6bc56 100755
--- a/client/v2_2/docker_image_.py
+++ b/client/v2_2/docker_image_.py
@@ -406,6 +406,18 @@ def is_compressed(name):
   return name[0:2] == b'\x1f\x8b'
 
 
+# Python versions before 3.7 have a gzip.GzipFile bug that prevents writing
+# content larger than 2**31 bytes in a single call. To work around this, we
+# write the content in smaller chunks when it exceeds that limit.
+def _write_large_content_to_zipped_file(zipped, content, chunk_size=2**31 - 1):
+  if len(content) > chunk_size:
+    # Write the content one chunk_size slice at a time.
+    for i in range(0, len(content), chunk_size):
+      zipped.write(content[i:i + chunk_size])
+  else:
+    zipped.write(content)
+
+
 class FromTarball(DockerImage):
   """This decodes the image tarball output of docker_build for upload."""
 
@@ -458,7 +470,7 @@ def _content(self,
         zipped = gzip.GzipFile(
             mode='wb', compresslevel=self._compresslevel, fileobj=buf)
         try:
-          zipped.write(content)
+          _write_large_content_to_zipped_file(zipped, content)
         finally:
           zipped.close()
         content = buf.getvalue()
@@ -799,18 +811,18 @@ def __exit__(self, unused_type, unused_value, unused_traceback):
   pass
 
 
-def _in_whiteout_dir(fs, name):
+def _in_whiteout_dir(fs, opaque_whiteouts, name):
   while name:
     dirname = os.path.dirname(name)
     if name == dirname:
       break
-    if fs.get(dirname):
+    if fs.get(dirname) or dirname in opaque_whiteouts:
       return True
     name = dirname
   return False
 
 
-
 _WHITEOUT_PREFIX = '.wh.'
+_OPAQUE_WHITEOUT_FILENAME = '.wh..wh..opq'
 
 
 def extract(image, tar):
@@ -824,17 +836,27 @@ def extract(image, tar):
   # to whether they are a tombstone or not.
   fs = {}
 
+  opaque_whiteouts_in_higher_layers = set()
+
   # Walk the layers, topmost first and add files. If we've seen them in a
   # higher layer then we skip them
   for layer in image.diff_ids():
     buf = io.BytesIO(image.uncompressed_layer(layer))
     with tarfile.open(mode='r:', fileobj=buf) as layer_tar:
+      opaque_whiteouts_in_this_layer = []
       for tarinfo in layer_tar:
         # If we see a whiteout file, then don't add anything to the tarball
         # but ensure that any lower layers don't add a file with the whited
         # out name.
         basename = os.path.basename(tarinfo.name)
         dirname = os.path.dirname(tarinfo.name)
+
+        # If we see an opaque whiteout file, then don't add anything to the
+        # tarball but ensure that any lower layers don't add files or
+        # directories which are siblings of the whiteout file.
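+        # For example, "a/.wh..wh..opq" in this layer hides "a/b" and "a/c"
+        # from lower layers, while keeping "a/" itself and anything under it
+        # that this layer re-adds.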
+        if basename == _OPAQUE_WHITEOUT_FILENAME:
+          opaque_whiteouts_in_this_layer.append(dirname)
+
         tombstone = basename.startswith(_WHITEOUT_PREFIX)
         if tombstone:
           basename = basename[len(_WHITEOUT_PREFIX):]
@@ -846,7 +868,7 @@ def extract(image, tar):
           continue
 
         # Check for a whited out parent directory
-        if _in_whiteout_dir(fs, name):
+        if _in_whiteout_dir(fs, opaque_whiteouts_in_higher_layers, name):
           continue
 
         # Mark this file as handled by adding its name.
@@ -858,3 +880,4 @@ def extract(image, tar):
           tar.addfile(tarinfo, fileobj=layer_tar.extractfile(tarinfo))
         else:
           tar.addfile(tarinfo, fileobj=None)
+    opaque_whiteouts_in_higher_layers.update(opaque_whiteouts_in_this_layer)
diff --git a/client/v2_2/docker_session_.py b/client/v2_2/docker_session_.py
index 7aff6ae35..d19cdc152 100755
--- a/client/v2_2/docker_session_.py
+++ b/client/v2_2/docker_session_.py
@@ -32,6 +32,13 @@
 
 import six.moves.urllib.parse
 
+
+# A 20 MB chunk (the minimum) balances performance in poor networking
+# conditions and should only be used for slower registries. We need to use
+# 2 GB for now for CMv2 to avoid hitting the partial upload API, since the
+# GCR registry (us-central1-docker.pkg.dev) unexpectedly drops partial
+# upload connections.
+UPLOAD_CHUNK_SIZE_MAX = int(2e9)
+UPLOAD_CHUNK_SIZE_MIN = int(2e7)
+
 
 def _tag_or_digest(name):
   if isinstance(name, docker_name.Tag):
     return name.tag
@@ -48,7 +55,8 @@ def __init__(self,
                creds,
                transport,
                mount = None,
-               threads = 1):
+               threads = 1,
+               chunk_size = UPLOAD_CHUNK_SIZE_MAX):
     """Constructor.
 
     If multiple threads are used, the caller *must* ensure that the provided
@@ -70,6 +78,7 @@ def __init__(self,
                                             docker_http.PUSH)
     self._mount = mount
     self._threads = threads
+    self._chunk_size = chunk_size
 
   def _scheme_and_host(self):
     return '{scheme}://{registry}'.format(
@@ -150,10 +159,61 @@ def _put_upload(self, image, digest):
         method='PUT',
         body=self._get_blob(image, digest),
         accepted_codes=[six.moves.http_client.CREATED])
+
+  # pylint: disable=missing-docstring
+  def _patch_chunked_upload_image_body(self, image_body, digest):
+    if not image_body:
+      raise ValueError('Empty image body')
+
+    # See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-a-blob-in-chunks
+
+    mounted, location = self._start_upload(digest, self._mount)
+
+    if mounted:
+      logging.info('Layer %s mounted.', digest)
+      return
+
+    location = self._get_absolute_url(location)
+
+    # Upload the content in chunks. The loop runs at least once, so resp is
+    # always bound below.
+    for i in range(0, len(image_body), self._chunk_size):
+      chunk = image_body[i:i + self._chunk_size]
+      chunk_start, chunk_end_inclusive = i, i + len(chunk) - 1
+      logging.info('Pushing chunk at offset %d for layer %s', i, digest)
+      resp, unused_content = self._transport.Request(
+          location,
+          method='PATCH',
+          body=chunk,
+          content_type='application/octet-stream',
+          additional_headers={
+              'Content-Range': '{start}-{end}'.format(
+                  start=chunk_start, end=chunk_end_inclusive)
+          },
+          accepted_codes=[
+              six.moves.http_client.NO_CONTENT,
+              six.moves.http_client.ACCEPTED,
+              six.moves.http_client.CREATED
+          ])
+      # Need to use the new location from the response for the next request.
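+      # (registries may return a relative Location header, so resolve it
+      # against the registry host before the next PATCH/PUT)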
+      location = self._get_absolute_url(resp.get('location'))
+
+    location = self._add_digest(location, digest)
+    location = self._get_absolute_url(location)
+    self._transport.Request(
+        location,
+        method='PUT',
+        body=None,
+        accepted_codes=[six.moves.http_client.CREATED])
 
   # pylint: disable=missing-docstring
   def _patch_upload(self, image, digest):
+    image_body = self._get_blob(image, digest)
+    # A registry may reject a layer this large in a single PATCH, so fall
+    # back to a chunked upload instead.
+    if len(image_body) > self._chunk_size:
+      logging.info('Uploading layer %s in chunks.', digest)
+      self._patch_chunked_upload_image_body(image_body, digest)
+      return
+
     mounted, location = self._start_upload(digest, self._mount)
 
     if mounted:
@@ -165,7 +225,7 @@ def _patch_upload(self, image,
     resp, unused_content = self._transport.Request(
         location,
         method='PATCH',
-        body=self._get_blob(image, digest),
+        body=image_body,
         content_type='application/octet-stream',
         accepted_codes=[
             six.moves.http_client.NO_CONTENT, six.moves.http_client.ACCEPTED,
@@ -199,6 +259,14 @@ def _put_blob(self, image, digest):
     #   POST /v2/<name>/blobs/uploads/        (no body*)
     #   PATCH /v2/<name>/blobs/uploads/<uuid> (full body)
     #   PUT /v2/<name>/blobs/uploads/<uuid>   (no body)
+    #   self._patch_upload(image, digest)
+    #
+    # When the layer is larger than the configured chunk size (2 GB by
+    # default), we upload it in chunks instead:
+    #   POST /v2/<name>/blobs/uploads/        (no body*)
+    #   PATCH /v2/<name>/blobs/uploads/<uuid> (body chunk 1)
+    #   PATCH /v2/<name>/blobs/uploads/<uuid> (body chunk ...)
+    #   PUT /v2/<name>/blobs/uploads/<uuid>   (no body)
+    #   self._patch_chunked_upload_image_body(image_body, digest)
     #
     # * We attempt to perform a cross-repo mount if any repositories are
     # specified in the "mount" parameter. This does a fast copy from a
diff --git a/client/v2_2/save_.py b/client/v2_2/save_.py
index 453692fc8..6eb908f81 100755
--- a/client/v2_2/save_.py
+++ b/client/v2_2/save_.py
@@ -144,7 +144,8 @@ def tarball(name, image,
 def fast(image,
          directory,
          threads = 1,
-         cache_directory = None):
+         cache_directory = None,
+         first_layer = 0):
   """Produce a FromDisk compatible file layout under the provided directory.
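+
+  Layers with an index below first_layer are skipped; the remaining layers
+  keep their absolute index in the %03d-numbered output files.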
 
   After calling this, the following filesystem will exist:
@@ -227,6 +228,10 @@ def valid(cached_layer, digest):
   idx = 0
   layers = []
   for blob in reversed(image.fs_layers()):
+    # Skip layers below first_layer, but keep idx advancing so the remaining
+    # layers retain their absolute numbering.
+    if idx < first_layer:
+      idx += 1
+      continue
+
     # Create a local copy
     layer_name = os.path.join(directory, '%03d.tar.gz' % idx)
     digest_name = os.path.join(directory, '%03d.sha256' % idx)
diff --git a/client/v2_2/v2_compat_.py b/client/v2_2/v2_compat_.py
index 322b4c42d..75ff42510 100755
--- a/client/v2_2/v2_compat_.py
+++ b/client/v2_2/v2_compat_.py
@@ -20,6 +20,7 @@
 
 import json
 
+import concurrent.futures
 from containerregistry.client.v2 import docker_image as v2_image
 from containerregistry.client.v2 import util as v2_util
 from containerregistry.client.v2_2 import docker_digest
@@ -108,13 +109,18 @@ def _ProcessImage(self):
     raw_manifest_schema1 = self._v2_image.manifest()
     manifest_schema1 = json.loads(raw_manifest_schema1)
 
+    # Compute layer diff_ids in parallel; each one pulls and hashes an
+    # uncompressed layer.
+    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
+      diff_id_futures = [
+          executor.submit(self._GetDiffId, digest)
+          for digest in reversed(self._v2_image.fs_layers())
+      ]
+    # Exiting the "with" block waits for all futures to finish.
+    diff_ids = [f.result() for f in diff_id_futures]
+
     self._config_file = config_file([
         json.loads(history.get('v1Compatibility', '{}'))
         for history in reversed(manifest_schema1.get('history', []))
-    ], [
-        self._GetDiffId(digest)
-        for digest in reversed(self._v2_image.fs_layers())
-    ])
+    ], diff_ids)
 
     config_bytes = self._config_file.encode('utf8')
     config_descriptor = {
diff --git a/client_v2_2_unit_tests.py b/client_v2_2_unit_tests.py
new file mode 100644
index 000000000..55792c41d
--- /dev/null
+++ b/client_v2_2_unit_tests.py
@@ -0,0 +1,167 @@
+# Copyright 2018 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+import io
+import tarfile
+import unittest
+
+import six
+
+from containerregistry.client.v2_2 import docker_image as v2_2_image
+
+
+class MockImage(object):
+  """Mock of DockerImage, implementing only the methods called by extract()."""
+
+  def __init__(self):
+    self._fs_layers = OrderedDict()
+
+  def add_layer(self, filenames):
+    """Add a layer to the image.
+
+    Args:
+      filenames: a list of filenames or (filename, content) pairs. Filenames
+        with trailing slashes become directory entries in the generated tar.
+    """
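+    # Layers are built as uncompressed in-memory tars: extract() reads them
+    # back through uncompressed_layer(), so no gzip wrapping is needed here.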
+    buf = io.BytesIO()
+    with tarfile.open(mode='w:', fileobj=buf) as tf:
+      for entry in filenames:
+        if isinstance(entry, six.string_types):
+          name = entry
+          content = b""
+        else:
+          (name, content) = entry
+          if not isinstance(content, bytes):
+            content = content.encode("utf8")
+        tarinfo = tarfile.TarInfo(name)
+        tarinfo.size = len(content)
+        if name.endswith("/"):
+          tarinfo.type = tarfile.DIRTYPE
+        tf.addfile(tarinfo, fileobj=(io.BytesIO(content) if content else None))
+    buf.seek(0)
+    new_layer_id = str(len(self._fs_layers))
+    self._fs_layers[new_layer_id] = buf.getvalue()
+
+  def diff_ids(self):
+    # Topmost layer first, matching what extract() expects.
+    return reversed(list(self._fs_layers.keys()))
+
+  def uncompressed_layer(self, layer_id):
+    return self._fs_layers[layer_id]
+
+
+class TestExtract(unittest.TestCase):
+
+  def _test_flatten(self, layer_filenames, expected_flattened_output):
+    # Construct a mock DockerImage with the specified layers:
+    img = MockImage()
+    for filenames in layer_filenames:
+      img.add_layer(filenames)
+    buf = io.BytesIO()
+
+    # Run the actual extract logic:
+    with tarfile.open(mode='w:', fileobj=buf) as tar:
+      v2_2_image.extract(img, tar)
+
+    # Compare the extract() output to the expected results:
+    buf.seek(0)
+    flattened_output = []
+    with tarfile.open(mode='r', fileobj=buf) as tar:
+      for tarinfo in tar:
+        if tarinfo.isdir():
+          flattened_output.append(tarinfo.name + "/")
+        else:
+          contents = tar.extractfile(tarinfo).read().decode("utf8")
+          if contents:
+            flattened_output.append((tarinfo.name, contents))
+          else:
+            flattened_output.append(tarinfo.name)
+    self.assertEqual(flattened_output, expected_flattened_output)
+
+  def test_single_layer(self):
+    self._test_flatten(
+        [["/directory/", "/file"]],
+        ["/directory/", "/file"]
+    )
+
+  def test_purely_additive_layers(self):
+    self._test_flatten(
+        [
+            ["dir/", "dir/file1", "file"],
+            ["dir/file2", "file2"]
+        ],
+        ["dir/file2", "file2", "dir/", "dir/file1", "file"]
+    )
+
+  def test_highest_layer_of_file_takes_precedence(self):
+    self._test_flatten(
+        [
+            [("file", "a")],
+            [("file", "b")]
+        ],
+        [("file", "b")]
+    )
+
+  def test_single_file_whiteout(self):
+    self._test_flatten(
+        [
+            ["/foo"],
+            ["/.wh.foo"]
+        ],
+        []
+    )
+
+  def test_parent_directory_whiteout(self):
+    self._test_flatten(
+        [
+            ["/x/a/", "/x/b/", "/x/b/1"],
+            ["/x/.wh.b"]
+        ],
+        ["/x/a/"]
+    )
+
+  def test_opaque_whiteout(self):
+    # Example from https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
+    self._test_flatten(
+        [
+            ["a/", "a/b/", "a/b/c/", "a/b/c/bar"],
+            ["a/", "a/.wh..wh..opq", "a/b/", "a/b/c/", "a/b/c/foo"],
+        ],
+        ["a/", "a/b/", "a/b/c/", "a/b/c/foo"],
+    )
+
+    self._test_flatten(
+        [
+            ["a/", "a/b/", "a/b/c/", "a/b/c/bar"],
+            ["a/", "a/b/", "a/b/c/", "a/b/c/foo", "a/.wh..wh..opq"],
+        ],
+        ["a/", "a/b/", "a/b/c/", "a/b/c/foo"],
+    )
+
+  def test_opaque_whiteout_preserves_parent_directory(self):
+    # Example from https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
+    self._test_flatten(
+        [
+            [
+                "bin/",
+                "bin/my-app-binary",
+                "bin/my-app-tools",
+                "bin/tools/",
+                "bin/tools/my-app-tool-one"
+            ],
+            ["bin/.wh..wh..opq"],
+        ],
+        ["bin/"],
+    )
+
+
+if __name__ == "__main__":
+  unittest.main(verbosity=2)
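The suite above exercises extract()'s layering and whiteout semantics end to
end against the in-memory MockImage. With the py_test target added in
BUILD.bazel, it can be run directly; a typical invocation (the --test_output
flag is just a convenience):

    bazel test //:client_v2_2_unit_tests --test_output=errors
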
diff --git a/tools/fast_puller_.py b/tools/fast_puller_.py
index d7601d2bc..75e790146 100755
--- a/tools/fast_puller_.py
+++ b/tools/fast_puller_.py
@@ -63,6 +63,19 @@
 parser.add_argument(
     '--cache', action='store', help='Image\'s files cache directory.')
 
+parser.add_argument('--allow-v2', action='store_true',
+                    help='Allow pulling v2 images when no v2.2 image is '
+                    'present.')
+
+parser.add_argument('--first-layer', action='store', type=int, default=0,
+                    help=('WIP'))
+
+parser.add_argument('--certificates', nargs='*', help='A comma separated '
+                    'tuple of key file, cert, and domain. (From httplib2 '
+                    'docs) Add a key and cert that will be used for an SSL '
+                    'connection to the specified domain. keyfile is the name '
+                    'of a PEM formatted file that contains your private key. '
+                    'certfile is a PEM formatted certificate chain file.')
+
 _THREADS = 8
@@ -75,6 +88,12 @@ def main():
   retry_factory = retry_factory.WithSourceTransportCallable(httplib2.Http)
   transport = transport_pool.Http(retry_factory.Build, size=_THREADS)
 
+  if args.certificates:
+    for item in args.certificates:
+      logging.info('Adding certificate %s', item)
+      key, cert, domain = item.split(',')
+      transport.add_certificate(key, cert, domain)
+
   if '@' in args.name:
     name = docker_name.Digest(args.name)
   else:
@@ -125,9 +144,14 @@ def main():
             v2_2_img,
             args.directory,
             threads=_THREADS,
-            cache_directory=args.cache)
+            cache_directory=args.cache,
+            first_layer=args.first_layer)
         return
 
+  if not args.allow_v2:
+    logging.fatal('v2.2 image not found: %r', name)
+    sys.exit(1)
+
   logging.info('Pulling v2 image from %r ...', name)
   with v2_image.FromRegistry(name, creds, transport) as v2_img:
     with v2_compat.V22FromV2(v2_img) as v2_2_img:
diff --git a/tools/fast_pusher_.py b/tools/fast_pusher_.py
index c956551b0..3b6a18a5e 100755
--- a/tools/fast_pusher_.py
+++ b/tools/fast_pusher_.py
@@ -24,7 +24,9 @@
 
 import argparse
 import logging
+logging.getLogger().setLevel(logging.INFO)
 import sys
+import os
 
 from containerregistry.client import docker_creds
 from containerregistry.client import docker_name
@@ -39,7 +41,6 @@
 import httplib2
 from six.moves import zip  # pylint: disable=redefined-builtin
 
-
 parser = argparse.ArgumentParser(
     description='Push images to a Docker Registry, faaaaaast.')
 
@@ -86,6 +87,21 @@
     help='The path to the directory where the client configuration files are '
     'located. Overiddes the value from DOCKER_CONFIG')
 
+parser.add_argument('--certificates', nargs='*', help='A comma separated '
+                    'tuple of key file, cert, and domain. (From httplib2 '
+                    'docs) Add a key and cert that will be used for an SSL '
+                    'connection to the specified domain. keyfile is the name '
+                    'of a PEM formatted file that contains your private key. '
+                    'certfile is a PEM formatted certificate chain file. '
+                    'If the key/cert does not exist it will be ignored.')
+
+parser.add_argument(
+    '--chunk-size',
+    type=int,
+    default=docker_session.UPLOAD_CHUNK_SIZE_MAX,
+    required=False,
+    help='The size of the upload chunk in bytes. Defaults to 2e9 (2 GB).')
+
 _THREADS = 8
@@ -160,6 +176,33 @@ def main():
   retry_factory = retry_factory.WithSourceTransportCallable(httplib2.Http)
   transport = transport_pool.Http(retry_factory.Build, size=_THREADS)
 
+  if args.certificates:
+    found_one = False
+
+    for item in args.certificates:
+      logging.info('Adding certificate %s', item)
+      key, cert, domain = item.split(',')
+      # httplib2 rejects cert files that do not exist, so check them first.
+      if os.path.isfile(key) and os.path.isfile(cert):
+        transport.add_certificate(key, cert, domain)
+        test_cert_response = transport.request(
+            'https://' + domain + '/certtestignore', 'POST', body='test')
+        # A success, redirect, or not-found status means we are authorized;
+        # /certtestignore is deliberately not a real image, so 404 is the
+        # expected answer.
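+        # (any other status, e.g. 401/403, means this key/cert pair was
+        # rejected; the pool is rebuilt below and the next candidate tried)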
+        if test_cert_response[0].status in (200, 201, 307, 404):
+          found_one = True
+          break
+      transport = transport_pool.Http(retry_factory.Build, size=_THREADS)
+
+    if not found_one:
+      logging.fatal("Local kube cert expired/is not present. Please run "
+                    "'./eng-tools/bin/get-kube-access dev'")
+      sys.exit(1)
+
+  if (args.chunk_size < docker_session.UPLOAD_CHUNK_SIZE_MIN or
+      args.chunk_size > docker_session.UPLOAD_CHUNK_SIZE_MAX):
+    logging.warning(
+        'The upload chunk size %d is not within the [20 MB, 2 GB] range. '
+        'This may cause performance issues.', args.chunk_size)
+
   logging.info('Loading v2.2 image from disk ...')
   with v2_2_image.FromDisk(
       config,
@@ -177,7 +220,7 @@ def main():
 
   try:
     with docker_session.Push(
-        name, creds, transport, threads=_THREADS) as session:
+        name, creds, transport, threads=_THREADS,
+        chunk_size=args.chunk_size) as session:
       logging.info('Starting upload ...')
       if args.oci:
         with oci_compat.OCIFromV22(v2_2_img) as oci_img:
diff --git a/transport/retry_.py b/transport/retry_.py
index 2224e2b56..e6722b1a3 100755
--- a/transport/retry_.py
+++ b/transport/retry_.py
@@ -17,6 +17,8 @@
 
 import logging
 import time
+import socket
+import errno
 
 from containerregistry.transport import nested
 
@@ -28,11 +30,14 @@
 DEFAULT_BACKOFF_FACTOR = 0.5
 RETRYABLE_EXCEPTION_TYPES = [
     six.moves.http_client.IncompleteRead,
-    six.moves.http_client.ResponseNotReady
+    six.moves.http_client.ResponseNotReady,
+    socket.timeout,
 ]
 
 
 def ShouldRetry(err):
+  # ETIMEDOUT can surface as a bare OSError rather than socket.timeout.
+  if isinstance(err, OSError) and err.errno == errno.ETIMEDOUT:
+    return True
   for exception_type in RETRYABLE_EXCEPTION_TYPES:
     if isinstance(err, exception_type):
       return True
diff --git a/transport/transport_pool_.py b/transport/transport_pool_.py
index 15fcf4fc5..23702c98f 100755
--- a/transport/transport_pool_.py
+++ b/transport/transport_pool_.py
@@ -48,6 +48,18 @@ def _return_transport(self, transport):
       # We returned an item, notify a waiting thread.
       self._condition.notify(n=1)
 
+  def add_certificate(self, key, cert, domain):
+    """Adds a certificate to all of the underlying transports.
+
+    From the httplib2 docs:
+
+    Add a key and cert that will be used for an SSL connection to the
+    specified domain. keyfile is the name of a PEM formatted file that
+    contains your private key. certfile is a PEM formatted certificate
+    chain file.
+    """
+    for transport in self._transports:
+      transport.add_certificate(key, cert, domain)
+
   def request(self, *args, **kwargs):
     """This awaits a transport and delegates the request call.
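
A usage sketch for the new pool-level helper; the paths and domain are
illustrative, and the pool is built the same way the tools above build theirs:

    transport = transport_pool.Http(retry_factory.Build, size=8)
    transport.add_certificate('/secrets/client.key', '/secrets/client.crt',
                              'registry.internal.example.com')
    resp, content = transport.request(
        'https://registry.internal.example.com/v2/', 'GET')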