# HG changeset patch
# User Laurent Peuch <cortex@worlddomination.be>
# Date 1671681275 -3600
#      Thu Dec 22 04:54:35 2022 +0100
# Node ID dde488a18eb129c423502f87b8e059864a75fb16
# Parent  e8e692f7c946b81fbcf9cb61b42f9a42a0e0925b
style: black

diff --git a/cubicweb_s3storage/__pkginfo__.py b/cubicweb_s3storage/__pkginfo__.py
--- a/cubicweb_s3storage/__pkginfo__.py
+++ b/cubicweb_s3storage/__pkginfo__.py
@@ -2,28 +2,28 @@
 """cubicweb-s3storage application packaging information"""

-modname = 'cubicweb_s3storage'
-distname = 'cubicweb-s3storage'
+modname = "cubicweb_s3storage"
+distname = "cubicweb-s3storage"

 numversion = (3, 3, 0)
-version = '.'.join(str(num) for num in numversion)
+version = ".".join(str(num) for num in numversion)

-license = 'LGPL'
-author = 'LOGILAB S.A. (Paris, FRANCE)'
-author_email = 'contact@logilab.fr'
-description = 'A Cubicweb Storage that stores the data on S3'
-web = 'https://forge.extranet.logilab.fr/cubicweb/cubes/%s' % distname
+license = "LGPL"
+author = "LOGILAB S.A. (Paris, FRANCE)"
+author_email = "contact@logilab.fr"
+description = "A Cubicweb Storage that stores the data on S3"
+web = "https://forge.extranet.logilab.fr/cubicweb/cubes/%s" % distname

 __depends__ = {
-    'cubicweb': ">= 3.24.7, < 3.38.0",
-    'six': '>= 1.4.0',
-    'boto3': None,
+    "cubicweb": ">= 3.24.7, < 3.38.0",
+    "six": ">= 1.4.0",
+    "boto3": None,
 }
 __recommends__ = {}

 classifiers = [
-    'Environment :: Web Environment',
-    'Framework :: CubicWeb',
-    'Programming Language :: Python',
-    'Programming Language :: JavaScript',
+    "Environment :: Web Environment",
+    "Framework :: CubicWeb",
+    "Programming Language :: Python",
+    "Programming Language :: JavaScript",
 ]

diff --git a/cubicweb_s3storage/migration/postcreate.py b/cubicweb_s3storage/migration/postcreate.py
--- a/cubicweb_s3storage/migration/postcreate.py
+++ b/cubicweb_s3storage/migration/postcreate.py
@@ -22,4 +22,4 @@
 """

 # Example of site property change
-#set_property('ui.site-title', "<sitename>")
+# set_property('ui.site-title', "<sitename>")

diff --git a/cubicweb_s3storage/site_cubicweb.py b/cubicweb_s3storage/site_cubicweb.py
--- a/cubicweb_s3storage/site_cubicweb.py
+++ b/cubicweb_s3storage/site_cubicweb.py
@@ -4,31 +4,43 @@
 from cubicweb import Binary

 options = (
-    ('s3-auto-delete', {
-        'type': 'yn',
-        'default': True,
-        'help': 'Delete S3 objects on entity deletion',
-        'group': 's3',
-        'level': 2}),
-    ('s3-transaction-suffix-key', {
-        'type': 'yn',
-        'default': False,
-        'help': 'Add a temporary suffix to S3 keys during transaction',
-        'group': 's3',
-        'level': 2}),
-    ('s3-activate-object-versioning', {
-        'type': 'yn',
-        'default': False,
-        'help': 'store the objects version-id in database',
-        'group': 's3',
-        'level': 2}),
+    (
+        "s3-auto-delete",
+        {
+            "type": "yn",
+            "default": True,
+            "help": "Delete S3 objects on entity deletion",
+            "group": "s3",
+            "level": 2,
+        },
+    ),
+    (
+        "s3-transaction-suffix-key",
+        {
+            "type": "yn",
+            "default": False,
+            "help": "Add a temporary suffix to S3 keys during transaction",
+            "group": "s3",
+            "level": 2,
+        },
+    ),
+    (
+        "s3-activate-object-versioning",
+        {
+            "type": "yn",
+            "default": False,
+            "help": "store the objects version-id in database",
+            "group": "s3",
+            "level": 2,
+        },
+    ),
 )


 class STKEY(FunctionDescr):
-    """return the S3 key of the bytes attribute stored using the S3 Storage (s3s)
-    """
-    rtype = 'Bytes'
+    """return the S3 key of the bytes attribute stored using the S3 Storage (s3s)"""
+
+    rtype = "Bytes"

     def update_cb_stack(self, stack):
         assert len(stack) == 1
@@ -36,8 +48,9 @@

     def as_sql(self, backend, args):
         raise NotImplementedError(
-            'This callback is only available for S3Storage '
-            'managed attribute. Is STKEY() argument S3S managed?')
+            "This callback is only available for S3Storage "
+            "managed attribute. Is STKEY() argument S3S managed?"
+        )

     def source_execute(self, source, session, value):
         s3key = source.binary_to_str(value)

diff --git a/cubicweb_s3storage/storages.py b/cubicweb_s3storage/storages.py
--- a/cubicweb_s3storage/storages.py
+++ b/cubicweb_s3storage/storages.py
@@ -33,27 +33,27 @@

 class S3Storage(Storage):
     is_source_callback = True
-    KEY_SEPARATOR = '#'
+    KEY_SEPARATOR = "#"

-    def __init__(self, bucket, suffix='.tmp'):
+    def __init__(self, bucket, suffix=".tmp"):
         self.s3cnx = self._s3_client()
         self.bucket = bucket
         self.suffix = suffix

     @classmethod
     def _s3_client(cls):
-        endpoint_url = os.environ.get('AWS_S3_ENDPOINT_URL')
+        endpoint_url = os.environ.get("AWS_S3_ENDPOINT_URL")
         if endpoint_url:
-            cls.debug('Using custom S3 endpoint url {}'.format(endpoint_url))
-        return boto3.client('s3',
-                            endpoint_url=endpoint_url)
+            cls.debug("Using custom S3 endpoint url {}".format(endpoint_url))
+        return boto3.client("s3", endpoint_url=endpoint_url)

     def callback(self, source, cnx, value):
-        """see docstring for prototype, which vary according to is_source_callback
-        """
-        key = source.binary_to_str(value).decode('utf-8')
-        if cnx.repo.config['s3-transaction-suffix-key'] \
-                and cnx.commit_state == 'precommit':
+        """see docstring for prototype, which vary according to is_source_callback"""
+        key = source.binary_to_str(value).decode("utf-8")
+        if (
+            cnx.repo.config["s3-transaction-suffix-key"]
+            and cnx.commit_state == "precommit"
+        ):
             # download suffixed key if it exists
             # FIXME need a way to check that the attribute is actually edited
             try:
@@ -69,12 +69,12 @@

     def entity_added(self, entity, attr):
         """an entity using this storage for attr has been added"""
-        if entity._cw.transaction_data.get('fs_importing'):
+        if entity._cw.transaction_data.get("fs_importing"):
             # fs_importing allows to change S3 key saved in database
             entity._cw_dont_cache_attribute(attr, repo_side=True)
             key = entity.cw_edited[attr].getvalue()
             if PY3:
-                key = key.decode('utf-8')
+                key = key.decode("utf-8")
             try:
                 return self.get_s3_object(entity._cw, key)
             except Exception:
@@ -96,27 +96,30 @@

         binary.seek(0)
         buffer = Binary(binary.read())
         binary.seek(0)
-        if entity._cw.repo.config['s3-transaction-suffix-key']:
+        if entity._cw.repo.config["s3-transaction-suffix-key"]:
             upload_key = self.suffixed_key(key)
         else:
             upload_key = key
         extra_args = self.get_upload_extra_args(entity, attr, key)
-        put_object_result = self.s3cnx.put_object(Body=buffer,
-                                                  Bucket=self.bucket,
-                                                  Key=upload_key,
-                                                  **extra_args)
+        put_object_result = self.s3cnx.put_object(
+            Body=buffer, Bucket=self.bucket, Key=upload_key, **extra_args
+        )
         buffer.close()
-        version_id = put_object_result.get('VersionId', None)
+        version_id = put_object_result.get("VersionId", None)
         # save S3 key
         entity = self.save_s3_key(entity, attr, upload_key, version_id)
         # when key is suffixed, move to final key in post commit event
         # remove temporary key on rollback
-        S3AddFileOp.get_instance(entity._cw).add_data(
-            (self, key, entity.eid, attr))
-        self.info('Uploaded %s.%s (%s/%s) to S3',
-                  entity.eid, attr, self.bucket, upload_key)
+        S3AddFileOp.get_instance(entity._cw).add_data((self, key, entity.eid, attr))
+        self.info(
+            "Uploaded %s.%s (%s/%s) to S3",
+            entity.eid,
+            attr,
+            self.bucket,
+            upload_key,
+        )
         if oldkey is not None and oldkey != key:
             # remove unneeded old key
             self.delay_deletion(entity, attr, oldkey)

@@ -143,12 +146,18 @@
         self.delay_deletion(entity, attr, key)

     def delay_deletion(self, entity, attr, key):
-        if entity._cw.repo.config['s3-auto-delete']:
+        if entity._cw.repo.config["s3-auto-delete"]:
             # delete key in a post commit event
             S3DeleteFileOp.get_instance(entity._cw).add_data(
-                (self, key, entity.eid, attr))
-            self.info('Delaying deletion for %s.%s (%s/%s) in S3',
-                      entity.eid, attr, self.bucket, key)
+                (self, key, entity.eid, attr)
+            )
+            self.info(
+                "Delaying deletion for %s.%s (%s/%s) in S3",
+                entity.eid,
+                attr,
+                self.bucket,
+                key,
+            )

     def migrate_entity(self, entity, attribute):
         """migrate an entity attribute to the storage"""
@@ -158,8 +167,7 @@
         cnx = entity._cw
         source = cnx.repo.system_source
         attrs = source.preprocess_entity(entity)
-        sql = source.sqlgen.update('cw_' + entity.cw_etype, attrs,
-                                   ['cw_eid'])
+        sql = source.sqlgen.update("cw_" + entity.cw_etype, attrs, ["cw_eid"])
         source.doexec(cnx, sql, attrs)

         entity.cw_edited = None
@@ -168,11 +176,12 @@
         Save the s3 key into the entity bytes attribute
         """
         id_string = key
-        if entity._cw.repo.config['s3-activate-object-versioning'] and \
-                version_id is not None:
+        if (
+            entity._cw.repo.config["s3-activate-object-versioning"]
+            and version_id is not None
+        ):
             id_string = self.format_version_id_suffix(key, version_id)
-        entity.cw_edited.edited_attribute(attr,
-                                          Binary(id_string.encode('utf-8')))
+        entity.cw_edited.edited_attribute(attr, Binary(id_string.encode("utf-8")))
         return entity

     def parse_key(self, key):
@@ -195,8 +204,8 @@
         """
         try:
             rset = entity._cw.execute(
-                'Any stkey(D) WHERE X eid %s, X %s D' %
-                (entity.eid, attr))
+                "Any stkey(D) WHERE X eid %s, X %s D" % (entity.eid, attr)
+            )
         except NotImplementedError:
             # may occur when called from migrate_entity, ie. when the storage
             # has not yet been installed
@@ -204,7 +213,7 @@
         if rset and rset.rows[0][0]:
             key = rset.rows[0][0].getvalue()
             if PY3:
-                key = key.decode('utf-8')
+                key = key.decode("utf-8")
             return key
         return None

@@ -224,9 +233,7 @@
         get s3 stored attribute for key
         handle the case of versioned object
         """
-        versioning_activated = cnx.repo.config[
-            's3-activate-object-versioning'
-        ]
+        versioning_activated = cnx.repo.config["s3-activate-object-versioning"]

         # check first : does the key contain a '<separator>'
         key, version_id = self.parse_key(key)
@@ -242,15 +249,15 @@
         https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_object
         """
         result = self.s3cnx.get_object(Bucket=self.bucket, Key=key, **kwargs)
-        self.info('Downloaded %s/%s from S3', self.bucket, key)
-        return Binary(result['Body'].read())
+        self.info("Downloaded %s/%s from S3", self.bucket, key)
+        return Binary(result["Body"].read())


 class S3AddFileOp(DataOperationMixIn, LateOperation):
     containercls = list

     def postcommit_event(self):
-        if not self.cnx.repo.config['s3-transaction-suffix-key']:
+        if not self.cnx.repo.config["s3-transaction-suffix-key"]:
             return
         consumed_keys = set()
         for storage, key, eid, attr in self.get_data():
@@ -260,24 +267,34 @@
             suffixed_key = storage.suffixed_key(key)
             storage.s3cnx.copy_object(
                 Bucket=storage.bucket,
-                CopySource={'Bucket': storage.bucket, 'Key': suffixed_key},
-                Key=key)
-            storage.s3cnx.delete_object(
-                Bucket=storage.bucket, Key=suffixed_key)
-            self.info('Moved temporary object for %s.%s (%s/%s to %s/%s)'
-                      ' in S3', eid, attr, storage.bucket, suffixed_key,
-                      storage.bucket, key)
+                CopySource={"Bucket": storage.bucket, "Key": suffixed_key},
+                Key=key,
+            )
+            storage.s3cnx.delete_object(Bucket=storage.bucket, Key=suffixed_key)
+            self.info(
+                "Moved temporary object for %s.%s (%s/%s to %s/%s)" " in S3",
+                eid,
+                attr,
+                storage.bucket,
+                suffixed_key,
+                storage.bucket,
+                key,
+            )

     def rollback_event(self):
         for storage, key, eid, attr in self.get_data():
-            if self.cnx.repo.config['s3-transaction-suffix-key']:
+            if self.cnx.repo.config["s3-transaction-suffix-key"]:
                 upload_key = storage.suffixed_key(key)
             else:
                 upload_key = key
-            storage.s3cnx.delete_object(
-                Bucket=storage.bucket, Key=upload_key)
-            self.info('Deleted temporary object for %s.%s (%s/%s) in S3',
-                      eid, attr, storage.bucket, upload_key)
+            storage.s3cnx.delete_object(Bucket=storage.bucket, Key=upload_key)
+            self.info(
+                "Deleted temporary object for %s.%s (%s/%s) in S3",
+                eid,
+                attr,
+                storage.bucket,
+                upload_key,
+            )


 class S3DeleteFileOp(DataOperationMixIn, LateOperation):
@@ -285,14 +302,14 @@

     def postcommit_event(self):
         for storage, key, eid, attr in self.get_data():
-            self.info('Deleting object %s.%s (%s/%s) from S3',
-                      eid, attr, storage.bucket, key)
+            self.info(
+                "Deleting object %s.%s (%s/%s) from S3", eid, attr, storage.bucket, key
+            )
             resp = storage.s3cnx.delete_object(Bucket=storage.bucket, Key=key)
-            if resp.get('ResponseMetadata', {}).get('HTTPStatusCode') >= 300:
-                self.error('S3 object deletion FAILED: %s', resp)
+            if resp.get("ResponseMetadata", {}).get("HTTPStatusCode") >= 300:
+                self.error("S3 object deletion FAILED: %s", resp)
             else:
-                self.debug('S3 object deletion OK: %s', resp)
+                self.debug("S3 object deletion OK: %s", resp)


-set_log_methods(S3Storage,
-                getLogger('cube.s3storage.storages.s3storage'))
+set_log_methods(S3Storage, getLogger("cube.s3storage.storages.s3storage"))

diff --git a/cubicweb_s3storage/testing.py b/cubicweb_s3storage/testing.py
--- a/cubicweb_s3storage/testing.py
+++ b/cubicweb_s3storage/testing.py
@@ -5,16 +5,16 @@


 class S3StorageTestMixin(object):
-    s3_bucket = 'test-bucket'
+    s3_bucket = "test-bucket"

     def setUp(self):
         s3_mock = mock_s3()
         s3_mock.start()
-        resource = boto3.resource('s3', region_name='us-east-1')
+        resource = boto3.resource("s3", region_name="us-east-1")
         self.s3_bucket = resource.create_bucket(Bucket=self.s3_bucket)
         patched_storage_s3_client = patch(
-            'cubicweb_s3storage.storages.S3Storage._s3_client',
-            return_value=boto3.client('s3'),
+            "cubicweb_s3storage.storages.S3Storage._s3_client",
+            return_value=boto3.client("s3"),
         )
         patched_storage_s3_client.start()
         self._mocks = [

diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,33 +31,34 @@

 # load metadata from the __pkginfo__.py file so there is no risk of conflict
 # see https://packaging.python.org/en/latest/single_source_version.html
-pkginfo = join(here, 'cubicweb_s3storage', '__pkginfo__.py')
+pkginfo = join(here, "cubicweb_s3storage", "__pkginfo__.py")
 __pkginfo__ = {}
 with open(pkginfo) as f:
     exec(f.read(), __pkginfo__)

 # get required metadatas
-distname = __pkginfo__['distname']
-version = __pkginfo__['version']
-license = __pkginfo__['license']
-description = __pkginfo__['description']
-web = __pkginfo__['web']
-author = __pkginfo__['author']
-author_email = __pkginfo__['author_email']
-classifiers = __pkginfo__['classifiers']
+distname = __pkginfo__["distname"]
+version = __pkginfo__["version"]
+license = __pkginfo__["license"]
+description = __pkginfo__["description"]
+web = __pkginfo__["web"]
+author = __pkginfo__["author"]
+author_email = __pkginfo__["author_email"]
+classifiers = __pkginfo__["classifiers"]

-with open(join(here, 'README.rst')) as f:
+with open(join(here, "README.rst")) as f:
     long_description = f.read()

 # get optional metadatas
-data_files = __pkginfo__.get('data_files', None)
-dependency_links = __pkginfo__.get('dependency_links', ())
+data_files = __pkginfo__.get("data_files", None)
+dependency_links = __pkginfo__.get("dependency_links", ())

 requires = {}
 for entry in ("__depends__",):  # "__recommends__"):
     requires.update(__pkginfo__.get(entry, {}))
-install_requires = ["{0} {1}".format(d, v and v or "").strip()
-                    for d, v in requires.items()]
+install_requires = [
+    "{0} {1}".format(d, v and v or "").strip() for d, v in requires.items()
+]


 setup(
@@ -70,12 +71,12 @@
     author_email=author_email,
     url=web,
     classifiers=classifiers,
-    packages=find_packages(exclude=['test']),
+    packages=find_packages(exclude=["test"]),
     install_requires=install_requires,
     include_package_data=True,
     entry_points={
-        'cubicweb.cubes': [
-            's3storage=cubicweb_s3storage',
+        "cubicweb.cubes": [
+            "s3storage=cubicweb_s3storage",
         ],
     },
     zip_safe=False,

diff --git a/test/data/hooks.py b/test/data/hooks.py
--- a/test/data/hooks.py
+++ b/test/data/hooks.py
@@ -1,12 +1,11 @@
-
 from cubicweb.server.hook import Hook

 from cubicweb_s3storage.storages import S3Storage

 class S3StorageStartupHook(Hook):
-    __regid__ = 's3tests.server-startup-hook'
-    events = ('server_startup', 'server_maintenance')
+    __regid__ = "s3tests.server-startup-hook"
+    events = ("server_startup", "server_maintenance")

-    def __call__(self):
-        storage = S3Storage('test-bucket')
-        self.repo.system_source.set_storage('Image', 'data', storage)
+    def __call__(self):
+        storage = S3Storage("test-bucket")
+        self.repo.system_source.set_storage("Image", "data", storage)

diff --git a/test/test_s3storage.py b/test/test_s3storage.py
--- a/test/test_s3storage.py
+++ b/test/test_s3storage.py
@@ -11,84 +11,90 @@
 from cubicweb_s3storage import testing


-def create_image(cnx, data=b'the-data', **kwargs):
-    return cnx.create_entity('Image', data=Binary(data), **kwargs)
+def create_image(cnx, data=b"the-data", **kwargs):
+    return cnx.create_entity("Image", data=Binary(data), **kwargs)


 class S3StorageVersionedTC(testing.S3StorageTestMixin, CubicWebTC):
-
     def test_s3key_gen(self):
         self.s3_bucket.Versioning().enable()
-        self.repo.vreg.config['s3-activate-object-versioning'] = True
-        s3storage = self.repo.system_source.storage('Image', 'data')
+        self.repo.vreg.config["s3-activate-object-versioning"] = True
+        s3storage = self.repo.system_source.storage("Image", "data")

         with self.admin_access.client_cnx() as cnx:
-            fobj = create_image(cnx, b'some content')
+            fobj = create_image(cnx, b"some content")
             cnx.commit()
             eid = fobj.eid
-        k1 = s3storage.get_s3_key(fobj, 'data')
+        k1 = s3storage.get_s3_key(fobj, "data")

         with self.admin_access.client_cnx() as cnx:
-            fobj = cnx.find('Image', eid=eid).one()
-            k2 = s3storage.get_s3_key(fobj, 'data')
+            fobj = cnx.find("Image", eid=eid).one()
+            k2 = s3storage.get_s3_key(fobj, "data")
         self.assertEqual(k1, k2)

     def test_entity_create_versioning(self):
         self.s3_bucket.Versioning().enable()
-        self.repo.vreg.config['s3-activate-object-versioning'] = True
+        self.repo.vreg.config["s3-activate-object-versioning"] = True

         with self.admin_access.client_cnx() as cnx:
-            eid = create_image(cnx, b'some content').eid
+            eid = create_image(cnx, b"some content").eid
             cnx.commit()
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
-        s3storage = self.repo.system_source.storage('Image', 'data')
+        s3storage = self.repo.system_source.storage("Image", "data")
         key, _ = s3storage.parse_key(key)
-        data = self.s3_bucket.Object(key).get()['Body'].read()
-        self.assertEqual(data, b'some content')
+        data = self.s3_bucket.Object(key).get()["Body"].read()
+        self.assertEqual(data, b"some content")

     def test_entity_create_with_same_key(self):
         self.s3_bucket.Versioning().enable()
-        self.repo.vreg.config['s3-activate-object-versioning'] = True
-        s3storage = self.repo.system_source.storage('Image', 'data')
-        with self.admin_access.client_cnx() as cnx, \
-                patch('cubicweb_s3storage.storages.S3Storage.new_s3_key',
-                      return_value='shared-key'):
-            eid = create_image(cnx, b'some content').eid
-            _ = create_image(cnx, b'some content').eid
+        self.repo.vreg.config["s3-activate-object-versioning"] = True
+        s3storage = self.repo.system_source.storage("Image", "data")
+        with self.admin_access.client_cnx() as cnx, patch(
+            "cubicweb_s3storage.storages.S3Storage.new_s3_key",
+            return_value="shared-key",
+        ):
+            eid = create_image(cnx, b"some content").eid
+            _ = create_image(cnx, b"some content").eid
             cnx.commit()
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
         key, _ = s3storage.parse_key(key)
-        data = self.s3_bucket.Object(key).get()['Body'].read()
-        self.assertEqual(data, b'some content')
+        data = self.s3_bucket.Object(key).get()["Body"].read()
+        self.assertEqual(data, b"some content")

     def test_entity_modify(self):
         self.s3_bucket.Versioning().enable()
-        self.repo.vreg.config['s3-activate-object-versioning'] = True
-        s3storage = self.repo.system_source.storage('Image', 'data')
+        self.repo.vreg.config["s3-activate-object-versioning"] = True
+        s3storage = self.repo.system_source.storage("Image", "data")

         with self.admin_access.client_cnx() as cnx:
-            eid = create_image(cnx, b'some content').eid
+            eid = create_image(cnx, b"some content").eid
             cnx.commit()

         with self.admin_access.client_cnx() as cnx:
-            fobj = cnx.find('Image', eid=eid).one()
-            fobj.cw_set(data=Binary(b'something else'))
+            fobj = cnx.find("Image", eid=eid).one()
+            fobj.cw_set(data=Binary(b"something else"))
             cnx.commit()
             # retrieve key now as it will have changed by the modification
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
         key, _ = s3storage.parse_key(key)
-        data = self.s3_bucket.Object(key).get()['Body'].read()
-        self.assertEqual(data, b'something else')
+        data = self.s3_bucket.Object(key).get()["Body"].read()
+        self.assertEqual(data, b"something else")

     def test_entity_retrieve(self):
         self.s3_bucket.Versioning().enable()
-        self.repo.vreg.config['s3-activate-object-versioning'] = True
-        binstuff = ''.join(chr(x) for x in range(256))
+        self.repo.vreg.config["s3-activate-object-versioning"] = True
+        binstuff = "".join(chr(x) for x in range(256))
         if PY3:
             binstuff = binstuff.encode()
         with self.admin_access.client_cnx() as cnx:
@@ -96,110 +102,115 @@
             cnx.commit()

         with self.admin_access.client_cnx() as cnx:
-            rset = cnx.execute('Any D WHERE F eid %(eid)s, F data D',
-                               {'eid': eid})
+            rset = cnx.execute("Any D WHERE F eid %(eid)s, F data D", {"eid": eid})
             self.assertTrue(rset)
             data = rset.rows[0][0]
             self.assertEqual(data.read(), binstuff)

     def test_entity_delete(self):
         self.s3_bucket.Versioning().enable()
-        self.repo.vreg.config['s3-activate-object-versioning'] = True
-        s3storage = self.repo.system_source.storage('Image', 'data')
-        self.repo.vreg.config['s3-auto-delete'] = True
+        self.repo.vreg.config["s3-activate-object-versioning"] = True
+        s3storage = self.repo.system_source.storage("Image", "data")
+        self.repo.vreg.config["s3-auto-delete"] = True
         with self.admin_access.client_cnx() as cnx:
-            eid = create_image(cnx, b'some content').eid
+            eid = create_image(cnx, b"some content").eid
             cnx.commit()
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
         key, _ = s3storage.parse_key(key)
         keys = [x.key for x in self.s3_bucket.objects.all()]
         self.assertIn(key, keys)

         with self.admin_access.client_cnx() as cnx:
-            cnx.execute('DELETE Image X WHERE X eid %(eid)s', {'eid': eid})
+            cnx.execute("DELETE Image X WHERE X eid %(eid)s", {"eid": eid})
             cnx.commit()

         keys = [x.key for x in self.s3_bucket.objects.all()]
         self.assertNotIn(key, keys)

     def test_upload_content_type(self):
         self.s3_bucket.Versioning().enable()
-        self.repo.vreg.config['s3-activate-object-versioning'] = True
-        s3storage = self.repo.system_source.storage('Image', 'data')
-        mime_type = 'x-custom/mime-type'
-        with self.admin_access.client_cnx() as cnx, \
-                patch('cubicweb_s3storage.storages.S3Storage'
-                      '.get_upload_extra_args',
-                      return_value={'ContentType': mime_type}):
-            image = create_image(cnx, b'some content')
+        self.repo.vreg.config["s3-activate-object-versioning"] = True
+        s3storage = self.repo.system_source.storage("Image", "data")
+        mime_type = "x-custom/mime-type"
+        with self.admin_access.client_cnx() as cnx, patch(
+            "cubicweb_s3storage.storages.S3Storage" ".get_upload_extra_args",
+            return_value={"ContentType": mime_type},
+        ):
+            image = create_image(cnx, b"some content")
             cnx.commit()

-        s3storage = self.repo.system_source.storage('Image', 'data')
-        s3_key = s3storage.get_s3_key(image, 'data')
+        s3storage = self.repo.system_source.storage("Image", "data")
+        s3_key = s3storage.get_s3_key(image, "data")
         s3_key, _ = s3storage.parse_key(s3_key)
-        head = s3storage.s3cnx.head_object(
-            Bucket=self.s3_bucket.name,
-            Key=s3_key)
-        self.assertEqual(head['ContentType'], mime_type)
+        head = s3storage.s3cnx.head_object(Bucket=self.s3_bucket.name, Key=s3_key)
+        self.assertEqual(head["ContentType"], mime_type)


 class S3StorageTC(testing.S3StorageTestMixin, CubicWebTC):
-
     def test_s3key_gen(self):
-        s3storage = self.repo.system_source.storage('Image', 'data')
+        s3storage = self.repo.system_source.storage("Image", "data")
         with self.admin_access.client_cnx() as cnx:
-            fobj = create_image(cnx, b'some content')
+            fobj = create_image(cnx, b"some content")
             cnx.commit()
             eid = fobj.eid
-        k1 = s3storage.get_s3_key(fobj, 'data')
+        k1 = s3storage.get_s3_key(fobj, "data")

         with self.admin_access.client_cnx() as cnx:
-            fobj = cnx.find('Image', eid=eid).one()
-            k2 = s3storage.get_s3_key(fobj, 'data')
+            fobj = cnx.find("Image", eid=eid).one()
+            k2 = s3storage.get_s3_key(fobj, "data")
         self.assertEqual(k1, k2)

     def test_entity_create(self):
         with self.admin_access.client_cnx() as cnx:
-            eid = create_image(cnx, b'some content').eid
+            eid = create_image(cnx, b"some content").eid
             cnx.commit()
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
-        data = self.s3_bucket.Object(key).get()['Body'].read()
-        self.assertEqual(data, b'some content')
+        data = self.s3_bucket.Object(key).get()["Body"].read()
+        self.assertEqual(data, b"some content")

     def test_entity_create_with_same_key(self):
-        with self.admin_access.client_cnx() as cnx, \
-                patch('cubicweb_s3storage.storages.S3Storage.new_s3_key',
-                      return_value='shared-key'):
-            eid = create_image(cnx, b'some content').eid
-            _ = create_image(cnx, b'some content').eid
+        with self.admin_access.client_cnx() as cnx, patch(
+            "cubicweb_s3storage.storages.S3Storage.new_s3_key",
+            return_value="shared-key",
+        ):
+            eid = create_image(cnx, b"some content").eid
+            _ = create_image(cnx, b"some content").eid
             cnx.commit()
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
-        data = self.s3_bucket.Object(key).get()['Body'].read()
-        self.assertEqual(data, b'some content')
+        data = self.s3_bucket.Object(key).get()["Body"].read()
+        self.assertEqual(data, b"some content")

     def test_entity_modify(self):
         with self.admin_access.client_cnx() as cnx:
-            eid = create_image(cnx, b'some content').eid
+            eid = create_image(cnx, b"some content").eid
             cnx.commit()

         with self.admin_access.client_cnx() as cnx:
-            fobj = cnx.find('Image', eid=eid).one()
-            fobj.cw_set(data=Binary(b'something else'))
+            fobj = cnx.find("Image", eid=eid).one()
+            fobj.cw_set(data=Binary(b"something else"))
             cnx.commit()
             # retrieve key now as it will have changed by the modification
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
-        data = self.s3_bucket.Object(key).get()['Body'].read()
-        self.assertEqual(data, b'something else')
+        data = self.s3_bucket.Object(key).get()["Body"].read()
+        self.assertEqual(data, b"something else")

     def test_entity_retrieve(self):
-        binstuff = ''.join(chr(x) for x in range(256))
+        binstuff = "".join(chr(x) for x in range(256))
         if PY3:
             binstuff = binstuff.encode()
         with self.admin_access.client_cnx() as cnx:
@@ -207,83 +218,84 @@
             cnx.commit()

         with self.admin_access.client_cnx() as cnx:
-            rset = cnx.execute('Any D WHERE F eid %(eid)s, F data D',
-                               {'eid': eid})
+            rset = cnx.execute("Any D WHERE F eid %(eid)s, F data D", {"eid": eid})
             self.assertTrue(rset)
             data = rset.rows[0][0]
             self.assertEqual(data.read(), binstuff)

     def test_entity_delete(self):
-        self.repo.vreg.config['s3-auto-delete'] = True
+        self.repo.vreg.config["s3-auto-delete"] = True
         with self.admin_access.client_cnx() as cnx:
-            eid = create_image(cnx, b'some content').eid
+            eid = create_image(cnx, b"some content").eid
             cnx.commit()
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, X data D, '
-                              'X eid %(eid)s', {'eid': eid}).rows[0][0]
+            key = cnx.execute(
+                "Any STKEY(D) WHERE X is Image, X data D, " "X eid %(eid)s",
+                {"eid": eid},
+            ).rows[0][0]
             key = key.getvalue().decode()
         keys = [x.key for x in self.s3_bucket.objects.all()]
         self.assertIn(key, keys)

         with self.admin_access.client_cnx() as cnx:
-            cnx.execute('DELETE Image X WHERE X eid %(eid)s', {'eid': eid})
+            cnx.execute("DELETE Image X WHERE X eid %(eid)s", {"eid": eid})
             cnx.commit()

         keys = [x.key for x in self.s3_bucket.objects.all()]
         self.assertNotIn(key, keys)

     def test_upload_content_type(self):
-        mime_type = 'x-custom/mime-type'
-        with self.admin_access.client_cnx() as cnx, \
-                patch('cubicweb_s3storage.storages.S3Storage'
-                      '.get_upload_extra_args',
-                      return_value={'ContentType': mime_type}):
-            image = create_image(cnx, b'some content')
+        mime_type = "x-custom/mime-type"
+        with self.admin_access.client_cnx() as cnx, patch(
+            "cubicweb_s3storage.storages.S3Storage" ".get_upload_extra_args",
+            return_value={"ContentType": mime_type},
+        ):
+            image = create_image(cnx, b"some content")
             cnx.commit()

-        s3storage = self.repo.system_source.storage('Image', 'data')
-        s3_key = s3storage.get_s3_key(image, 'data')
-        head = s3storage.s3cnx.head_object(
-            Bucket=self.s3_bucket.name,
-            Key=s3_key)
-        self.assertEqual(head['ContentType'], mime_type)
+        s3storage = self.repo.system_source.storage("Image", "data")
+        s3_key = s3storage.get_s3_key(image, "data")
+        head = s3storage.s3cnx.head_object(Bucket=self.s3_bucket.name, Key=s3_key)
+        self.assertEqual(head["ContentType"], mime_type)


 class S3StorageMigrationTC(testing.S3StorageTestMixin, CubicWebTC):
-
     @contextmanager
     def mh(self):
         with self.admin_access.repo_cnx() as cnx:
             yield cnx, ServerMigrationHelper(
-                self.repo.config, self.repo.schema,
-                repo=self.repo, cnx=cnx,
-                interactive=False)
+                self.repo.config,
+                self.repo.schema,
+                repo=self.repo,
+                cnx=cnx,
+                interactive=False,
+            )

     def test_entity_migration(self):
         with self.admin_access.client_cnx() as cnx:
-            create_image(cnx, thumbnail=Binary(b'some content'))
+            create_image(cnx, thumbnail=Binary(b"some content"))
             cnx.commit()

         # Re-use storage instance of "data" attribute as it already has s3
         # mock activated.
-        s3_storage = self.repo.system_source.storage('Image', 'data')
+        s3_storage = self.repo.system_source.storage("Image", "data")

         with self.mh() as (cnx, mh):
-            storages.set_attribute_storage(
-                self.repo, 'Image', 'thumbnail', s3_storage)
+            storages.set_attribute_storage(self.repo, "Image", "thumbnail", s3_storage)

-            mh.cmd_storage_changed('Image', 'thumbnail')
+            mh.cmd_storage_changed("Image", "thumbnail")
             cnx.commit()

         with self.admin_access.client_cnx() as cnx:
-            key = cnx.execute('Any STKEY(D) WHERE X is Image, '
-                              'X thumbnail D').rows[0][0]
+            key = cnx.execute("Any STKEY(D) WHERE X is Image, " "X thumbnail D").rows[
+                0
+            ][0]
             key = key.getvalue().decode()
             # check it looks like an UUID generated by uuid.uuid1()
             # sorry, I'm lazy, this regex is a bit too permissive...
-            self.assertTrue(re.match(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}', key))
+            self.assertTrue(re.match(r"\w{8}-\w{4}-\w{4}-\w{4}-\w{12}", key))

-            value = cnx.execute('Any D WHERE X is Image, '
-                                'X thumbnail D').rows[0][0]
-            self.assertEqual(b'some content', value.getvalue())
+            value = cnx.execute("Any D WHERE X is Image, " "X thumbnail D").rows[0][0]
+            self.assertEqual(b"some content", value.getvalue())


-if __name__ == '__main__':
+if __name__ == "__main__":
     from unittest import main
+
     main()