From 5c9b61aa2a501f4f0dea9b2e570e8f047944ea1b Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Wed, 17 Mar 2021 15:57:42 -0400 Subject: [PATCH 01/14] feat: customer managed keys (CMEK) Implement customer managed keys (CMEK) feature. WIP. DO NOT MERGE. --- google/cloud/bigtable/cluster.py | 26 + google/cloud/bigtable/enums.py | 30 + google/cloud/bigtable/instance.py | 24 +- google/cloud/bigtable/table.py | 68 ++ tests/system/__init__.py | 0 tests/system/test_cmek.py | 1280 ++++++++++++++++++++ tests/{system.py => system/test_system.py} | 11 + tests/unit/test_cluster.py | 97 ++ tests/unit/test_table.py | 109 ++ 9 files changed, 1644 insertions(+), 1 deletion(-) create mode 100644 tests/system/__init__.py create mode 100644 tests/system/test_cmek.py rename tests/{system.py => system/test_system.py} (99%) diff --git a/google/cloud/bigtable/cluster.py b/google/cloud/bigtable/cluster.py index 5c4c355ff..eed5103f7 100644 --- a/google/cloud/bigtable/cluster.py +++ b/google/cloud/bigtable/cluster.py @@ -63,6 +63,19 @@ class Cluster(object): Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. + :type kms_key_name: str + :param kms_key_name: (Optional, Creation Only) The name of the KMS customer managed + encryption key (CMEK) to use for at-rest encryption of data in + this cluster. If omitted, Google's default encryption will be + used. If specified, the requirements for this key are: + + 1) The Cloud Bigtable service account associated with the + project that contains the cluster must be granted the + ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK. + 2) Only regional keys can be used and the region of the CMEK + key must match the region of the cluster. + 3) All clusters within an instance must use the same CMEK key. + :type _state: int :param _state: (`OutputOnly`) The current state of the cluster. 
@@ -81,6 +94,7 @@ def __init__( location_id=None, serve_nodes=None, default_storage_type=None, + kms_key_name=None, _state=None, ): self.cluster_id = cluster_id @@ -88,6 +102,7 @@ def __init__( self.location_id = location_id self.serve_nodes = serve_nodes self.default_storage_type = default_storage_type + self._kms_key_name = kms_key_name self._state = _state @classmethod @@ -145,6 +160,8 @@ def _update_from_pb(self, cluster_pb): self.location_id = cluster_pb.location.split("/")[-1] self.serve_nodes = cluster_pb.serve_nodes self.default_storage_type = cluster_pb.default_storage_type + if cluster_pb.encryption_config: + self._kms_key_name = cluster_pb.encryption_config.kms_key_name self._state = cluster_pb.state @property @@ -187,6 +204,11 @@ def state(self): """ return self._state + @property + def kms_key_name(self): + """str: Customer managed encryption key for the cluster.""" + return self._kms_key_name + def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented @@ -356,4 +378,8 @@ def _to_pb(self): serve_nodes=self.serve_nodes, default_storage_type=self.default_storage_type, ) + if self._kms_key_name: + cluster_pb.encryption_config = instance.Cluster.EncryptionConfig( + kms_key_name=self._kms_key_name, + ) return cluster_pb diff --git a/google/cloud/bigtable/enums.py b/google/cloud/bigtable/enums.py index 50c7f2e60..327b2f828 100644 --- a/google/cloud/bigtable/enums.py +++ b/google/cloud/bigtable/enums.py @@ -156,6 +156,7 @@ class View(object): NAME_ONLY = table.Table.View.NAME_ONLY SCHEMA_VIEW = table.Table.View.SCHEMA_VIEW REPLICATION_VIEW = table.Table.View.REPLICATION_VIEW + ENCRYPTION_VIEW = table.Table.View.ENCRYPTION_VIEW FULL = table.Table.View.FULL class ReplicationState(object): @@ -191,3 +192,32 @@ class ReplicationState(object): table.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE ) READY = table.Table.ClusterState.ReplicationState.READY + + +class EncryptionInfo: + class EncryptionType: + """Possible 
encryption types for a resource. + + Attributes: + ENCRYPTION_TYPE_UNSPECIFIED (int): Encryption type was not specified, though + data at rest remains encrypted. + GOOGLE_DEFAULT_ENCRYPTION (int): The data backing this resource is encrypted + at rest with a key that is fully managed by Google. No key version or + status will be populated. This is the default state. + CUSTOMER_MANAGED_ENCRYPTION (int): The data backing this resource is + encrypted at rest with a key that is managed by the customer. The in-use + version of the key and its status are populated for CMEK-protected + tables. CMEK-protected backups are pinned to the key version that was in + use at the time the backup was taken. This key version is populated but + its status is not tracked and is reported as `UNKNOWN`. + """ + + ENCRYPTION_TYPE_UNSPECIFIED = ( + table.EncryptionInfo.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED + ) + GOOGLE_DEFAULT_ENCRYPTION = ( + table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) + CUSTOMER_MANAGED_ENCRYPTION = ( + table.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) diff --git a/google/cloud/bigtable/instance.py b/google/cloud/bigtable/instance.py index d2fb5db07..138d3bfc1 100644 --- a/google/cloud/bigtable/instance.py +++ b/google/cloud/bigtable/instance.py @@ -540,7 +540,12 @@ def test_iam_permissions(self, permissions): return list(resp.permissions) def cluster( - self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None + self, + cluster_id, + location_id=None, + serve_nodes=None, + default_storage_type=None, + kms_key_name=None, ): """Factory to create a cluster associated with this instance. @@ -576,6 +581,22 @@ def cluster( :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance. 
+ + :type kms_key_name: str + :param kms_key_name: (Optional, Creation Only) The name of the KMS customer + managed encryption key (CMEK) to use for at-rest encryption + of data in this cluster. If omitted, Google's default + encryption will be used. If specified, the requirements for + this key are: + + 1) The Cloud Bigtable service account associated with the + project that contains the cluster must be granted the + ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the + CMEK. + 2) Only regional keys can be used and the region of the + CMEK key must match the region of the cluster. + 3) All clusters within an instance must use the same CMEK + key. """ return Cluster( cluster_id, @@ -583,6 +604,7 @@ def cluster( location_id=location_id, serve_nodes=serve_nodes, default_storage_type=default_storage_type, + kms_key_name=kms_key_name, ) def list_clusters(self): diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index 740a65ae6..068a25213 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -484,6 +484,33 @@ def get_cluster_states(self): for cluster_id, value_pb in table_pb.cluster_states.items() } + def get_encryption_info(self): + """List the encryption info for each cluster owned by this table. + + Gets the current encryption info for the table across all of the clusters. The + returned dict will be keyed by cluster id and contain a status for all of the + keys in use. + + :rtype: dict + :returns: Dictionary of encryption info for this table. Keys are cluster ids and + values are tuples of :class:`EncryptionInfo` instances. 
+ """ + ENCRYPTION_VIEW = enums.Table.View.ENCRYPTION_VIEW + table_client = self._instance._client.table_admin_client + table_pb = table_client.get_table( + request={"name": self.name, "view": ENCRYPTION_VIEW} + ) + + return { + cluster_id: tuple( + ( + EncryptionInfo._from_pb(info_pb) + for info_pb in value_pb.encryption_info + ) + ) + for cluster_id, value_pb in table_pb.cluster_states.items() + } + def read_row(self, row_key, filter_=None): """Read a single row from this table. @@ -1202,6 +1229,47 @@ def __ne__(self, other): return not self == other +class EncryptionInfo: + """Representation of Encryption Info + + :type encryption_type: int + :param encryption_type: See :class:`enums.EncryptionInfo.EncryptionType` + + :type encryption_status: google.rpc.status_pb2.Status + :param encryption_status: The encryption status. + + :type kms_key_version: str + :param kms_key_version: The key version used for encryption. + """ + + @classmethod + def _from_pb(cls, info_pb): + return cls( + info_pb.encryption_type, info_pb.encryption_status, info_pb.kms_key_version + ) + + def __init__(self, encryption_type, encryption_status, kms_key_version): + self.encryption_type = encryption_type + self.encryption_status = encryption_status + self.kms_key_version = kms_key_version + + def __eq__(self, other): + if self is other: + return True + + if not isinstance(other, type(self)): + return NotImplemented + + return ( + self.encryption_type == other.encryption_type + and self.encryption_status == other.encryption_status + and self.kms_key_version == other.kms_key_version + ) + + def __ne__(self, other): + return not self == other + + def _create_row_request( table_name, start_key=None, diff --git a/tests/system/__init__.py b/tests/system/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/system/test_cmek.py b/tests/system/test_cmek.py new file mode 100644 index 000000000..d15b242da --- /dev/null +++ b/tests/system/test_cmek.py @@ -0,0 +1,1280 @@ +# 
Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import operator +import os +import time +import unittest + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import TooManyRequests +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from test_utils.retry import RetryErrors +from test_utils.retry import RetryResult +from test_utils.system import EmulatorCreds +from test_utils.system import unique_resource_id + +from google.cloud._helpers import _datetime_from_microseconds +from google.cloud._helpers import _microseconds_from_datetime +from google.cloud._helpers import UTC +from google.cloud.bigtable.client import Client +from google.cloud.bigtable.column_family import MaxVersionsGCRule +from google.cloud.bigtable.policy import Policy +from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE +from google.cloud.bigtable.row_filters import ApplyLabelFilter +from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter +from google.cloud.bigtable.row_filters import RowFilterChain +from google.cloud.bigtable.row_filters import RowFilterUnion +from google.cloud.bigtable.row_data import Cell +from google.cloud.bigtable.row_data import PartialRowData +from google.cloud.bigtable.row_set import RowSet +from google.cloud.bigtable.row_set import RowRange + +# from google.cloud.bigtable_admin_v2.gapic 
import ( +# bigtable_table_admin_client_config as table_admin_config, +# ) + +UNIQUE_SUFFIX = unique_resource_id("-") +LOCATION_ID = "us-central1-c" +INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX +INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX +TABLE_ID = "google-cloud-python-test-table" +CLUSTER_ID = INSTANCE_ID + "-cluster" +CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" +SERVE_NODES = 3 +COLUMN_FAMILY_ID1 = "col-fam-id1" +COLUMN_FAMILY_ID2 = "col-fam-id2" +COL_NAME1 = b"col-name1" +COL_NAME2 = b"col-name2" +COL_NAME3 = b"col-name3-but-other-fam" +CELL_VAL1 = b"cell-val" +CELL_VAL2 = b"cell-val-newer" +CELL_VAL3 = b"altcol-cell-val" +CELL_VAL4 = b"foo" +ROW_KEY = b"row-key" +ROW_KEY_ALT = b"row-key-alt" +EXISTING_INSTANCES = [] +LABEL_KEY = "python-system" +label_stamp = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") +) +LABELS = {LABEL_KEY: str(label_stamp)} + + +class Config(object): + """Run-time configuration to be modified at set-up. + + This is a mutable stand-in to allow test set-up to modify + global state. 
+ """ + + CLIENT = None + INSTANCE = None + INSTANCE_DATA = None + CLUSTER = None + CLUSTER_DATA = None + IN_EMULATOR = False + + @classmethod + def KMS_KEY_NAME(cls): + return ( + f"projects/{cls.CLIENT.project}/locations/us-central1/" # {LOCATION_ID}/" + "keyRings/test-key-ring/cryptoKeys/test-key" + ) + + +def _retry_on_unavailable(exc): + """Retry only errors whose status code is 'UNAVAILABLE'.""" + from grpc import StatusCode + + return exc.code() == StatusCode.UNAVAILABLE + + +retry_429 = RetryErrors(TooManyRequests, max_tries=9) + + +def setUpModule(): + from google.cloud.exceptions import GrpcRendezvous + from google.cloud.bigtable.enums import Instance + + # See: https://github.com/googleapis/google-cloud-python/issues/5928 + # interfaces = table_admin_config.config["interfaces"] + # iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] + # methods = iface_config["methods"] + # create_table = methods["CreateTable"] + # create_table["timeout_millis"] = 90000 + + Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None + + if Config.IN_EMULATOR: + credentials = EmulatorCreds() + Config.CLIENT = Client(admin=True, credentials=credentials) + else: + Config.CLIENT = Client(admin=True) + + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) + Config.CLUSTER = Config.INSTANCE.cluster( + CLUSTER_ID, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + kms_key_name=Config.KMS_KEY_NAME(), + ) + Config.INSTANCE_DATA = Config.CLIENT.instance( + INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS + ) + Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( + CLUSTER_ID_DATA, location_id=LOCATION_ID + ) + + if not Config.IN_EMULATOR: + retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) + instances, failed_locations = retry(Config.CLIENT.list_instances)() + + if len(failed_locations) != 0: + raise ValueError("List instances failed in module set up.") + + EXISTING_INSTANCES[:] = 
instances + + # After listing, create the test instances. + admin_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) + admin_op.result(timeout=10) + data_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) + data_op.result(timeout=10) + + +def tearDownModule(): + if not Config.IN_EMULATOR: + retry_429(Config.INSTANCE.delete)() + retry_429(Config.INSTANCE_DATA.delete)() + + +class TestInstanceAdminAPI(unittest.TestCase): + def setUp(self): + if Config.IN_EMULATOR: + self.skipTest("Instance Admin API not supported in emulator") + self.instances_to_delete = [] + + def tearDown(self): + for instance in self.instances_to_delete: + retry_429(instance.delete)() + + def test_list_instances(self): + instances, failed_locations = Config.CLIENT.list_instances() + + self.assertEqual(failed_locations, []) + + found = set([instance.name for instance in instances]) + self.assertTrue(Config.INSTANCE.name in found) + + def test_reload(self): + from google.cloud.bigtable import enums + + # Use same arguments as Config.INSTANCE (created in `setUpModule`) + # so we can use reload() on a fresh instance. + alt_instance = Config.CLIENT.instance(INSTANCE_ID) + # Make sure metadata unset before reloading. + alt_instance.display_name = None + + alt_instance.reload() + self.assertEqual(alt_instance.display_name, Config.INSTANCE.display_name) + self.assertEqual(alt_instance.labels, Config.INSTANCE.labels) + self.assertEqual(alt_instance.type_, enums.Instance.Type.PRODUCTION) + + def test_create_instance_defaults(self): + from google.cloud.bigtable import enums + + ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX + instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) + ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" + serve_nodes = 1 + cluster = instance.cluster( + ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=serve_nodes + ) + operation = instance.create(clusters=[cluster]) + + # Make sure this instance gets deleted after the test case. 
+ self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + # Make sure that by default a PRODUCTION type instance is created + self.assertIsNone(instance.type_) + self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION) + + def test_create_instance(self): + from google.cloud.bigtable import enums + + _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT + + ALT_INSTANCE_ID = "new" + UNIQUE_SUFFIX + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS + ) + ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" + cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) + operation = instance.create(clusters=[cluster]) + + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new instance instance and make sure it is the same. 
+ instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + self.assertEqual(instance.type_, instance_alt.type_) + self.assertEqual(instance_alt.labels, LABELS) + self.assertEqual(instance_alt.state, enums.Instance.State.READY) + + def test_cluster_exists(self): + NONEXISTING_CLUSTER_ID = "cluster-id" + + cluster = Config.INSTANCE.cluster(CLUSTER_ID) + alt_cluster = Config.INSTANCE.cluster(NONEXISTING_CLUSTER_ID) + self.assertTrue(cluster.exists()) + self.assertFalse(alt_cluster.exists()) + + def test_instance_exists(self): + NONEXISTING_INSTANCE_ID = "instancer-id" + + alt_instance = Config.CLIENT.instance(NONEXISTING_INSTANCE_ID) + self.assertTrue(Config.INSTANCE.exists()) + self.assertFalse(alt_instance.exists()) + + def test_create_instance_w_two_clusters(self): + from google.cloud.bigtable import enums + from google.cloud.bigtable.table import ClusterState + + _PRODUCTION = enums.Instance.Type.PRODUCTION + ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS + ) + + ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" + ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" + LOCATION_ID_2 = "us-central1-f" + STORAGE_TYPE = enums.StorageType.HDD + serve_nodes = 1 + cluster_1 = instance.cluster( + ALT_CLUSTER_ID_1, + location_id=LOCATION_ID, + serve_nodes=serve_nodes, + default_storage_type=STORAGE_TYPE, + kms_key_name=Config.KMS_KEY_NAME(), + ) + cluster_2 = instance.cluster( + ALT_CLUSTER_ID_2, + location_id=LOCATION_ID_2, + serve_nodes=serve_nodes, + default_storage_type=STORAGE_TYPE, + kms_key_name=Config.KMS_KEY_NAME(), + ) + operation = instance.create(clusters=[cluster_1, cluster_2]) + + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. 
+ operation.result(timeout=120) + + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + self.assertEqual(instance.type_, instance_alt.type_) + + clusters, failed_locations = instance_alt.list_clusters() + self.assertEqual(failed_locations, []) + + clusters.sort(key=lambda x: x.name) + alt_cluster_1, alt_cluster_2 = clusters + + self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) + self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) + self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) + self.assertEqual( + cluster_1.default_storage_type, alt_cluster_1.default_storage_type + ) + self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) + self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) + self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) + self.assertEqual( + cluster_2.default_storage_type, alt_cluster_2.default_storage_type + ) + + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = Config.CLIENT.list_clusters() + self.assertFalse(failed_locations) + found = set([cluster.name for cluster in clusters]) + self.assertTrue( + {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( + found + ) + ) + + temp_table_id = "test-get-cluster-states" + temp_table = instance.table(temp_table_id) + temp_table.create() + + encryption_info = temp_table.get_encryption_info() + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + + result = temp_table.get_cluster_states() + ReplicationState = 
enums.Table.ReplicationState + expected_results = [ + ClusterState(ReplicationState.STATE_NOT_KNOWN), + ClusterState(ReplicationState.INITIALIZING), + ClusterState(ReplicationState.PLANNED_MAINTENANCE), + ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), + ClusterState(ReplicationState.READY), + ] + cluster_id_list = result.keys() + self.assertEqual(len(cluster_id_list), 2) + self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) + self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) + for clusterstate in result.values(): + self.assertIn(clusterstate, expected_results) + + # Test create app profile with multi_cluster_routing policy + app_profiles_to_delete = [] + description = "routing policy-multy" + app_profile_id_1 = "app_profile_id_1" + routing = enums.RoutingPolicyType.ANY + self._test_create_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + ignore_warnings=True, + ) + app_profiles_to_delete.append(app_profile_id_1) + + # Test list app profiles + self._test_list_app_profiles_helper(instance, [app_profile_id_1]) + + # Test modify app profile app_profile_id_1 + # routing policy to single cluster policy, + # cluster -> ALT_CLUSTER_ID_1, + # allow_transactional_writes -> disallowed + # modify description + description = "to routing policy-single" + routing = enums.RoutingPolicyType.SINGLE + self._test_modify_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_1, + allow_transactional_writes=False, + ) + + # Test modify app profile app_profile_id_1 + # cluster -> ALT_CLUSTER_ID_2, + # allow_transactional_writes -> allowed + self._test_modify_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=True, + ignore_warnings=True, + ) + + # Test create app profile with single cluster routing policy + description = 
"routing policy-single" + app_profile_id_2 = "app_profile_id_2" + routing = enums.RoutingPolicyType.SINGLE + self._test_create_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=False, + ) + app_profiles_to_delete.append(app_profile_id_2) + + # Test list app profiles + self._test_list_app_profiles_helper( + instance, [app_profile_id_1, app_profile_id_2] + ) + + # Test modify app profile app_profile_id_2 to + # allow transactional writes + # Note: no need to set ``ignore_warnings`` to True + # since we are not restrictings anything with this modification. + self._test_modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=True, + ) + + # Test modify app profile app_profile_id_2 routing policy + # to multi_cluster_routing policy + # modify description + description = "to routing policy-multy" + routing = enums.RoutingPolicyType.ANY + self._test_modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + allow_transactional_writes=False, + ignore_warnings=True, + ) + + # Test delete app profiles + for app_profile_id in app_profiles_to_delete: + self._test_delete_app_profile_helper(app_profile_id, instance) + + def test_update_display_name_and_labels(self): + OLD_DISPLAY_NAME = Config.INSTANCE.display_name + NEW_DISPLAY_NAME = "Foo Bar Baz" + n_label_stamp = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") + ) + + NEW_LABELS = {LABEL_KEY: str(n_label_stamp)} + Config.INSTANCE.display_name = NEW_DISPLAY_NAME + Config.INSTANCE.labels = NEW_LABELS + operation = Config.INSTANCE.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new instance instance and reload it. 
+ instance_alt = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) + self.assertEqual(instance_alt.display_name, OLD_DISPLAY_NAME) + self.assertEqual(instance_alt.labels, LABELS) + instance_alt.reload() + self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) + self.assertEqual(instance_alt.labels, NEW_LABELS) + + # Make sure to put the instance back the way it was for the + # other test cases. + Config.INSTANCE.display_name = OLD_DISPLAY_NAME + Config.INSTANCE.labels = LABELS + operation = Config.INSTANCE.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + def test_update_type(self): + from google.cloud.bigtable.enums import Instance + + _DEVELOPMENT = Instance.Type.DEVELOPMENT + _PRODUCTION = Instance.Type.PRODUCTION + ALT_INSTANCE_ID = "ndif" + UNIQUE_SUFFIX + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS + ) + operation = instance.create(location_id=LOCATION_ID) + + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Unset the display_name + instance.display_name = None + + instance.type_ = _PRODUCTION + operation = instance.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new instance instance and reload it. + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + self.assertIsNone(instance_alt.type_) + instance_alt.reload() + self.assertEqual(instance_alt.type_, _PRODUCTION) + + def test_update_cluster(self): + NEW_SERVE_NODES = 4 + + Config.CLUSTER.serve_nodes = NEW_SERVE_NODES + + operation = Config.CLUSTER.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new cluster instance and reload it. 
+ alt_cluster = Config.INSTANCE.cluster(CLUSTER_ID) + alt_cluster.reload() + self.assertEqual(alt_cluster.serve_nodes, NEW_SERVE_NODES) + + # Make sure to put the cluster back the way it was for the + # other test cases. + Config.CLUSTER.serve_nodes = SERVE_NODES + operation = Config.CLUSTER.update() + operation.result(timeout=20) + + def test_create_cluster(self): + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster + + ALT_CLUSTER_ID = INSTANCE_ID + "-c2" + ALT_LOCATION_ID = "us-central1-f" + ALT_SERVE_NODES = 2 + + cluster_2 = Config.INSTANCE.cluster( + ALT_CLUSTER_ID, + location_id=ALT_LOCATION_ID, + serve_nodes=ALT_SERVE_NODES, + default_storage_type=(StorageType.SSD), + kms_key_name=Config.KMS_KEY_NAME(), + ) + operation = cluster_2.create() + + # We want to make sure the operation completes. + operation.result(timeout=30) + + # Create a new object instance, reload and make sure it is the same. + alt_cluster = Config.INSTANCE.cluster(ALT_CLUSTER_ID) + alt_cluster.reload() + + self.assertEqual(cluster_2, alt_cluster) + self.assertEqual(cluster_2.location_id, alt_cluster.location_id) + self.assertEqual(alt_cluster.state, Cluster.State.READY) + self.assertEqual(cluster_2.serve_nodes, alt_cluster.serve_nodes) + self.assertEqual( + cluster_2.default_storage_type, alt_cluster.default_storage_type + ) + + # Delete the newly created cluster and confirm + self.assertTrue(cluster_2.exists()) + cluster_2.delete() + self.assertFalse(cluster_2.exists()) + + def _test_create_app_profile_helper( + self, + app_profile_id, + instance, + routing_policy_type, + description=None, + cluster_id=None, + allow_transactional_writes=None, + ignore_warnings=None, + ): + + app_profile = instance.app_profile( + app_profile_id=app_profile_id, + routing_policy_type=routing_policy_type, + description=description, + cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes, + ) + self.assertEqual( + 
app_profile.allow_transactional_writes, allow_transactional_writes
+        )
+
+        app_profile = app_profile.create(ignore_warnings=ignore_warnings)
+
+        # Load a different app_profile object from the server and
+        # verify that it is the same
+        alt_app_profile = instance.app_profile(app_profile_id)
+        alt_app_profile.reload()
+
+        self.assertEqual(app_profile.app_profile_id, alt_app_profile.app_profile_id)
+        self.assertEqual(app_profile.routing_policy_type, routing_policy_type)
+        self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type)
+        self.assertEqual(app_profile.description, alt_app_profile.description)
+        self.assertFalse(app_profile.allow_transactional_writes)
+        self.assertFalse(alt_app_profile.allow_transactional_writes)
+
+    def _test_list_app_profiles_helper(self, instance, app_profile_ids):
+        app_profiles = instance.list_app_profiles()
+        found = [app_prof.app_profile_id for app_prof in app_profiles]
+        for app_profile_id in app_profile_ids:
+            self.assertTrue(app_profile_id in found)
+
+    def _test_modify_app_profile_helper(
+        self,
+        app_profile_id,
+        instance,
+        routing_policy_type,
+        description=None,
+        cluster_id=None,
+        allow_transactional_writes=None,
+        ignore_warnings=None,
+    ):
+        app_profile = instance.app_profile(
+            app_profile_id=app_profile_id,
+            routing_policy_type=routing_policy_type,
+            description=description,
+            cluster_id=cluster_id,
+            allow_transactional_writes=allow_transactional_writes,
+        )
+
+        operation = app_profile.update(ignore_warnings)
+        operation.result(timeout=30)
+
+        alt_app_profile = instance.app_profile(app_profile_id)
+        alt_app_profile.reload()
+        self.assertEqual(alt_app_profile.description, description)
+        self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type)
+        self.assertEqual(alt_app_profile.cluster_id, cluster_id)
+        self.assertEqual(
+            alt_app_profile.allow_transactional_writes, allow_transactional_writes
+        )
+
+    def _test_delete_app_profile_helper(self, app_profile_id, instance):
+        app_profile = 
instance.app_profile(app_profile_id) + self.assertTrue(app_profile.exists()) + app_profile.delete(ignore_warnings=True) + self.assertFalse(app_profile.exists()) + + +class TestTableAdminAPI(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls._table = Config.INSTANCE_DATA.table(TABLE_ID) + cls._table.create() + + @classmethod + def tearDownClass(cls): + cls._table.delete() + + def setUp(self): + self.tables_to_delete = [] + self.backups_to_delete = [] + + def tearDown(self): + for table in self.tables_to_delete: + table.delete() + for backup in self.backups_to_delete: + backup.delete() + + def _skip_if_emulated(self, message): + # NOTE: This method is necessary because ``Config.IN_EMULATOR`` + # is set at runtime rather than import time, which means we + # can't use the @unittest.skipIf decorator. + if Config.IN_EMULATOR: + self.skipTest(message) + + def test_list_tables(self): + # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the + # table created in `setUpClass` here will be the only one. + tables = Config.INSTANCE_DATA.list_tables() + self.assertEqual(tables, [self._table]) + + def test_exists(self): + retry_until_true = RetryResult(lambda result: result) + retry_until_false = RetryResult(lambda result: not result) + temp_table_id = "test-table_existence" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + self.assertFalse(temp_table.exists()) + temp_table.create() + self.assertTrue(retry_until_true(temp_table.exists)()) + temp_table.delete() + self.assertFalse(retry_until_false(temp_table.exists)()) + + def test_create_table(self): + temp_table_id = "test-create-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + # First, create a sorted version of our expected result. 
+ name_attr = operator.attrgetter("name") + expected_tables = sorted([temp_table, self._table], key=name_attr) + + # Then query for the tables in the instance and sort them by + # name as well. + tables = Config.INSTANCE_DATA.list_tables() + sorted_tables = sorted(tables, key=name_attr) + self.assertEqual(sorted_tables, expected_tables) + + def test_test_iam_permissions(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") + temp_table_id = "test-test-iam-policy-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] + permissions_allowed = temp_table.test_iam_permissions(permissions) + self.assertEqual(permissions, permissions_allowed) + + def test_get_iam_policy(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") + temp_table_id = "test-get-iam-policy-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + policy = temp_table.get_iam_policy().to_api_repr() + self.assertEqual(policy["etag"], "ACAB") + self.assertEqual(policy["version"], 0) + + def test_set_iam_policy(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") + temp_table_id = "test-set-iam-policy-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + new_policy = Policy() + service_account_email = Config.CLIENT._credentials.service_account_email + new_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.service_account(service_account_email) + ] + policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() + + self.assertEqual(policy_latest["bindings"][0]["role"], "roles/bigtable.admin") + self.assertIn(service_account_email, policy_latest["bindings"][0]["members"][0]) + + def test_create_table_with_families(self): + temp_table_id 
= "test-create-table-with-failies" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + gc_rule = MaxVersionsGCRule(1) + temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) + self.tables_to_delete.append(temp_table) + + col_fams = temp_table.list_column_families() + + self.assertEqual(len(col_fams), 1) + retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] + self.assertIs(retrieved_col_fam._table, temp_table) + self.assertEqual(retrieved_col_fam.column_family_id, COLUMN_FAMILY_ID1) + self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) + + def test_create_table_with_split_keys(self): + self._skip_if_emulated("Split keys are not supported by Bigtable emulator") + temp_table_id = "foo-bar-baz-split-table" + initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create(initial_split_keys=initial_split_keys) + self.tables_to_delete.append(temp_table) + + # Read Sample Row Keys for created splits + sample_row_keys = temp_table.sample_row_keys() + actual_keys = [srk.row_key for srk in sample_row_keys] + + expected_keys = initial_split_keys + expected_keys.append(b"") + + self.assertEqual(actual_keys, expected_keys) + + def test_create_column_family(self): + temp_table_id = "test-create-column-family" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + self.assertEqual(temp_table.list_column_families(), {}) + gc_rule = MaxVersionsGCRule(1) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) + column_family.create() + + col_fams = temp_table.list_column_families() + + self.assertEqual(len(col_fams), 1) + retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] + self.assertIs(retrieved_col_fam._table, column_family._table) + self.assertEqual( + retrieved_col_fam.column_family_id, column_family.column_family_id + ) + self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) + + def 
test_update_column_family(self): + temp_table_id = "test-update-column-family" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + gc_rule = MaxVersionsGCRule(1) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) + column_family.create() + + # Check that our created table is as expected. + col_fams = temp_table.list_column_families() + self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family}) + + # Update the column family's GC rule and then try to update. + column_family.gc_rule = None + column_family.update() + + # Check that the update has propagated. + col_fams = temp_table.list_column_families() + self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) + + def test_delete_column_family(self): + temp_table_id = "test-delete-column-family" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + self.assertEqual(temp_table.list_column_families(), {}) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1) + column_family.create() + + # Make sure the family is there before deleting it. + col_fams = temp_table.list_column_families() + self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1]) + + retry_504 = RetryErrors(DeadlineExceeded) + retry_504(column_family.delete)() + # Make sure we have successfully deleted it. 
+ self.assertEqual(temp_table.list_column_families(), {}) + + def test_backup(self): + from google.cloud._helpers import _datetime_to_pb_timestamp + + temp_table_id = "test-backup-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + temp_backup_id = "test-backup" + + # TODO: consider using `datetime.datetime.now().timestamp()` + # when support for Python 2 is fully dropped + expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 + + # Testing `Table.backup()` factory + temp_backup = temp_table.backup( + temp_backup_id, + cluster_id=CLUSTER_ID_DATA, + expire_time=datetime.datetime.utcfromtimestamp(expire), + ) + + # Sanity check for `Backup.exists()` method + self.assertFalse(temp_backup.exists()) + + # Testing `Backup.create()` method + temp_backup.create().result() + + # Implicit testing of `Backup.delete()` method + self.backups_to_delete.append(temp_backup) + + # Testing `Backup.exists()` method + self.assertTrue(temp_backup.exists()) + + # Testing `Table.list_backups()` method + temp_table_backup = temp_table.list_backups()[0] + self.assertEqual(temp_backup_id, temp_table_backup.backup_id) + self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) + self.assertEqual(expire, temp_table_backup.expire_time.seconds) + + # Testing `Backup.update_expire_time()` method + expire += 3600 # A one-hour change in the `expire_time` parameter + updated_time = datetime.datetime.utcfromtimestamp(expire) + temp_backup.update_expire_time(updated_time) + test = _datetime_to_pb_timestamp(updated_time) + + # Testing `Backup.get()` method + temp_table_backup = temp_backup.get() + self.assertEqual( + test.seconds, + DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), + ) + + # Testing `Table.restore()` and `Backup.retore()` methods + restored_table_id = "test-backup-table-restored" + restored_table = Config.INSTANCE_DATA.table(restored_table_id) + temp_table.restore( + 
restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id + ).result() + tables = Config.INSTANCE_DATA.list_tables() + self.assertIn(restored_table, tables) + restored_table.delete() + + +class TestDataAPI(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls._table = table = Config.INSTANCE_DATA.table("test-data-api") + table.create() + table.column_family(COLUMN_FAMILY_ID1).create() + table.column_family(COLUMN_FAMILY_ID2).create() + + @classmethod + def tearDownClass(cls): + # Will also delete any data contained in the table. + cls._table.delete() + + def _maybe_emulator_skip(self, message): + # NOTE: This method is necessary because ``Config.IN_EMULATOR`` + # is set at runtime rather than import time, which means we + # can't use the @unittest.skipIf decorator. + if Config.IN_EMULATOR: + self.skipTest(message) + + def setUp(self): + self.rows_to_delete = [] + + def tearDown(self): + for row in self.rows_to_delete: + row.clear() + row.delete() + row.commit() + + def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): + timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) + timestamp1_micros = _microseconds_from_datetime(timestamp1) + # Truncate to millisecond granularity. 
+ timestamp1_micros -= timestamp1_micros % 1000 + timestamp1 = _datetime_from_microseconds(timestamp1_micros) + # 1000 microseconds is a millisecond + timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) + timestamp2_micros = _microseconds_from_datetime(timestamp2) + timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) + timestamp3_micros = _microseconds_from_datetime(timestamp3) + timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) + timestamp4_micros = _microseconds_from_datetime(timestamp4) + + if row1 is not None: + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) + if row2 is not None: + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) + if row3 is not None: + row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) + if row4 is not None: + row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) + + # Create the cells we will check. + cell1 = Cell(CELL_VAL1, timestamp1_micros) + cell2 = Cell(CELL_VAL2, timestamp2_micros) + cell3 = Cell(CELL_VAL3, timestamp3_micros) + cell4 = Cell(CELL_VAL4, timestamp4_micros) + return cell1, cell2, cell3, cell4 + + def test_timestamp_filter_millisecond_granularity(self): + from google.cloud.bigtable import row_filters + + end = datetime.datetime.now() + start = end - datetime.timedelta(minutes=60) + timestamp_range = row_filters.TimestampRange(start=start, end=end) + timefilter = row_filters.TimestampRangeFilter(timestamp_range) + row_data = self._table.read_rows(filter_=timefilter) + row_data.consume_all() + + def test_mutate_rows(self): + row1 = self._table.row(ROW_KEY) + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row1.commit() + self.rows_to_delete.append(row1) + row2 = self._table.row(ROW_KEY_ALT) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) + row2.commit() + self.rows_to_delete.append(row2) + + # Change the contents + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, 
CELL_VAL3) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) + rows = [row1, row2] + statuses = self._table.mutate_rows(rows) + result = [status.code for status in statuses] + expected_result = [0, 0] + self.assertEqual(result, expected_result) + + # Check the contents + row1_data = self._table.read_row(ROW_KEY) + self.assertEqual( + row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3 + ) + row2_data = self._table.read_row(ROW_KEY_ALT) + self.assertEqual( + row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4 + ) + + def test_truncate_table(self): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] + + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + self.rows_to_delete.append(row) + + self._table.truncate(timeout=200) + + read_rows = self._table.yield_rows() + + for row in read_rows: + self.assertNotIn(row.row_key.decode("utf-8"), row_keys) + + def test_drop_by_prefix_table(self): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] + + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + self.rows_to_delete.append(row) + + self._table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) + + read_rows = self._table.yield_rows() + expected_rows_count = 5 + read_rows_count = 0 + + for row in read_rows: + if row.row_key in row_keys: + read_rows_count += 1 + + self.assertEqual(expected_rows_count, read_rows_count) + + def test_yield_rows_with_row_set(self): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_6", + b"row_key_7", + 
b"row_key_8", + b"row_key_9", + ] + + rows = [] + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + rows.append(row) + self.rows_to_delete.append(row) + self._table.mutate_rows(rows) + + row_set = RowSet() + row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) + row_set.add_row_key(b"row_key_1") + + read_rows = self._table.yield_rows(row_set=row_set) + + expected_row_keys = [ + b"row_key_1", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_6", + ] + found_row_keys = [row.row_key for row in read_rows] + self.assertEqual(found_row_keys, expected_row_keys) + + def test_add_row_range_by_prefix_from_keys(self): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"sample_row_key_1", + b"sample_row_key_2", + ] + + rows = [] + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + rows.append(row) + self.rows_to_delete.append(row) + self._table.mutate_rows(rows) + + row_set = RowSet() + row_set.add_row_range_with_prefix("row") + + read_rows = self._table.yield_rows(row_set=row_set) + + expected_row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + ] + found_row_keys = [row.row_key for row in read_rows] + self.assertEqual(found_row_keys, expected_row_keys) + + def test_read_large_cell_limit(self): + self._maybe_emulator_skip( + "Maximum gRPC received message size for emulator is 4194304 bytes." + ) + row = self._table.row(ROW_KEY) + self.rows_to_delete.append(row) + + number_of_bytes = 10 * 1024 * 1024 + data = b"1" * number_of_bytes # 10MB of 1's. + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) + row.commit() + + # Read back the contents of the row. 
+ partial_row_data = self._table.read_row(ROW_KEY) + self.assertEqual(partial_row_data.row_key, ROW_KEY) + cell = partial_row_data.cells[COLUMN_FAMILY_ID1] + column = cell[COL_NAME1] + self.assertEqual(len(column), 1) + self.assertEqual(column[0].value, data) + + def test_read_row(self): + row = self._table.row(ROW_KEY) + self.rows_to_delete.append(row) + + cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row) + row.commit() + + # Read back the contents of the row. + partial_row_data = self._table.read_row(ROW_KEY) + self.assertEqual(partial_row_data.row_key, ROW_KEY) + + # Check the cells match. + ts_attr = operator.attrgetter("timestamp") + expected_row_contents = { + COLUMN_FAMILY_ID1: { + COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), + COL_NAME2: [cell3], + }, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, + } + self.assertEqual(partial_row_data.cells, expected_row_contents) + + def test_read_rows(self): + row = self._table.row(ROW_KEY) + row_alt = self._table.row(ROW_KEY_ALT) + self.rows_to_delete.extend([row, row_alt]) + + cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, row, row_alt) + row.commit() + row_alt.commit() + + rows_data = self._table.read_rows() + self.assertEqual(rows_data.rows, {}) + rows_data.consume_all() + + # NOTE: We should refrain from editing protected data on instances. + # Instead we should make the values public or provide factories + # for constructing objects with them. 
+ row_data = PartialRowData(ROW_KEY) + row_data._chunks_encountered = True + row_data._committed = True + row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} + + row_alt_data = PartialRowData(ROW_KEY_ALT) + row_alt_data._chunks_encountered = True + row_alt_data._committed = True + row_alt_data._cells = { + COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, + } + + expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} + self.assertEqual(rows_data.rows, expected_rows) + + def test_read_with_label_applied(self): + self._maybe_emulator_skip("Labels not supported by Bigtable emulator") + row = self._table.row(ROW_KEY) + self.rows_to_delete.append(row) + + cell1, _, cell3, _ = self._write_to_row(row, None, row) + row.commit() + + # Combine a label with column 1. + label1 = "label-red" + label1_filter = ApplyLabelFilter(label1) + col1_filter = ColumnQualifierRegexFilter(COL_NAME1) + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Combine a label with column 2. + label2 = "label-blue" + label2_filter = ApplyLabelFilter(label2) + col2_filter = ColumnQualifierRegexFilter(COL_NAME2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. + row_filter = RowFilterUnion(filters=[chain1, chain2]) + partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) + self.assertEqual(partial_row_data.row_key, ROW_KEY) + + cells_returned = partial_row_data.cells + col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) + # Make sure COLUMN_FAMILY_ID1 was the only key. + self.assertEqual(len(cells_returned), 0) + + (cell1_new,) = col_fam1.pop(COL_NAME1) + (cell3_new,) = col_fam1.pop(COL_NAME2) + # Make sure COL_NAME1 and COL_NAME2 were the only keys. + self.assertEqual(len(col_fam1), 0) + + # Check that cell1 has matching values and gained a label. 
+ self.assertEqual(cell1_new.value, cell1.value) + self.assertEqual(cell1_new.timestamp, cell1.timestamp) + self.assertEqual(cell1.labels, []) + self.assertEqual(cell1_new.labels, [label1]) + + # Check that cell3 has matching values and gained a label. + self.assertEqual(cell3_new.value, cell3.value) + self.assertEqual(cell3_new.timestamp, cell3.timestamp) + self.assertEqual(cell3.labels, []) + self.assertEqual(cell3_new.labels, [label2]) + + def test_access_with_non_admin_client(self): + client = Client(admin=False) + instance = client.instance(INSTANCE_ID_DATA) + table = instance.table(self._table.table_id) + self.assertIsNone(table.read_row("nonesuch")) diff --git a/tests/system.py b/tests/system/test_system.py similarity index 99% rename from tests/system.py rename to tests/system/test_system.py index 84f9977e1..511061177 100644 --- a/tests/system.py +++ b/tests/system/test_system.py @@ -332,6 +332,17 @@ def test_create_instance_w_two_clusters(self): temp_table_id = "test-get-cluster-states" temp_table = instance.table(temp_table_id) temp_table.create() + + encryption_info = temp_table.get_encryption_info() + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, + enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, + enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) + result = temp_table.get_cluster_states() ReplicationState = enums.Table.ReplicationState expected_results = [ diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index d5f731eb6..cba2473ed 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -16,6 +16,7 @@ import unittest import mock +import pytest from ._testing import _make_credentials @@ -60,6 +61,9 @@ class TestCluster(unittest.TestCase): OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format( PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID ) + KEY_RING_ID = 
"key-ring-id" + CRYPTO_KEY_ID = "crypto-key-id" + KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}" @staticmethod def _get_target_class(): @@ -90,6 +94,7 @@ def test_constructor_defaults(self): self.assertIsNone(cluster.state) self.assertIsNone(cluster.serve_nodes) self.assertIsNone(cluster.default_storage_type) + self.assertIsNone(cluster.kms_key_name) def test_constructor_non_default(self): from google.cloud.bigtable.enums import StorageType @@ -107,6 +112,7 @@ def test_constructor_non_default(self): _state=STATE, serve_nodes=self.SERVE_NODES, default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=self.KMS_KEY_NAME, ) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) @@ -114,6 +120,7 @@ def test_constructor_non_default(self): self.assertEqual(cluster.state, STATE) self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) + self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) def test_name_property(self): credentials = _make_credentials() @@ -125,6 +132,18 @@ def test_name_property(self): self.assertEqual(cluster.name, self.CLUSTER_NAME) + def test_kms_key_name_property(self): + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._make_one( + self.CLUSTER_ID, instance, kms_key_name=self.KMS_KEY_NAME + ) + + self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) + with pytest.raises(AttributeError): + cluster.kms_key_name = "I'm read only" + def test_from_pb_success(self): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums @@ -141,6 +160,9 @@ def test_from_pb_success(self): state=state, serve_nodes=self.SERVE_NODES, default_storage_type=storage_type, + encryption_config=data_v2_pb2.Cluster.EncryptionConfig( + kms_key_name=self.KMS_KEY_NAME, + ), ) klass = self._get_target_class() @@ -152,6 +174,7 @@ def 
test_from_pb_success(self): self.assertEqual(cluster.state, state) self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) self.assertEqual(cluster.default_storage_type, storage_type) + self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) def test_from_pb_bad_cluster_name(self): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 @@ -392,6 +415,80 @@ def test_create(self): ].kwargs self.assertEqual(actual_request, expected_request) + def test_create_w_cmek(self): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = self.LOCATION_PATH + self.LOCATION_ID + instance = Instance(self.INSTANCE_ID, client) + cluster = self._make_one( + self.CLUSTER_ID, + instance, + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=self.KMS_KEY_NAME, + ) + expected_request_cluster = instance_v2_pb2.Cluster( + location=LOCATION, + serve_nodes=cluster.serve_nodes, + default_storage_type=cluster.default_storage_type, + encryption_config=instance_v2_pb2.Cluster.EncryptionConfig( + kms_key_name=self.KMS_KEY_NAME, + ), + ) + expected_request = { + "request": { + "parent": instance.name, + "cluster_id": self.CLUSTER_ID, + "cluster": 
expected_request_cluster, + } + } + name = instance.name + metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + # Patch the stub used by the API method. + api = mock.create_autospec(BigtableInstanceAdminClient) + api.common_location_path.return_value = LOCATION + client._instance_admin_client = api + cluster._instance._client = client + cluster._instance._client.instance_admin_client.instance_path.return_value = ( + name + ) + client._instance_admin_client.create_cluster.return_value = response_pb + # Perform the method and check the result. + cluster.create() + + actual_request = client._instance_admin_client.create_cluster.call_args_list[ + 0 + ].kwargs + self.assertEqual(actual_request, expected_request) + def test_update(self): import datetime from google.longrunning import operations_pb2 diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index c52119192..227415718 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -534,6 +534,88 @@ def test_get_cluster_states(self): result = table.get_cluster_states() self.assertEqual(result, expected_result) + def test_get_encryption_info(self): + from google.rpc.code_pb2 import Code + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) + from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto + from google.cloud.bigtable.table import EncryptionInfo + + ENCRYPTION_TYPE_UNSPECIFIED = ( + enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED + ) + GOOGLE_DEFAULT_ENCRYPTION = enum_crypto.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + CUSTOMER_MANAGED_ENCRYPTION = ( + enum_crypto.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) + + table_api = 
mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) + credentials = _make_credentials() + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + response_pb = _TablePB( + cluster_states={ + "cluster-id1": _ClusterStateEncryptionInfoPB( + encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, + encryption_status=_StatusPB( + Code.OK, "beats me" + ), # , "I", "dunno"), + ), + "cluster-id2": _ClusterStateEncryptionInfoPB( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + ), + "cluster-id3": _ClusterStateEncryptionInfoPB( + encryption_type=CUSTOMER_MANAGED_ENCRYPTION, + encryption_status=_StatusPB( + Code.UNKNOWN, "Key version is not yet known." + ), + kms_key_version="shrug", + ), + } + ) + + # Patch the stub used by the API method. + client._table_admin_client = table_api + bigtable_table_stub = client._table_admin_client + + bigtable_table_stub.get_table.side_effect = [response_pb] + + # build expected result + expected_result = { + "cluster-id1": ( + EncryptionInfo( + encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, + encryption_status=_StatusPB(Code.OK, "beats me", "I", "dunno"), + kms_key_version="", + ), + ), + "cluster-id2": ( + EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=_StatusPB(0, ""), + kms_key_version="", + ), + ), + "cluster-id3": ( + EncryptionInfo( + encryption_type=CUSTOMER_MANAGED_ENCRYPTION, + encryption_status=_StatusPB( + Code.UNKNOWN, "Key version is not yet known." + ), + kms_key_version="shrug", + ), + ), + } + + # Perform the method and check the result. 
+ result = table.get_encryption_info() + self.assertEqual(result, expected_result) + def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey @@ -2257,5 +2339,32 @@ def _ClusterStatePB(replication_state): return table_v2_pb2.Table.ClusterState(replication_state=replication_state) +def _ClusterStateEncryptionInfoPB( + encryption_type, encryption_status=None, kms_key_version=None +): + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 + + return table_v2_pb2.Table.ClusterState( + encryption_info=( + table_v2_pb2.EncryptionInfo( + encryption_type=encryption_type, + encryption_status=encryption_status, + kms_key_version=kms_key_version, + ), + ) + ) + + +def _StatusPB(code, message, *details): + from google.rpc import status_pb2 + + status_pb = status_pb2.Status() + status_pb.code = code + status_pb.message = message + # status_pb.details = details ??? + + return status_pb + + def _read_rows_retry_exception(exc): return isinstance(exc, DeadlineExceeded) From 1f3bc49d5a18469d6a396c3ae4c843451d26cd2b Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Thu, 18 Mar 2021 12:28:07 -0400 Subject: [PATCH 02/14] Wrap Status. --- google/cloud/bigtable/table.py | 61 ++++++++++++++++++++++++++++++++-- tests/unit/test_table.py | 16 ++++----- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index 068a25213..90297bad6 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -1230,12 +1230,16 @@ def __ne__(self, other): class EncryptionInfo: - """Representation of Encryption Info + """Encryption information for a given resource. + + If this resource is protected with customer managed encryption, the in-use Google + Cloud Key Management Service (KMS) key versions will be specified along with their + status. 
:type encryption_type: int :param encryption_type: See :class:`enums.EncryptionInfo.EncryptionType` - :type encryption_status: google.rpc.status_pb2.Status + :type encryption_status: google.cloud.bigtable.table.Status :param encryption_status: The encryption status. :type kms_key_version: str @@ -1245,7 +1249,9 @@ class EncryptionInfo: @classmethod def _from_pb(cls, info_pb): return cls( - info_pb.encryption_type, info_pb.encryption_status, info_pb.kms_key_version + info_pb.encryption_type, + Status(info_pb.encryption_status), + info_pb.kms_key_version, ) def __init__(self, encryption_type, encryption_status, kms_key_version): @@ -1270,6 +1276,55 @@ def __ne__(self, other): return not self == other +class Status: + """A status, comprising a code and a message. + + See: `Cloud APIs Errors `_ + + This is a thin wrapper for ``google.rpc.status_pb2.Status``. + + :type status_pb: google.rpc.status_pb2.Status + :param status_pb: The status protocol buffer. + """ + + def __init__(self, status_pb): + self.status_pb = status_pb + + @property + def code(self): + """The status code. + + Values are defined in ``google.rpc.code_pb2.Code``. + + See: `googe.rpc.Code + `_ + + :rtype: int + :returns: The status code. + """ + return self.status_pb.code + + @property + def message(self): + """A human readable status message. + + :rypte: str + :returns: The status message. 
+ """ + return self.status_pb.message + + def __repr__(self): + return repr(self.status_pb) + + def __eq__(self, other): + if isinstance(other, type(self)): + return self.status_pb == other.status_pb + return NotImplemented + + def __ne__(self, other): + return not self == other + + def _create_row_request( table_name, start_key=None, diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index 227415718..ce21cf5e9 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -540,7 +540,7 @@ def test_get_encryption_info(self): client as bigtable_table_admin, ) from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto - from google.cloud.bigtable.table import EncryptionInfo + from google.cloud.bigtable.table import EncryptionInfo, Status ENCRYPTION_TYPE_UNSPECIFIED = ( enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED @@ -562,9 +562,7 @@ def test_get_encryption_info(self): cluster_states={ "cluster-id1": _ClusterStateEncryptionInfoPB( encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, - encryption_status=_StatusPB( - Code.OK, "beats me" - ), # , "I", "dunno"), + encryption_status=_StatusPB(Code.OK, "beats me"), ), "cluster-id2": _ClusterStateEncryptionInfoPB( encryption_type=GOOGLE_DEFAULT_ENCRYPTION, @@ -590,22 +588,24 @@ def test_get_encryption_info(self): "cluster-id1": ( EncryptionInfo( encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, - encryption_status=_StatusPB(Code.OK, "beats me", "I", "dunno"), + encryption_status=Status( + _StatusPB(Code.OK, "beats me", "I", "dunno") + ), kms_key_version="", ), ), "cluster-id2": ( EncryptionInfo( encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - encryption_status=_StatusPB(0, ""), + encryption_status=Status(_StatusPB(0, "")), kms_key_version="", ), ), "cluster-id3": ( EncryptionInfo( encryption_type=CUSTOMER_MANAGED_ENCRYPTION, - encryption_status=_StatusPB( - Code.UNKNOWN, "Key version is not yet known." 
+ encryption_status=Status( + _StatusPB(Code.UNKNOWN, "Key version is not yet known.") ), kms_key_version="shrug", ), From ea5dde1776412534fdba5b54c54adef4b7cb9696 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Thu, 18 Mar 2021 14:53:53 -0400 Subject: [PATCH 03/14] Wrapper for Status, reorganize to avoid circular imports. --- google/cloud/bigtable/backup.py | 21 +++++- google/cloud/bigtable/encryption.py | 64 +++++++++++++++++++ google/cloud/bigtable/error.py | 64 +++++++++++++++++++ google/cloud/bigtable/table.py | 99 +---------------------------- tests/system/test_cmek.py | 7 +- tests/system/test_system.py | 5 ++ tests/unit/test_backup.py | 29 +++++++++ tests/unit/test_table.py | 6 +- 8 files changed, 193 insertions(+), 102 deletions(-) create mode 100644 google/cloud/bigtable/encryption.py create mode 100644 google/cloud/bigtable/error.py diff --git a/google/cloud/bigtable/backup.py b/google/cloud/bigtable/backup.py index 6dead1f74..1c9dc5f02 100644 --- a/google/cloud/bigtable/backup.py +++ b/google/cloud/bigtable/backup.py @@ -19,6 +19,7 @@ from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable.encryption import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 @@ -67,13 +68,20 @@ class Backup(object): """ def __init__( - self, backup_id, instance, cluster_id=None, table_id=None, expire_time=None + self, + backup_id, + instance, + cluster_id=None, + table_id=None, + expire_time=None, + encryption_info=None, ): self.backup_id = backup_id self._instance = instance self._cluster = cluster_id self.table_id = table_id self._expire_time = expire_time + self._encryption_info = encryption_info self._parent = None self._source_table = None @@ -176,6 +184,15 @@ def expire_time(self): def expire_time(self, 
new_expire_time): self._expire_time = new_expire_time + @property + def encryption_info(self): + """Encryption info for this Backup. + + :rtype: :class:`google.cloud.bigtable.encryption.EncryptionInfo` + :returns: The encryption information for this backup. + """ + return self._encryption_info + @property def start_time(self): """The time this Backup was started. @@ -255,6 +272,7 @@ def from_pb(cls, backup_pb, instance): table_id = match.group("table_id") if match else None expire_time = backup_pb._pb.expire_time + encryption_info = EncryptionInfo._from_pb(backup_pb.encryption_info) backup = cls( backup_id, @@ -262,6 +280,7 @@ def from_pb(cls, backup_pb, instance): cluster_id=cluster_id, table_id=table_id, expire_time=expire_time, + encryption_info=encryption_info, ) backup._start_time = backup_pb._pb.start_time backup._end_time = backup_pb._pb.end_time diff --git a/google/cloud/bigtable/encryption.py b/google/cloud/bigtable/encryption.py new file mode 100644 index 000000000..1757297bc --- /dev/null +++ b/google/cloud/bigtable/encryption.py @@ -0,0 +1,64 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Class for encryption info for tables and backups.""" + +from google.cloud.bigtable.error import Status + + +class EncryptionInfo: + """Encryption information for a given resource. 
+ + If this resource is protected with customer managed encryption, the in-use Google + Cloud Key Management Service (KMS) key versions will be specified along with their + status. + + :type encryption_type: int + :param encryption_type: See :class:`enums.EncryptionInfo.EncryptionType` + + :type encryption_status: google.cloud.bigtable.encryption.Status + :param encryption_status: The encryption status. + + :type kms_key_version: str + :param kms_key_version: The key version used for encryption. + """ + + @classmethod + def _from_pb(cls, info_pb): + return cls( + info_pb.encryption_type, + Status(info_pb.encryption_status), + info_pb.kms_key_version, + ) + + def __init__(self, encryption_type, encryption_status, kms_key_version): + self.encryption_type = encryption_type + self.encryption_status = encryption_status + self.kms_key_version = kms_key_version + + def __eq__(self, other): + if self is other: + return True + + if not isinstance(other, type(self)): + return NotImplemented + + return ( + self.encryption_type == other.encryption_type + and self.encryption_status == other.encryption_status + and self.kms_key_version == other.kms_key_version + ) + + def __ne__(self, other): + return not self == other diff --git a/google/cloud/bigtable/error.py b/google/cloud/bigtable/error.py new file mode 100644 index 000000000..6c9a0dc4d --- /dev/null +++ b/google/cloud/bigtable/error.py @@ -0,0 +1,64 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Class for error status."""
+
+
+class Status:
+    """A status, comprising a code and a message.
+
+    See: `Cloud APIs Errors `_
+
+    This is a thin wrapper for ``google.rpc.status_pb2.Status``.
+
+    :type status_pb: google.rpc.status_pb2.Status
+    :param status_pb: The status protocol buffer.
+    """
+
+    def __init__(self, status_pb):
+        self.status_pb = status_pb
+
+    @property
+    def code(self):
+        """The status code.
+
+        Values are defined in ``google.rpc.code_pb2.Code``.
+
+        See: `google.rpc.Code
+        `_
+
+        :rtype: int
+        :returns: The status code.
+        """
+        return self.status_pb.code
+
+    @property
+    def message(self):
+        """A human readable status message.
+
+        :rtype: str
+        :returns: The status message.
+        """
+        return self.status_pb.message
+
+    def __repr__(self):
+        return repr(self.status_pb)
+
+    def __eq__(self, other):
+        if isinstance(other, type(self)):
+            return self.status_pb == other.status_pb
+        return NotImplemented
+
+    def __ne__(self, other):
+        return not self == other
diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py
index 90297bad6..fd4896a1e 100644
--- a/google/cloud/bigtable/table.py
+++ b/google/cloud/bigtable/table.py
@@ -28,6 +28,7 @@ from google.cloud.bigtable.column_family import ColumnFamily
 from google.cloud.bigtable.batcher import MutationsBatcher
 from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES
+from google.cloud.bigtable.encryption import EncryptionInfo
 from google.cloud.bigtable.policy import Policy
 from google.cloud.bigtable.row import AppendRow
 from google.cloud.bigtable.row import ConditionalRow
@@ -493,7 +494,7 @@ def get_encryption_info(self):
 
         :rtype: dict
         :returns: Dictionary of encryption info for this table. Keys are cluster ids and
-            values are tuples of :class:`EncryptionInfo` instances.
+            values are tuples of :class:`google.cloud.bigtable.encryption.EncryptionInfo` instances.
""" ENCRYPTION_VIEW = enums.Table.View.ENCRYPTION_VIEW table_client = self._instance._client.table_admin_client @@ -1229,102 +1230,6 @@ def __ne__(self, other): return not self == other -class EncryptionInfo: - """Encryption information for a given resource. - - If this resource is protected with customer managed encryption, the in-use Google - Cloud Key Management Service (KMS) key versions will be specified along with their - status. - - :type encryption_type: int - :param encryption_type: See :class:`enums.EncryptionInfo.EncryptionType` - - :type encryption_status: google.cloud.bigtable.table.Status - :param encryption_status: The encryption status. - - :type kms_key_version: str - :param kms_key_version: The key version used for encryption. - """ - - @classmethod - def _from_pb(cls, info_pb): - return cls( - info_pb.encryption_type, - Status(info_pb.encryption_status), - info_pb.kms_key_version, - ) - - def __init__(self, encryption_type, encryption_status, kms_key_version): - self.encryption_type = encryption_type - self.encryption_status = encryption_status - self.kms_key_version = kms_key_version - - def __eq__(self, other): - if self is other: - return True - - if not isinstance(other, type(self)): - return NotImplemented - - return ( - self.encryption_type == other.encryption_type - and self.encryption_status == other.encryption_status - and self.kms_key_version == other.kms_key_version - ) - - def __ne__(self, other): - return not self == other - - -class Status: - """A status, comprising a code and a message. - - See: `Cloud APIs Errors `_ - - This is a thin wrapper for ``google.rpc.status_pb2.Status``. - - :type status_pb: google.rpc.status_pb2.Status - :param status_pb: The status protocol buffer. - """ - - def __init__(self, status_pb): - self.status_pb = status_pb - - @property - def code(self): - """The status code. - - Values are defined in ``google.rpc.code_pb2.Code``. - - See: `googe.rpc.Code - `_ - - :rtype: int - :returns: The status code. 
- """ - return self.status_pb.code - - @property - def message(self): - """A human readable status message. - - :rypte: str - :returns: The status message. - """ - return self.status_pb.message - - def __repr__(self): - return repr(self.status_pb) - - def __eq__(self, other): - if isinstance(other, type(self)): - return self.status_pb == other.status_pb - return NotImplemented - - def __ne__(self, other): - return not self == other - - def _create_row_request( table_name, start_key=None, diff --git a/tests/system/test_cmek.py b/tests/system/test_cmek.py index d15b242da..7b4160212 100644 --- a/tests/system/test_cmek.py +++ b/tests/system/test_cmek.py @@ -138,7 +138,7 @@ def setUpModule(): INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS ) Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID + CLUSTER_ID_DATA, location_id=LOCATION_ID, kms_key_name=Config.KMS_KEY_NAME() ) if not Config.IN_EMULATOR: @@ -865,6 +865,7 @@ def test_delete_column_family(self): def test_backup(self): from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums temp_table_id = "test-backup-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) @@ -901,6 +902,10 @@ def test_backup(self): self.assertEqual(temp_backup_id, temp_table_backup.backup_id) self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) self.assertEqual(expire, temp_table_backup.expire_time.seconds) + self.assertEqual( + temp_table_backup.encryption_info.encryption_type, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) # Testing `Backup.update_expire_time()` method expire += 3600 # A one-hour change in the `expire_time` parameter diff --git a/tests/system/test_system.py b/tests/system/test_system.py index 511061177..ee792cbaf 100644 --- a/tests/system/test_system.py +++ b/tests/system/test_system.py @@ -852,6 +852,7 @@ def test_delete_column_family(self): def test_backup(self): from 
google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums temp_table_id = "test-backup-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) @@ -888,6 +889,10 @@ def test_backup(self): self.assertEqual(temp_backup_id, temp_table_backup.backup_id) self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) self.assertEqual(expire, temp_table_backup.expire_time.seconds) + self.assertEqual( + temp_table_backup.encryption_info.encryption_type, + enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) # Testing `Backup.update_expire_time()` method expire += 3600 # A one-hour change in the `expire_time` parameter diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py index 02efef492..2b81c3dfc 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/test_backup.py @@ -66,6 +66,7 @@ def test_constructor_defaults(self): self.assertIsNone(backup._end_time) self.assertIsNone(backup._size_bytes) self.assertIsNone(backup._state) + self.assertIsNone(backup._encryption_info) def test_constructor_non_defaults(self): instance = _Instance(self.INSTANCE_NAME) @@ -77,6 +78,7 @@ def test_constructor_non_defaults(self): cluster_id=self.CLUSTER_ID, table_id=self.TABLE_ID, expire_time=expire_time, + encryption_info="encryption_info", ) self.assertEqual(backup.backup_id, self.BACKUP_ID) @@ -84,6 +86,7 @@ def test_constructor_non_defaults(self): self.assertIs(backup._cluster, self.CLUSTER_ID) self.assertEqual(backup.table_id, self.TABLE_ID) self.assertEqual(backup._expire_time, expire_time) + self.assertEqual(backup._encryption_info, "encryption_info") self.assertIsNone(backup._parent) self.assertIsNone(backup._source_table) @@ -128,14 +131,20 @@ def test_from_pb_bad_name(self): klasse.from_pb(backup_pb, instance) def test_from_pb_success(self): + from google.cloud.bigtable.encryption import EncryptionInfo + from google.cloud.bigtable.error import Status from google.cloud.bigtable_admin_v2.types import table from 
google.cloud._helpers import _datetime_to_pb_timestamp + from google.rpc.code_pb2 import Code client = _Client() instance = _Instance(self.INSTANCE_NAME, client) timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) size_bytes = 1234 state = table.Backup.State.READY + GOOGLE_DEFAULT_ENCRYPTION = ( + table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, @@ -144,6 +153,11 @@ def test_from_pb_success(self): end_time=timestamp, size_bytes=size_bytes, state=state, + encryption_info=table.EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=_StatusPB(Code.OK, "Looks good over here."), + kms_key_version="I dunno, like, 2?", + ), ) klasse = self._get_target_class() @@ -159,6 +173,11 @@ def test_from_pb_success(self): self.assertEqual(backup.end_time, timestamp) self.assertEqual(backup._size_bytes, size_bytes) self.assertEqual(backup._state, state) + self.assertEqual(backup.encryption_info, EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=Status(_StatusPB(Code.OK, "Looks good over here.")), + kms_key_version="I dunno, like, 2?", + )) def test_property_name(self): from google.cloud.bigtable.client import Client @@ -862,3 +881,13 @@ def __init__(self, name, client=None): self.name = name self.instance_id = name.rsplit("/", 1)[1] self._client = client + + +def _StatusPB(code, message): + from google.rpc import status_pb2 + + status_pb = status_pb2.Status() + status_pb.code = code + status_pb.message = message + + return status_pb diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index ce21cf5e9..f70b525d9 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -539,8 +539,9 @@ def test_get_encryption_info(self): from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( client as bigtable_table_admin, ) + from google.cloud.bigtable.encryption import EncryptionInfo from 
google.cloud.bigtable.enums import EncryptionInfo as enum_crypto - from google.cloud.bigtable.table import EncryptionInfo, Status + from google.cloud.bigtable.error import Status ENCRYPTION_TYPE_UNSPECIFIED = ( enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED @@ -2355,13 +2356,12 @@ def _ClusterStateEncryptionInfoPB( ) -def _StatusPB(code, message, *details): +def _StatusPB(code, message): from google.rpc import status_pb2 status_pb = status_pb2.Status() status_pb.code = code status_pb.message = message - # status_pb.details = details ??? return status_pb From 47b84b629ff5f87b31f793b8f2450e60ef1c4f7a Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Fri, 19 Mar 2021 11:25:30 -0400 Subject: [PATCH 04/14] Blacken. --- tests/unit/test_backup.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py index 2b81c3dfc..9512eb759 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/test_backup.py @@ -173,11 +173,14 @@ def test_from_pb_success(self): self.assertEqual(backup.end_time, timestamp) self.assertEqual(backup._size_bytes, size_bytes) self.assertEqual(backup._state, state) - self.assertEqual(backup.encryption_info, EncryptionInfo( - encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - encryption_status=Status(_StatusPB(Code.OK, "Looks good over here.")), - kms_key_version="I dunno, like, 2?", - )) + self.assertEqual( + backup.encryption_info, + EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=Status(_StatusPB(Code.OK, "Looks good over here.")), + kms_key_version="I dunno, like, 2?", + ), + ) def test_property_name(self): from google.cloud.bigtable.client import Client From 20c8ba7b45286b501db4ebc7ad71b3ae88360d93 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Fri, 19 Mar 2021 11:26:04 -0400 Subject: [PATCH 05/14] Make system tests in charge of their own key. 
--- tests/system/test_cmek.py | 69 +++++++++++++++++++++++++++++++-------- 1 file changed, 56 insertions(+), 13 deletions(-) diff --git a/tests/system/test_cmek.py b/tests/system/test_cmek.py index 7b4160212..1a0913592 100644 --- a/tests/system/test_cmek.py +++ b/tests/system/test_cmek.py @@ -48,7 +48,10 @@ # ) UNIQUE_SUFFIX = unique_resource_id("-") -LOCATION_ID = "us-central1-c" +KMS_LOCATION_ID = "us-central1" +KMS_KEY_ID = "bigtable-system-tests-key" +KMS_KEY_RING_ID = f"{KMS_KEY_ID}-ring" +LOCATION_ID = f"{KMS_LOCATION_ID}-c" INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX TABLE_ID = "google-cloud-python-test-table" @@ -88,15 +91,11 @@ class Config(object): INSTANCE_DATA = None CLUSTER = None CLUSTER_DATA = None + KMS_CLIENT = None + KMS_KEY_RING = None + KMS_KEY = None IN_EMULATOR = False - @classmethod - def KMS_KEY_NAME(cls): - return ( - f"projects/{cls.CLIENT.project}/locations/us-central1/" # {LOCATION_ID}/" - "keyRings/test-key-ring/cryptoKeys/test-key" - ) - def _retry_on_unavailable(exc): """Retry only errors whose status code is 'UNAVAILABLE'.""" @@ -109,6 +108,7 @@ def _retry_on_unavailable(exc): def setUpModule(): + from google.cloud import kms from google.cloud.exceptions import GrpcRendezvous from google.cloud.bigtable.enums import Instance @@ -127,18 +127,61 @@ def setUpModule(): else: Config.CLIENT = Client(admin=True) + Config.KMS_CLIENT = kms.KeyManagementServiceClient() + Config.KMS_LOCATION_NAME = ( + f"projects/{Config.CLIENT.project}/locations/{KMS_LOCATION_ID}" + ) + + # There doesn't seem to be a way in the KMS API to destroy key rings or keys (only + # key versions), so since we can't really clean up after ourselves, we'll try to + # just create one key and reuse it for future test runs. 
+ key_ring_name = f"{Config.KMS_LOCATION_NAME}/keyRings/{KMS_KEY_RING_ID}" + key_rings = Config.KMS_CLIENT.list_key_rings(parent=Config.KMS_LOCATION_NAME) + for key_ring in key_rings: + if key_ring.name == key_ring_name: + Config.KMS_KEY_RING = key_ring + break + else: + Config.KMS_KEY_RING = Config.KMS_CLIENT.create_key_ring( + request={ + "parent": Config.KMS_LOCATION_NAME, + "key_ring_id": KMS_KEY_RING_ID, + "key_ring": {}, + } + ) + + key_name = f"{key_ring_name}/cryptoKeys/{KMS_KEY_ID}" + keys = Config.KMS_CLIENT.list_crypto_keys(parent=Config.KMS_KEY_RING.name) + for key in keys: + if key.name == key_name: + Config.KMS_KEY = key + break + else: + Config.KMS_KEY = Config.KMS_CLIENT.create_crypto_key( + request={ + "parent": Config.KMS_KEY_RING.name, + "crypto_key_id": KMS_KEY_ID, + "crypto_key": { + "purpose": kms.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT, + "version_template": { + "algorithm": kms.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION, + }, + }, + } + ) + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) Config.CLUSTER = Config.INSTANCE.cluster( CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, - kms_key_name=Config.KMS_KEY_NAME(), + kms_key_name=Config.KMS_KEY.name, ) Config.INSTANCE_DATA = Config.CLIENT.instance( INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS ) Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID, kms_key_name=Config.KMS_KEY_NAME() + CLUSTER_ID_DATA, location_id=LOCATION_ID, kms_key_name=Config.KMS_KEY.name ) if not Config.IN_EMULATOR: @@ -287,14 +330,14 @@ def test_create_instance_w_two_clusters(self): location_id=LOCATION_ID, serve_nodes=serve_nodes, default_storage_type=STORAGE_TYPE, - kms_key_name=Config.KMS_KEY_NAME(), + kms_key_name=Config.KMS_KEY.name, ) cluster_2 = instance.cluster( ALT_CLUSTER_ID_2, location_id=LOCATION_ID_2, serve_nodes=serve_nodes, default_storage_type=STORAGE_TYPE, - 
kms_key_name=Config.KMS_KEY_NAME(), + kms_key_name=Config.KMS_KEY.name, ) operation = instance.create(clusters=[cluster_1, cluster_2]) @@ -567,7 +610,7 @@ def test_create_cluster(self): location_id=ALT_LOCATION_ID, serve_nodes=ALT_SERVE_NODES, default_storage_type=(StorageType.SSD), - kms_key_name=Config.KMS_KEY_NAME(), + kms_key_name=Config.KMS_KEY.name, ) operation = cluster_2.create() From df7bdc56855585fa583c05e89b7e309fa1780454 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Mon, 22 Mar 2021 11:35:09 -0400 Subject: [PATCH 06/14] Consolidate system tests. Get KMS_KEY_NAME from user's environment. --- tests/system/__init__.py | 0 tests/system/test_cmek.py | 1328 ----------------------------- tests/{system => }/test_system.py | 225 ++++- 3 files changed, 216 insertions(+), 1337 deletions(-) delete mode 100644 tests/system/__init__.py delete mode 100644 tests/system/test_cmek.py rename tests/{system => }/test_system.py (85%) diff --git a/tests/system/__init__.py b/tests/system/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/system/test_cmek.py b/tests/system/test_cmek.py deleted file mode 100644 index 1a0913592..000000000 --- a/tests/system/test_cmek.py +++ /dev/null @@ -1,1328 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import operator -import os -import time -import unittest - -from google.api_core.datetime_helpers import DatetimeWithNanoseconds -from google.api_core.exceptions import DeadlineExceeded -from google.api_core.exceptions import TooManyRequests -from google.cloud.environment_vars import BIGTABLE_EMULATOR -from test_utils.retry import RetryErrors -from test_utils.retry import RetryResult -from test_utils.system import EmulatorCreds -from test_utils.system import unique_resource_id - -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import UTC -from google.cloud.bigtable.client import Client -from google.cloud.bigtable.column_family import MaxVersionsGCRule -from google.cloud.bigtable.policy import Policy -from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE -from google.cloud.bigtable.row_filters import ApplyLabelFilter -from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter -from google.cloud.bigtable.row_filters import RowFilterChain -from google.cloud.bigtable.row_filters import RowFilterUnion -from google.cloud.bigtable.row_data import Cell -from google.cloud.bigtable.row_data import PartialRowData -from google.cloud.bigtable.row_set import RowSet -from google.cloud.bigtable.row_set import RowRange - -# from google.cloud.bigtable_admin_v2.gapic import ( -# bigtable_table_admin_client_config as table_admin_config, -# ) - -UNIQUE_SUFFIX = unique_resource_id("-") -KMS_LOCATION_ID = "us-central1" -KMS_KEY_ID = "bigtable-system-tests-key" -KMS_KEY_RING_ID = f"{KMS_KEY_ID}-ring" -LOCATION_ID = f"{KMS_LOCATION_ID}-c" -INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX -INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX -TABLE_ID = "google-cloud-python-test-table" -CLUSTER_ID = INSTANCE_ID + "-cluster" -CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" -SERVE_NODES = 3 -COLUMN_FAMILY_ID1 = "col-fam-id1" -COLUMN_FAMILY_ID2 = "col-fam-id2" 
-COL_NAME1 = b"col-name1" -COL_NAME2 = b"col-name2" -COL_NAME3 = b"col-name3-but-other-fam" -CELL_VAL1 = b"cell-val" -CELL_VAL2 = b"cell-val-newer" -CELL_VAL3 = b"altcol-cell-val" -CELL_VAL4 = b"foo" -ROW_KEY = b"row-key" -ROW_KEY_ALT = b"row-key-alt" -EXISTING_INSTANCES = [] -LABEL_KEY = "python-system" -label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") -) -LABELS = {LABEL_KEY: str(label_stamp)} - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. - """ - - CLIENT = None - INSTANCE = None - INSTANCE_DATA = None - CLUSTER = None - CLUSTER_DATA = None - KMS_CLIENT = None - KMS_KEY_RING = None - KMS_KEY = None - IN_EMULATOR = False - - -def _retry_on_unavailable(exc): - """Retry only errors whose status code is 'UNAVAILABLE'.""" - from grpc import StatusCode - - return exc.code() == StatusCode.UNAVAILABLE - - -retry_429 = RetryErrors(TooManyRequests, max_tries=9) - - -def setUpModule(): - from google.cloud import kms - from google.cloud.exceptions import GrpcRendezvous - from google.cloud.bigtable.enums import Instance - - # See: https://github.com/googleapis/google-cloud-python/issues/5928 - # interfaces = table_admin_config.config["interfaces"] - # iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] - # methods = iface_config["methods"] - # create_table = methods["CreateTable"] - # create_table["timeout_millis"] = 90000 - - Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None - - if Config.IN_EMULATOR: - credentials = EmulatorCreds() - Config.CLIENT = Client(admin=True, credentials=credentials) - else: - Config.CLIENT = Client(admin=True) - - Config.KMS_CLIENT = kms.KeyManagementServiceClient() - Config.KMS_LOCATION_NAME = ( - f"projects/{Config.CLIENT.project}/locations/{KMS_LOCATION_ID}" - ) - - # There doesn't seem to be a way in the KMS API to destroy key rings 
or keys (only - # key versions), so since we can't really clean up after ourselves, we'll try to - # just create one key and reuse it for future test runs. - key_ring_name = f"{Config.KMS_LOCATION_NAME}/keyRings/{KMS_KEY_RING_ID}" - key_rings = Config.KMS_CLIENT.list_key_rings(parent=Config.KMS_LOCATION_NAME) - for key_ring in key_rings: - if key_ring.name == key_ring_name: - Config.KMS_KEY_RING = key_ring - break - else: - Config.KMS_KEY_RING = Config.KMS_CLIENT.create_key_ring( - request={ - "parent": Config.KMS_LOCATION_NAME, - "key_ring_id": KMS_KEY_RING_ID, - "key_ring": {}, - } - ) - - key_name = f"{key_ring_name}/cryptoKeys/{KMS_KEY_ID}" - keys = Config.KMS_CLIENT.list_crypto_keys(parent=Config.KMS_KEY_RING.name) - for key in keys: - if key.name == key_name: - Config.KMS_KEY = key - break - else: - Config.KMS_KEY = Config.KMS_CLIENT.create_crypto_key( - request={ - "parent": Config.KMS_KEY_RING.name, - "crypto_key_id": KMS_KEY_ID, - "crypto_key": { - "purpose": kms.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT, - "version_template": { - "algorithm": kms.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION, - }, - }, - } - ) - - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - Config.CLUSTER = Config.INSTANCE.cluster( - CLUSTER_ID, - location_id=LOCATION_ID, - serve_nodes=SERVE_NODES, - kms_key_name=Config.KMS_KEY.name, - ) - Config.INSTANCE_DATA = Config.CLIENT.instance( - INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS - ) - Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID, kms_key_name=Config.KMS_KEY.name - ) - - if not Config.IN_EMULATOR: - retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) - instances, failed_locations = retry(Config.CLIENT.list_instances)() - - if len(failed_locations) != 0: - raise ValueError("List instances failed in module set up.") - - EXISTING_INSTANCES[:] = instances - - # After listing, create the 
test instances. - admin_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) - admin_op.result(timeout=10) - data_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) - data_op.result(timeout=10) - - -def tearDownModule(): - if not Config.IN_EMULATOR: - retry_429(Config.INSTANCE.delete)() - retry_429(Config.INSTANCE_DATA.delete)() - - -class TestInstanceAdminAPI(unittest.TestCase): - def setUp(self): - if Config.IN_EMULATOR: - self.skipTest("Instance Admin API not supported in emulator") - self.instances_to_delete = [] - - def tearDown(self): - for instance in self.instances_to_delete: - retry_429(instance.delete)() - - def test_list_instances(self): - instances, failed_locations = Config.CLIENT.list_instances() - - self.assertEqual(failed_locations, []) - - found = set([instance.name for instance in instances]) - self.assertTrue(Config.INSTANCE.name in found) - - def test_reload(self): - from google.cloud.bigtable import enums - - # Use same arguments as Config.INSTANCE (created in `setUpModule`) - # so we can use reload() on a fresh instance. - alt_instance = Config.CLIENT.instance(INSTANCE_ID) - # Make sure metadata unset before reloading. - alt_instance.display_name = None - - alt_instance.reload() - self.assertEqual(alt_instance.display_name, Config.INSTANCE.display_name) - self.assertEqual(alt_instance.labels, Config.INSTANCE.labels) - self.assertEqual(alt_instance.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance_defaults(self): - from google.cloud.bigtable import enums - - ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - serve_nodes = 1 - cluster = instance.cluster( - ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=serve_nodes - ) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. 
- self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - # Make sure that by default a PRODUCTION type instance is created - self.assertIsNone(instance.type_) - self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance(self): - from google.cloud.bigtable import enums - - _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT - - ALT_INSTANCE_ID = "new" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - self.assertEqual(instance_alt.labels, LABELS) - self.assertEqual(instance_alt.state, enums.Instance.State.READY) - - def test_cluster_exists(self): - NONEXISTING_CLUSTER_ID = "cluster-id" - - cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster = Config.INSTANCE.cluster(NONEXISTING_CLUSTER_ID) - self.assertTrue(cluster.exists()) - self.assertFalse(alt_cluster.exists()) - - def test_instance_exists(self): - NONEXISTING_INSTANCE_ID = "instancer-id" - - alt_instance = Config.CLIENT.instance(NONEXISTING_INSTANCE_ID) - self.assertTrue(Config.INSTANCE.exists()) - self.assertFalse(alt_instance.exists()) - - def test_create_instance_w_two_clusters(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable.table import ClusterState - - _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS - ) - - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" - LOCATION_ID_2 = "us-central1-f" - STORAGE_TYPE = enums.StorageType.HDD - serve_nodes = 1 - cluster_1 = instance.cluster( - ALT_CLUSTER_ID_1, - location_id=LOCATION_ID, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - kms_key_name=Config.KMS_KEY.name, - ) - cluster_2 = instance.cluster( - ALT_CLUSTER_ID_2, - location_id=LOCATION_ID_2, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - kms_key_name=Config.KMS_KEY.name, - ) - operation = instance.create(clusters=[cluster_1, cluster_2]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. 
- operation.result(timeout=120) - - # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - - clusters, failed_locations = instance_alt.list_clusters() - self.assertEqual(failed_locations, []) - - clusters.sort(key=lambda x: x.name) - alt_cluster_1, alt_cluster_2 = clusters - - self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) - self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) - self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) - self.assertEqual( - cluster_1.default_storage_type, alt_cluster_1.default_storage_type - ) - self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) - self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster_2.default_storage_type - ) - - # Test list clusters in project via 'client.list_clusters' - clusters, failed_locations = Config.CLIENT.list_clusters() - self.assertFalse(failed_locations) - found = set([cluster.name for cluster in clusters]) - self.assertTrue( - {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( - found - ) - ) - - temp_table_id = "test-get-cluster-states" - temp_table = instance.table(temp_table_id) - temp_table.create() - - encryption_info = temp_table.get_encryption_info() - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - - result = temp_table.get_cluster_states() - ReplicationState = 
enums.Table.ReplicationState - expected_results = [ - ClusterState(ReplicationState.STATE_NOT_KNOWN), - ClusterState(ReplicationState.INITIALIZING), - ClusterState(ReplicationState.PLANNED_MAINTENANCE), - ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), - ClusterState(ReplicationState.READY), - ] - cluster_id_list = result.keys() - self.assertEqual(len(cluster_id_list), 2) - self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) - self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) - for clusterstate in result.values(): - self.assertIn(clusterstate, expected_results) - - # Test create app profile with multi_cluster_routing policy - app_profiles_to_delete = [] - description = "routing policy-multy" - app_profile_id_1 = "app_profile_id_1" - routing = enums.RoutingPolicyType.ANY - self._test_create_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - ignore_warnings=True, - ) - app_profiles_to_delete.append(app_profile_id_1) - - # Test list app profiles - self._test_list_app_profiles_helper(instance, [app_profile_id_1]) - - # Test modify app profile app_profile_id_1 - # routing policy to single cluster policy, - # cluster -> ALT_CLUSTER_ID_1, - # allow_transactional_writes -> disallowed - # modify description - description = "to routing policy-single" - routing = enums.RoutingPolicyType.SINGLE - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_1, - allow_transactional_writes=False, - ) - - # Test modify app profile app_profile_id_1 - # cluster -> ALT_CLUSTER_ID_2, - # allow_transactional_writes -> allowed - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ignore_warnings=True, - ) - - # Test create app profile with single cluster routing policy - description = 
"routing policy-single" - app_profile_id_2 = "app_profile_id_2" - routing = enums.RoutingPolicyType.SINGLE - self._test_create_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=False, - ) - app_profiles_to_delete.append(app_profile_id_2) - - # Test list app profiles - self._test_list_app_profiles_helper( - instance, [app_profile_id_1, app_profile_id_2] - ) - - # Test modify app profile app_profile_id_2 to - # allow transactional writes - # Note: no need to set ``ignore_warnings`` to True - # since we are not restrictings anything with this modification. - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ) - - # Test modify app profile app_profile_id_2 routing policy - # to multi_cluster_routing policy - # modify description - description = "to routing policy-multy" - routing = enums.RoutingPolicyType.ANY - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - allow_transactional_writes=False, - ignore_warnings=True, - ) - - # Test delete app profiles - for app_profile_id in app_profiles_to_delete: - self._test_delete_app_profile_helper(app_profile_id, instance) - - def test_update_display_name_and_labels(self): - OLD_DISPLAY_NAME = Config.INSTANCE.display_name - NEW_DISPLAY_NAME = "Foo Bar Baz" - n_label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") - ) - - NEW_LABELS = {LABEL_KEY: str(n_label_stamp)} - Config.INSTANCE.display_name = NEW_DISPLAY_NAME - Config.INSTANCE.labels = NEW_LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and reload it. 
- instance_alt = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - self.assertEqual(instance_alt.display_name, OLD_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, LABELS) - instance_alt.reload() - self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, NEW_LABELS) - - # Make sure to put the instance back the way it was for the - # other test cases. - Config.INSTANCE.display_name = OLD_DISPLAY_NAME - Config.INSTANCE.labels = LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - def test_update_type(self): - from google.cloud.bigtable.enums import Instance - - _DEVELOPMENT = Instance.Type.DEVELOPMENT - _PRODUCTION = Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "ndif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - operation = instance.create(location_id=LOCATION_ID) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Unset the display_name - instance.display_name = None - - instance.type_ = _PRODUCTION - operation = instance.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - self.assertIsNone(instance_alt.type_) - instance_alt.reload() - self.assertEqual(instance_alt.type_, _PRODUCTION) - - def test_update_cluster(self): - NEW_SERVE_NODES = 4 - - Config.CLUSTER.serve_nodes = NEW_SERVE_NODES - - operation = Config.CLUSTER.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new cluster instance and reload it. 
- alt_cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster.reload() - self.assertEqual(alt_cluster.serve_nodes, NEW_SERVE_NODES) - - # Make sure to put the cluster back the way it was for the - # other test cases. - Config.CLUSTER.serve_nodes = SERVE_NODES - operation = Config.CLUSTER.update() - operation.result(timeout=20) - - def test_create_cluster(self): - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - - ALT_CLUSTER_ID = INSTANCE_ID + "-c2" - ALT_LOCATION_ID = "us-central1-f" - ALT_SERVE_NODES = 2 - - cluster_2 = Config.INSTANCE.cluster( - ALT_CLUSTER_ID, - location_id=ALT_LOCATION_ID, - serve_nodes=ALT_SERVE_NODES, - default_storage_type=(StorageType.SSD), - kms_key_name=Config.KMS_KEY.name, - ) - operation = cluster_2.create() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new object instance, reload and make sure it is the same. - alt_cluster = Config.INSTANCE.cluster(ALT_CLUSTER_ID) - alt_cluster.reload() - - self.assertEqual(cluster_2, alt_cluster) - self.assertEqual(cluster_2.location_id, alt_cluster.location_id) - self.assertEqual(alt_cluster.state, Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster.default_storage_type - ) - - # Delete the newly created cluster and confirm - self.assertTrue(cluster_2.exists()) - cluster_2.delete() - self.assertFalse(cluster_2.exists()) - - def _test_create_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None, - ): - - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - self.assertEqual( - 
app_profile.allow_transactional_writes, allow_transactional_writes - ) - - app_profile = app_profile.create(ignore_warnings=ignore_warnings) - - # Load a different app_profile objec form the server and - # verrify that it is the same - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - - self.assertEqual(app_profile.app_profile_id, alt_app_profile.app_profile_id) - self.assertEqual(app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(app_profile.description, alt_app_profile.description) - self.assertFalse(app_profile.allow_transactional_writes) - self.assertFalse(alt_app_profile.allow_transactional_writes) - - def _test_list_app_profiles_helper(self, instance, app_profile_ids): - app_profiles = instance.list_app_profiles() - found = [app_prof.app_profile_id for app_prof in app_profiles] - for app_profile_id in app_profile_ids: - self.assertTrue(app_profile_id in found) - - def _test_modify_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None, - ): - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - - operation = app_profile.update(ignore_warnings) - operation.result(timeout=30) - - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - self.assertEqual(alt_app_profile.description, description) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.cluster_id, cluster_id) - self.assertEqual( - alt_app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def _test_delete_app_profile_helper(self, app_profile_id, instance): - app_profile = 
instance.app_profile(app_profile_id) - self.assertTrue(app_profile.exists()) - app_profile.delete(ignore_warnings=True) - self.assertFalse(app_profile.exists()) - - -class TestTableAdminAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = Config.INSTANCE_DATA.table(TABLE_ID) - cls._table.create() - - @classmethod - def tearDownClass(cls): - cls._table.delete() - - def setUp(self): - self.tables_to_delete = [] - self.backups_to_delete = [] - - def tearDown(self): - for table in self.tables_to_delete: - table.delete() - for backup in self.backups_to_delete: - backup.delete() - - def _skip_if_emulated(self, message): - # NOTE: This method is necessary because ``Config.IN_EMULATOR`` - # is set at runtime rather than import time, which means we - # can't use the @unittest.skipIf decorator. - if Config.IN_EMULATOR: - self.skipTest(message) - - def test_list_tables(self): - # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the - # table created in `setUpClass` here will be the only one. - tables = Config.INSTANCE_DATA.list_tables() - self.assertEqual(tables, [self._table]) - - def test_exists(self): - retry_until_true = RetryResult(lambda result: result) - retry_until_false = RetryResult(lambda result: not result) - temp_table_id = "test-table_existence" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - self.assertFalse(temp_table.exists()) - temp_table.create() - self.assertTrue(retry_until_true(temp_table.exists)()) - temp_table.delete() - self.assertFalse(retry_until_false(temp_table.exists)()) - - def test_create_table(self): - temp_table_id = "test-create-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - # First, create a sorted version of our expected result. 
- name_attr = operator.attrgetter("name") - expected_tables = sorted([temp_table, self._table], key=name_attr) - - # Then query for the tables in the instance and sort them by - # name as well. - tables = Config.INSTANCE_DATA.list_tables() - sorted_tables = sorted(tables, key=name_attr) - self.assertEqual(sorted_tables, expected_tables) - - def test_test_iam_permissions(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-test-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] - permissions_allowed = temp_table.test_iam_permissions(permissions) - self.assertEqual(permissions, permissions_allowed) - - def test_get_iam_policy(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-get-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - policy = temp_table.get_iam_policy().to_api_repr() - self.assertEqual(policy["etag"], "ACAB") - self.assertEqual(policy["version"], 0) - - def test_set_iam_policy(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-set-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - new_policy = Policy() - service_account_email = Config.CLIENT._credentials.service_account_email - new_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.service_account(service_account_email) - ] - policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() - - self.assertEqual(policy_latest["bindings"][0]["role"], "roles/bigtable.admin") - self.assertIn(service_account_email, policy_latest["bindings"][0]["members"][0]) - - def test_create_table_with_families(self): - temp_table_id 
= "test-create-table-with-failies" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - gc_rule = MaxVersionsGCRule(1) - temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) - self.tables_to_delete.append(temp_table) - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, temp_table) - self.assertEqual(retrieved_col_fam.column_family_id, COLUMN_FAMILY_ID1) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def test_create_table_with_split_keys(self): - self._skip_if_emulated("Split keys are not supported by Bigtable emulator") - temp_table_id = "foo-bar-baz-split-table" - initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create(initial_split_keys=initial_split_keys) - self.tables_to_delete.append(temp_table) - - # Read Sample Row Keys for created splits - sample_row_keys = temp_table.sample_row_keys() - actual_keys = [srk.row_key for srk in sample_row_keys] - - expected_keys = initial_split_keys - expected_keys.append(b"") - - self.assertEqual(actual_keys, expected_keys) - - def test_create_column_family(self): - temp_table_id = "test-create-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, column_family._table) - self.assertEqual( - retrieved_col_fam.column_family_id, column_family.column_family_id - ) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def 
test_update_column_family(self): - temp_table_id = "test-update-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - # Check that our created table is as expected. - col_fams = temp_table.list_column_families() - self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family}) - - # Update the column family's GC rule and then try to update. - column_family.gc_rule = None - column_family.update() - - # Check that the update has propagated. - col_fams = temp_table.list_column_families() - self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) - - def test_delete_column_family(self): - temp_table_id = "test-delete-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1) - column_family.create() - - # Make sure the family is there before deleting it. - col_fams = temp_table.list_column_families() - self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1]) - - retry_504 = RetryErrors(DeadlineExceeded) - retry_504(column_family.delete)() - # Make sure we have successfully deleted it. 
- self.assertEqual(temp_table.list_column_families(), {}) - - def test_backup(self): - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable import enums - - temp_table_id = "test-backup-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - temp_backup_id = "test-backup" - - # TODO: consider using `datetime.datetime.now().timestamp()` - # when support for Python 2 is fully dropped - expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 - - # Testing `Table.backup()` factory - temp_backup = temp_table.backup( - temp_backup_id, - cluster_id=CLUSTER_ID_DATA, - expire_time=datetime.datetime.utcfromtimestamp(expire), - ) - - # Sanity check for `Backup.exists()` method - self.assertFalse(temp_backup.exists()) - - # Testing `Backup.create()` method - temp_backup.create().result() - - # Implicit testing of `Backup.delete()` method - self.backups_to_delete.append(temp_backup) - - # Testing `Backup.exists()` method - self.assertTrue(temp_backup.exists()) - - # Testing `Table.list_backups()` method - temp_table_backup = temp_table.list_backups()[0] - self.assertEqual(temp_backup_id, temp_table_backup.backup_id) - self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) - self.assertEqual(expire, temp_table_backup.expire_time.seconds) - self.assertEqual( - temp_table_backup.encryption_info.encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - - # Testing `Backup.update_expire_time()` method - expire += 3600 # A one-hour change in the `expire_time` parameter - updated_time = datetime.datetime.utcfromtimestamp(expire) - temp_backup.update_expire_time(updated_time) - test = _datetime_to_pb_timestamp(updated_time) - - # Testing `Backup.get()` method - temp_table_backup = temp_backup.get() - self.assertEqual( - test.seconds, - DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), - ) - - # Testing 
`Table.restore()` and `Backup.retore()` methods - restored_table_id = "test-backup-table-restored" - restored_table = Config.INSTANCE_DATA.table(restored_table_id) - temp_table.restore( - restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id - ).result() - tables = Config.INSTANCE_DATA.list_tables() - self.assertIn(restored_table, tables) - restored_table.delete() - - -class TestDataAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = table = Config.INSTANCE_DATA.table("test-data-api") - table.create() - table.column_family(COLUMN_FAMILY_ID1).create() - table.column_family(COLUMN_FAMILY_ID2).create() - - @classmethod - def tearDownClass(cls): - # Will also delete any data contained in the table. - cls._table.delete() - - def _maybe_emulator_skip(self, message): - # NOTE: This method is necessary because ``Config.IN_EMULATOR`` - # is set at runtime rather than import time, which means we - # can't use the @unittest.skipIf decorator. - if Config.IN_EMULATOR: - self.skipTest(message) - - def setUp(self): - self.rows_to_delete = [] - - def tearDown(self): - for row in self.rows_to_delete: - row.clear() - row.delete() - row.commit() - - def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): - timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) - timestamp1_micros = _microseconds_from_datetime(timestamp1) - # Truncate to millisecond granularity. 
- timestamp1_micros -= timestamp1_micros % 1000 - timestamp1 = _datetime_from_microseconds(timestamp1_micros) - # 1000 microseconds is a millisecond - timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) - timestamp2_micros = _microseconds_from_datetime(timestamp2) - timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) - timestamp3_micros = _microseconds_from_datetime(timestamp3) - timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) - timestamp4_micros = _microseconds_from_datetime(timestamp4) - - if row1 is not None: - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) - if row2 is not None: - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) - if row3 is not None: - row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) - if row4 is not None: - row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) - - # Create the cells we will check. - cell1 = Cell(CELL_VAL1, timestamp1_micros) - cell2 = Cell(CELL_VAL2, timestamp2_micros) - cell3 = Cell(CELL_VAL3, timestamp3_micros) - cell4 = Cell(CELL_VAL4, timestamp4_micros) - return cell1, cell2, cell3, cell4 - - def test_timestamp_filter_millisecond_granularity(self): - from google.cloud.bigtable import row_filters - - end = datetime.datetime.now() - start = end - datetime.timedelta(minutes=60) - timestamp_range = row_filters.TimestampRange(start=start, end=end) - timefilter = row_filters.TimestampRangeFilter(timestamp_range) - row_data = self._table.read_rows(filter_=timefilter) - row_data.consume_all() - - def test_mutate_rows(self): - row1 = self._table.row(ROW_KEY) - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row1.commit() - self.rows_to_delete.append(row1) - row2 = self._table.row(ROW_KEY_ALT) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) - row2.commit() - self.rows_to_delete.append(row2) - - # Change the contents - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, 
CELL_VAL3) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) - rows = [row1, row2] - statuses = self._table.mutate_rows(rows) - result = [status.code for status in statuses] - expected_result = [0, 0] - self.assertEqual(result, expected_result) - - # Check the contents - row1_data = self._table.read_row(ROW_KEY) - self.assertEqual( - row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3 - ) - row2_data = self._table.read_row(ROW_KEY_ALT) - self.assertEqual( - row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4 - ) - - def test_truncate_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.truncate(timeout=200) - - read_rows = self._table.yield_rows() - - for row in read_rows: - self.assertNotIn(row.row_key.decode("utf-8"), row_keys) - - def test_drop_by_prefix_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) - - read_rows = self._table.yield_rows() - expected_rows_count = 5 - read_rows_count = 0 - - for row in read_rows: - if row.row_key in row_keys: - read_rows_count += 1 - - self.assertEqual(expected_rows_count, read_rows_count) - - def test_yield_rows_with_row_set(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - b"row_key_7", - 
b"row_key_8", - b"row_key_9", - ] - - rows = [] - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - self.rows_to_delete.append(row) - self._table.mutate_rows(rows) - - row_set = RowSet() - row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) - row_set.add_row_key(b"row_key_1") - - read_rows = self._table.yield_rows(row_set=row_set) - - expected_row_keys = [ - b"row_key_1", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - ] - found_row_keys = [row.row_key for row in read_rows] - self.assertEqual(found_row_keys, expected_row_keys) - - def test_add_row_range_by_prefix_from_keys(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"sample_row_key_1", - b"sample_row_key_2", - ] - - rows = [] - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - self.rows_to_delete.append(row) - self._table.mutate_rows(rows) - - row_set = RowSet() - row_set.add_row_range_with_prefix("row") - - read_rows = self._table.yield_rows(row_set=row_set) - - expected_row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - ] - found_row_keys = [row.row_key for row in read_rows] - self.assertEqual(found_row_keys, expected_row_keys) - - def test_read_large_cell_limit(self): - self._maybe_emulator_skip( - "Maximum gRPC received message size for emulator is 4194304 bytes." - ) - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - number_of_bytes = 10 * 1024 * 1024 - data = b"1" * number_of_bytes # 10MB of 1's. - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) - row.commit() - - # Read back the contents of the row. 
- partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - cell = partial_row_data.cells[COLUMN_FAMILY_ID1] - column = cell[COL_NAME1] - self.assertEqual(len(column), 1) - self.assertEqual(column[0].value, data) - - def test_read_row(self): - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row) - row.commit() - - # Read back the contents of the row. - partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - # Check the cells match. - ts_attr = operator.attrgetter("timestamp") - expected_row_contents = { - COLUMN_FAMILY_ID1: { - COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), - COL_NAME2: [cell3], - }, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - self.assertEqual(partial_row_data.cells, expected_row_contents) - - def test_read_rows(self): - row = self._table.row(ROW_KEY) - row_alt = self._table.row(ROW_KEY_ALT) - self.rows_to_delete.extend([row, row_alt]) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, row, row_alt) - row.commit() - row_alt.commit() - - rows_data = self._table.read_rows() - self.assertEqual(rows_data.rows, {}) - rows_data.consume_all() - - # NOTE: We should refrain from editing protected data on instances. - # Instead we should make the values public or provide factories - # for constructing objects with them. 
- row_data = PartialRowData(ROW_KEY) - row_data._chunks_encountered = True - row_data._committed = True - row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} - - row_alt_data = PartialRowData(ROW_KEY_ALT) - row_alt_data._chunks_encountered = True - row_alt_data._committed = True - row_alt_data._cells = { - COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - - expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} - self.assertEqual(rows_data.rows, expected_rows) - - def test_read_with_label_applied(self): - self._maybe_emulator_skip("Labels not supported by Bigtable emulator") - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, _, cell3, _ = self._write_to_row(row, None, row) - row.commit() - - # Combine a label with column 1. - label1 = "label-red" - label1_filter = ApplyLabelFilter(label1) - col1_filter = ColumnQualifierRegexFilter(COL_NAME1) - chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) - - # Combine a label with column 2. - label2 = "label-blue" - label2_filter = ApplyLabelFilter(label2) - col2_filter = ColumnQualifierRegexFilter(COL_NAME2) - chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) - - # Bring our two labeled columns together. - row_filter = RowFilterUnion(filters=[chain1, chain2]) - partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - cells_returned = partial_row_data.cells - col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) - # Make sure COLUMN_FAMILY_ID1 was the only key. - self.assertEqual(len(cells_returned), 0) - - (cell1_new,) = col_fam1.pop(COL_NAME1) - (cell3_new,) = col_fam1.pop(COL_NAME2) - # Make sure COL_NAME1 and COL_NAME2 were the only keys. - self.assertEqual(len(col_fam1), 0) - - # Check that cell1 has matching values and gained a label. 
- self.assertEqual(cell1_new.value, cell1.value) - self.assertEqual(cell1_new.timestamp, cell1.timestamp) - self.assertEqual(cell1.labels, []) - self.assertEqual(cell1_new.labels, [label1]) - - # Check that cell3 has matching values and gained a label. - self.assertEqual(cell3_new.value, cell3.value) - self.assertEqual(cell3_new.timestamp, cell3.timestamp) - self.assertEqual(cell3.labels, []) - self.assertEqual(cell3_new.labels, [label2]) - - def test_access_with_non_admin_client(self): - client = Client(admin=False) - instance = client.instance(INSTANCE_ID_DATA) - table = instance.table(self._table.table_id) - self.assertIsNone(table.read_row("nonesuch")) diff --git a/tests/system/test_system.py b/tests/test_system.py similarity index 85% rename from tests/system/test_system.py rename to tests/test_system.py index ee792cbaf..9d62cbbb8 100644 --- a/tests/system/test_system.py +++ b/tests/test_system.py @@ -18,6 +18,8 @@ import time import unittest +import pytest + from google.api_core.datetime_helpers import DatetimeWithNanoseconds from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import TooManyRequests @@ -48,15 +50,15 @@ # ) UNIQUE_SUFFIX = unique_resource_id("-") -LOCATION_ID = "us-central1-c" +LOCATION_ID = f"us-central1-c" INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX TABLE_ID = "google-cloud-python-test-table" CLUSTER_ID = INSTANCE_ID + "-cluster" CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" SERVE_NODES = 3 -COLUMN_FAMILY_ID1 = u"col-fam-id1" -COLUMN_FAMILY_ID2 = u"col-fam-id2" +COLUMN_FAMILY_ID1 = "col-fam-id1" +COLUMN_FAMILY_ID2 = "col-fam-id2" COL_NAME1 = b"col-name1" COL_NAME2 = b"col-name2" COL_NAME3 = b"col-name3-but-other-fam" @@ -67,13 +69,14 @@ ROW_KEY = b"row-key" ROW_KEY_ALT = b"row-key-alt" EXISTING_INSTANCES = [] -LABEL_KEY = u"python-system" +LABEL_KEY = "python-system" label_stamp = ( datetime.datetime.utcnow() .replace(microsecond=0, tzinfo=UTC) 
.strftime("%Y-%m-%dt%H-%M-%S") ) LABELS = {LABEL_KEY: str(label_stamp)} +KMS_KEY_NAME = os.environ.get("KMS_KEY_NAME", None) class Config(object): @@ -102,6 +105,7 @@ def _retry_on_unavailable(exc): def setUpModule(): + from google.cloud import kms from google.cloud.exceptions import GrpcRendezvous from google.cloud.bigtable.enums import Instance @@ -122,13 +126,13 @@ def setUpModule(): Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) Config.CLUSTER = Config.INSTANCE.cluster( - CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES + CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, ) Config.INSTANCE_DATA = Config.CLIENT.instance( INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS ) Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID + CLUSTER_ID_DATA, location_id=LOCATION_ID, ) if not Config.IN_EMULATOR: @@ -455,6 +459,209 @@ def test_create_instance_w_two_clusters(self): for app_profile_id in app_profiles_to_delete: self._test_delete_app_profile_helper(app_profile_id, instance) + @pytest.mark.skipif( + not KMS_KEY_NAME, reason="requires KMS_KEY_NAME environment variable" + ) + def test_create_instance_w_two_clusters_cmek(self): + from google.cloud.bigtable import enums + from google.cloud.bigtable.table import ClusterState + + _PRODUCTION = enums.Instance.Type.PRODUCTION + ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS + ) + + ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" + ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" + LOCATION_ID_2 = "us-central1-f" + STORAGE_TYPE = enums.StorageType.HDD + serve_nodes = 1 + cluster_1 = instance.cluster( + ALT_CLUSTER_ID_1, + location_id=LOCATION_ID, + serve_nodes=serve_nodes, + default_storage_type=STORAGE_TYPE, + kms_key_name=KMS_KEY_NAME, + ) + cluster_2 = instance.cluster( + ALT_CLUSTER_ID_2, + location_id=LOCATION_ID_2, + 
serve_nodes=serve_nodes, + default_storage_type=STORAGE_TYPE, + kms_key_name=KMS_KEY_NAME, + ) + operation = instance.create(clusters=[cluster_1, cluster_2]) + + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. + operation.result(timeout=120) + + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + self.assertEqual(instance.type_, instance_alt.type_) + + clusters, failed_locations = instance_alt.list_clusters() + self.assertEqual(failed_locations, []) + + clusters.sort(key=lambda x: x.name) + alt_cluster_1, alt_cluster_2 = clusters + + self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) + self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) + self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) + self.assertEqual( + cluster_1.default_storage_type, alt_cluster_1.default_storage_type + ) + self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) + self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) + self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) + self.assertEqual( + cluster_2.default_storage_type, alt_cluster_2.default_storage_type + ) + + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = Config.CLIENT.list_clusters() + self.assertFalse(failed_locations) + found = set([cluster.name for cluster in clusters]) + self.assertTrue( + {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( + found + ) + ) + + temp_table_id = "test-get-cluster-states" + temp_table = instance.table(temp_table_id) + temp_table.create() + + encryption_info = temp_table.get_encryption_info() + self.assertEqual( + 
encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + + result = temp_table.get_cluster_states() + ReplicationState = enums.Table.ReplicationState + expected_results = [ + ClusterState(ReplicationState.STATE_NOT_KNOWN), + ClusterState(ReplicationState.INITIALIZING), + ClusterState(ReplicationState.PLANNED_MAINTENANCE), + ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), + ClusterState(ReplicationState.READY), + ] + cluster_id_list = result.keys() + self.assertEqual(len(cluster_id_list), 2) + self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) + self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) + for clusterstate in result.values(): + self.assertIn(clusterstate, expected_results) + + # Test create app profile with multi_cluster_routing policy + app_profiles_to_delete = [] + description = "routing policy-multy" + app_profile_id_1 = "app_profile_id_1" + routing = enums.RoutingPolicyType.ANY + self._test_create_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + ignore_warnings=True, + ) + app_profiles_to_delete.append(app_profile_id_1) + + # Test list app profiles + self._test_list_app_profiles_helper(instance, [app_profile_id_1]) + + # Test modify app profile app_profile_id_1 + # routing policy to single cluster policy, + # cluster -> ALT_CLUSTER_ID_1, + # allow_transactional_writes -> disallowed + # modify description + description = "to routing policy-single" + routing = enums.RoutingPolicyType.SINGLE + self._test_modify_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_1, + allow_transactional_writes=False, + ) + + # Test modify app profile app_profile_id_1 + # cluster -> ALT_CLUSTER_ID_2, + # 
allow_transactional_writes -> allowed
+        self._test_modify_app_profile_helper(
+            app_profile_id_1,
+            instance,
+            routing_policy_type=routing,
+            description=description,
+            cluster_id=ALT_CLUSTER_ID_2,
+            allow_transactional_writes=True,
+            ignore_warnings=True,
+        )
+
+        # Test create app profile with single cluster routing policy
+        description = "routing policy-single"
+        app_profile_id_2 = "app_profile_id_2"
+        routing = enums.RoutingPolicyType.SINGLE
+        self._test_create_app_profile_helper(
+            app_profile_id_2,
+            instance,
+            routing_policy_type=routing,
+            description=description,
+            cluster_id=ALT_CLUSTER_ID_2,
+            allow_transactional_writes=False,
+        )
+        app_profiles_to_delete.append(app_profile_id_2)
+
+        # Test list app profiles
+        self._test_list_app_profiles_helper(
+            instance, [app_profile_id_1, app_profile_id_2]
+        )
+
+        # Test modify app profile app_profile_id_2 to
+        # allow transactional writes
+        # Note: no need to set ``ignore_warnings`` to True
+        # since we are not restricting anything with this modification.
+ self._test_modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=True, + ) + + # Test modify app profile app_profile_id_2 routing policy + # to multi_cluster_routing policy + # modify description + description = "to routing policy-multy" + routing = enums.RoutingPolicyType.ANY + self._test_modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + allow_transactional_writes=False, + ignore_warnings=True, + ) + + # Test delete app profiles + for app_profile_id in app_profiles_to_delete: + self._test_delete_app_profile_helper(app_profile_id, instance) + def test_update_display_name_and_labels(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name NEW_DISPLAY_NAME = "Foo Bar Baz" @@ -891,7 +1098,7 @@ def test_backup(self): self.assertEqual(expire, temp_table_backup.expire_time.seconds) self.assertEqual( temp_table_backup.encryption_info.encryption_type, - enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, ) # Testing `Backup.update_expire_time()` method @@ -1227,13 +1434,13 @@ def test_read_with_label_applied(self): row.commit() # Combine a label with column 1. - label1 = u"label-red" + label1 = "label-red" label1_filter = ApplyLabelFilter(label1) col1_filter = ColumnQualifierRegexFilter(COL_NAME1) chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) # Combine a label with column 2. - label2 = u"label-blue" + label2 = "label-blue" label2_filter = ApplyLabelFilter(label2) col2_filter = ColumnQualifierRegexFilter(COL_NAME2) chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) From 2e3143645ff0b5d470bf4bebbf19df0f621d3776 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Mon, 22 Mar 2021 11:43:48 -0400 Subject: [PATCH 07/14] Fix test. 
--- tests/unit/test_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index f70b525d9..c95179560 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -590,7 +590,7 @@ def test_get_encryption_info(self): EncryptionInfo( encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, encryption_status=Status( - _StatusPB(Code.OK, "beats me", "I", "dunno") + _StatusPB(Code.OK, "beats me") ), kms_key_version="", ), From cf0176e62b95c671185eda9b146a68977f72751f Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Mon, 22 Mar 2021 11:45:28 -0400 Subject: [PATCH 08/14] Lint. --- tests/test_system.py | 3 +-- tests/unit/test_table.py | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/test_system.py b/tests/test_system.py index 9d62cbbb8..4d564a465 100644 --- a/tests/test_system.py +++ b/tests/test_system.py @@ -50,7 +50,7 @@ # ) UNIQUE_SUFFIX = unique_resource_id("-") -LOCATION_ID = f"us-central1-c" +LOCATION_ID = "us-central1-c" INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX TABLE_ID = "google-cloud-python-test-table" @@ -105,7 +105,6 @@ def _retry_on_unavailable(exc): def setUpModule(): - from google.cloud import kms from google.cloud.exceptions import GrpcRendezvous from google.cloud.bigtable.enums import Instance diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index c95179560..ddf5f9462 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -589,9 +589,7 @@ def test_get_encryption_info(self): "cluster-id1": ( EncryptionInfo( encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, - encryption_status=Status( - _StatusPB(Code.OK, "beats me") - ), + encryption_status=Status(_StatusPB(Code.OK, "beats me")), kms_key_version="", ), ), From 6601e0395a83c11d033da020983df18c324bd241 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Mon, 22 Mar 2021 11:47:18 -0400 Subject: [PATCH 09/14] Put system tests where nox is expecting 
to find them. --- tests/{test_system.py => system.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{test_system.py => system.py} (100%) diff --git a/tests/test_system.py b/tests/system.py similarity index 100% rename from tests/test_system.py rename to tests/system.py From 76e16515a66b3c3a16dcfbd07f566a5e71268967 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Mon, 22 Mar 2021 12:12:15 -0400 Subject: [PATCH 10/14] Test backup with CMEK. --- tests/system.py | 70 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/tests/system.py b/tests/system.py index 4d564a465..b5f8310e1 100644 --- a/tests/system.py +++ b/tests/system.py @@ -1090,6 +1090,76 @@ def test_backup(self): # Testing `Backup.exists()` method self.assertTrue(temp_backup.exists()) + # Testing `Table.list_backups()` method + temp_table_backup = temp_table.list_backups()[0] + self.assertEqual(temp_backup_id, temp_table_backup.backup_id) + self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) + self.assertEqual(expire, temp_table_backup.expire_time.seconds) + self.assertEqual( + temp_table_backup.encryption_info.encryption_type, + enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) + + # Testing `Backup.update_expire_time()` method + expire += 3600 # A one-hour change in the `expire_time` parameter + updated_time = datetime.datetime.utcfromtimestamp(expire) + temp_backup.update_expire_time(updated_time) + test = _datetime_to_pb_timestamp(updated_time) + + # Testing `Backup.get()` method + temp_table_backup = temp_backup.get() + self.assertEqual( + test.seconds, + DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), + ) + + # Testing `Table.restore()` and `Backup.retore()` methods + restored_table_id = "test-backup-table-restored" + restored_table = Config.INSTANCE_DATA.table(restored_table_id) + temp_table.restore( + restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id + ).result() + tables = 
Config.INSTANCE_DATA.list_tables() + self.assertIn(restored_table, tables) + restored_table.delete() + + @pytest.mark.skipif( + not KMS_KEY_NAME, reason="requires KMS_KEY_NAME environment variable" + ) + def test_backup_cmek(self): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums + + temp_table_id = "test-backup-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + temp_backup_id = "test-backup" + + # TODO: consider using `datetime.datetime.now().timestamp()` + # when support for Python 2 is fully dropped + expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 + + # Testing `Table.backup()` factory + temp_backup = temp_table.backup( + temp_backup_id, + cluster_id=CLUSTER_ID_DATA, + expire_time=datetime.datetime.utcfromtimestamp(expire), + ) + + # Sanity check for `Backup.exists()` method + self.assertFalse(temp_backup.exists()) + + # Testing `Backup.create()` method + temp_backup.create().result() + + # Implicit testing of `Backup.delete()` method + self.backups_to_delete.append(temp_backup) + + # Testing `Backup.exists()` method + self.assertTrue(temp_backup.exists()) + # Testing `Table.list_backups()` method temp_table_backup = temp_table.list_backups()[0] self.assertEqual(temp_backup_id, temp_table_backup.backup_id) From 731efdd43f6ea66f29a72d478a193201db509436 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Mon, 22 Mar 2021 13:08:07 -0400 Subject: [PATCH 11/14] Differentiate instance and cluster names for cmek test, so tests aren't stepping on each other's toes. Remove bogus backup with cmek test. 
--- tests/system.py | 72 +------------------------------------------------ 1 file changed, 1 insertion(+), 71 deletions(-) diff --git a/tests/system.py b/tests/system.py index b5f8310e1..9713006ff 100644 --- a/tests/system.py +++ b/tests/system.py @@ -466,7 +466,7 @@ def test_create_instance_w_two_clusters_cmek(self): from google.cloud.bigtable.table import ClusterState _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX + ALT_INSTANCE_ID = "dif-cmek" + UNIQUE_SUFFIX instance = Config.CLIENT.instance( ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS ) @@ -1123,76 +1123,6 @@ def test_backup(self): self.assertIn(restored_table, tables) restored_table.delete() - @pytest.mark.skipif( - not KMS_KEY_NAME, reason="requires KMS_KEY_NAME environment variable" - ) - def test_backup_cmek(self): - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable import enums - - temp_table_id = "test-backup-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - temp_backup_id = "test-backup" - - # TODO: consider using `datetime.datetime.now().timestamp()` - # when support for Python 2 is fully dropped - expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 - - # Testing `Table.backup()` factory - temp_backup = temp_table.backup( - temp_backup_id, - cluster_id=CLUSTER_ID_DATA, - expire_time=datetime.datetime.utcfromtimestamp(expire), - ) - - # Sanity check for `Backup.exists()` method - self.assertFalse(temp_backup.exists()) - - # Testing `Backup.create()` method - temp_backup.create().result() - - # Implicit testing of `Backup.delete()` method - self.backups_to_delete.append(temp_backup) - - # Testing `Backup.exists()` method - self.assertTrue(temp_backup.exists()) - - # Testing `Table.list_backups()` method - temp_table_backup = temp_table.list_backups()[0] - self.assertEqual(temp_backup_id, 
temp_table_backup.backup_id) - self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) - self.assertEqual(expire, temp_table_backup.expire_time.seconds) - self.assertEqual( - temp_table_backup.encryption_info.encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - - # Testing `Backup.update_expire_time()` method - expire += 3600 # A one-hour change in the `expire_time` parameter - updated_time = datetime.datetime.utcfromtimestamp(expire) - temp_backup.update_expire_time(updated_time) - test = _datetime_to_pb_timestamp(updated_time) - - # Testing `Backup.get()` method - temp_table_backup = temp_backup.get() - self.assertEqual( - test.seconds, - DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), - ) - - # Testing `Table.restore()` and `Backup.retore()` methods - restored_table_id = "test-backup-table-restored" - restored_table = Config.INSTANCE_DATA.table(restored_table_id) - temp_table.restore( - restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id - ).result() - tables = Config.INSTANCE_DATA.list_tables() - self.assertIn(restored_table, tables) - restored_table.delete() - class TestDataAPI(unittest.TestCase): @classmethod From 090080ecaa93ce8650f125cc780e95b46b8ce8f5 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Wed, 7 Apr 2021 13:46:06 -0400 Subject: [PATCH 12/14] rename `encryption.py` to `encryption_info.py` --- google/cloud/bigtable/backup.py | 2 +- google/cloud/bigtable/{encryption.py => encryption_info.py} | 0 google/cloud/bigtable/table.py | 2 +- tests/unit/test_backup.py | 2 +- tests/unit/test_table.py | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename google/cloud/bigtable/{encryption.py => encryption_info.py} (100%) diff --git a/google/cloud/bigtable/backup.py b/google/cloud/bigtable/backup.py index 1c9dc5f02..3666b7132 100644 --- a/google/cloud/bigtable/backup.py +++ b/google/cloud/bigtable/backup.py @@ -19,7 +19,7 @@ from google.cloud._helpers import 
_datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable.encryption import EncryptionInfo +from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 diff --git a/google/cloud/bigtable/encryption.py b/google/cloud/bigtable/encryption_info.py similarity index 100% rename from google/cloud/bigtable/encryption.py rename to google/cloud/bigtable/encryption_info.py diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index fd4896a1e..5ac049c05 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -28,7 +28,7 @@ from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES -from google.cloud.bigtable.encryption import EncryptionInfo +from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow from google.cloud.bigtable.row import ConditionalRow diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py index 9512eb759..6563c71ad 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/test_backup.py @@ -131,7 +131,7 @@ def test_from_pb_bad_name(self): klasse.from_pb(backup_pb, instance) def test_from_pb_success(self): - from google.cloud.bigtable.encryption import EncryptionInfo + from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.error import Status from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index ddf5f9462..cd43d1406 100644 --- a/tests/unit/test_table.py 
+++ b/tests/unit/test_table.py @@ -539,7 +539,7 @@ def test_get_encryption_info(self): from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( client as bigtable_table_admin, ) - from google.cloud.bigtable.encryption import EncryptionInfo + from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto from google.cloud.bigtable.error import Status From f1aae9d16c2010601e060b8fb01d046356863643 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Wed, 7 Apr 2021 13:53:51 -0400 Subject: [PATCH 13/14] make sure `kms_key_name` is set to `None` if `encryption_info` is not PB. --- google/cloud/bigtable/cluster.py | 2 ++ tests/unit/test_cluster.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/google/cloud/bigtable/cluster.py b/google/cloud/bigtable/cluster.py index eed5103f7..f3e79c6c2 100644 --- a/google/cloud/bigtable/cluster.py +++ b/google/cloud/bigtable/cluster.py @@ -162,6 +162,8 @@ def _update_from_pb(self, cluster_pb): self.default_storage_type = cluster_pb.default_storage_type if cluster_pb.encryption_config: self._kms_key_name = cluster_pb.encryption_config.kms_key_name + else: + self._kms_key_name = None self._state = cluster_pb.state @property diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index cba2473ed..49a32ea56 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -266,6 +266,7 @@ def test_reload(self): location_id=self.LOCATION_ID, serve_nodes=self.SERVE_NODES, default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=self.KMS_KEY_NAME, ) # Create response_pb @@ -304,6 +305,7 @@ def test_reload(self): self.assertEqual(cluster.state, STATE) self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER) self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER) + self.assertEqual(cluster.kms_key_name, None) def test_exists(self): from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( From 
e7a45ee5a15b52611d1f9e54373538312d906cdc Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Thu, 15 Apr 2021 09:07:04 -0400 Subject: [PATCH 14/14] Fix typo. Use more realistic looking test strings. --- google/cloud/bigtable/error.py | 2 +- tests/unit/test_backup.py | 8 ++++---- tests/unit/test_table.py | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/google/cloud/bigtable/error.py b/google/cloud/bigtable/error.py index 6c9a0dc4d..261cfc2c3 100644 --- a/google/cloud/bigtable/error.py +++ b/google/cloud/bigtable/error.py @@ -35,7 +35,7 @@ def code(self): Values are defined in ``google.rpc.code_pb2.Code``. - See: `googe.rpc.Code + See: `google.rpc.Code `_ :rtype: int diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py index 6563c71ad..0a5ba74c1 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/test_backup.py @@ -155,8 +155,8 @@ def test_from_pb_success(self): state=state, encryption_info=table.EncryptionInfo( encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - encryption_status=_StatusPB(Code.OK, "Looks good over here."), - kms_key_version="I dunno, like, 2?", + encryption_status=_StatusPB(Code.OK, "Status OK"), + kms_key_version="2", ), ) klasse = self._get_target_class() @@ -177,8 +177,8 @@ def test_from_pb_success(self): backup.encryption_info, EncryptionInfo( encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - encryption_status=Status(_StatusPB(Code.OK, "Looks good over here.")), - kms_key_version="I dunno, like, 2?", + encryption_status=Status(_StatusPB(Code.OK, "Status OK")), + kms_key_version="2", ), ) diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index cd43d1406..ccb8350a3 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -563,7 +563,7 @@ def test_get_encryption_info(self): cluster_states={ "cluster-id1": _ClusterStateEncryptionInfoPB( encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, - encryption_status=_StatusPB(Code.OK, "beats me"), + encryption_status=_StatusPB(Code.OK, "Status OK"), ), 
"cluster-id2": _ClusterStateEncryptionInfoPB( encryption_type=GOOGLE_DEFAULT_ENCRYPTION, @@ -573,7 +573,7 @@ def test_get_encryption_info(self): encryption_status=_StatusPB( Code.UNKNOWN, "Key version is not yet known." ), - kms_key_version="shrug", + kms_key_version="UNKNOWN", ), } ) @@ -589,7 +589,7 @@ def test_get_encryption_info(self): "cluster-id1": ( EncryptionInfo( encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, - encryption_status=Status(_StatusPB(Code.OK, "beats me")), + encryption_status=Status(_StatusPB(Code.OK, "Status OK")), kms_key_version="", ), ), @@ -606,7 +606,7 @@ def test_get_encryption_info(self): encryption_status=Status( _StatusPB(Code.UNKNOWN, "Key version is not yet known.") ), - kms_key_version="shrug", + kms_key_version="UNKNOWN", ), ), }