id
stringlengths
8
78
source
stringclasses
743 values
chunk_id
int64
1
5.05k
text
stringlengths
593
49.7k
AmazonKeyspaces-157
AmazonKeyspaces.pdf
157
.build(); ColumnDefinition defYear = ColumnDefinition.builder() .name("year") .type("int") .build(); ColumnDefinition defReleaseDate = ColumnDefinition.builder() .name("release_date") .type("timestamp") .build(); ColumnDefinition defPlot = ColumnDefinition.builder() .name("plot") .type("text") .build(); List<ColumnDefinition> colList = new ArrayList<>(); colList.add(defTitle); colList.add(defYear); colList.add(defReleaseDate); colList.add(defPlot); // Set the keys. PartitionKey yearKey = PartitionKey.builder() .name("year") .build(); PartitionKey titleKey = PartitionKey.builder() .name("title") .build(); List<PartitionKey> keyList = new ArrayList<>(); keyList.add(yearKey); keyList.add(titleKey); SchemaDefinition schemaDefinition = SchemaDefinition.builder() .partitionKeys(keyList) .allColumns(colList) .build(); PointInTimeRecovery timeRecovery = PointInTimeRecovery.builder() Learn the basics 484 Amazon Keyspaces (for Apache Cassandra) Developer Guide .status(PointInTimeRecoveryStatus.ENABLED) .build(); CreateTableRequest tableRequest = CreateTableRequest.builder() .keyspaceName(keySpace) .tableName(tableName) .schemaDefinition(schemaDefinition) .pointInTimeRecovery(timeRecovery) .build(); CreateTableResponse response = keyClient.createTable(tableRequest); System.out.println("The table ARN is " + response.resourceArn()); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } public static void listKeyspacesPaginator(KeyspacesClient keyClient) { try { ListKeyspacesRequest keyspacesRequest = ListKeyspacesRequest.builder() .maxResults(10) .build(); ListKeyspacesIterable listRes = keyClient.listKeyspacesPaginator(keyspacesRequest); listRes.stream() .flatMap(r -> r.keyspaces().stream()) .forEach(content -> System.out.println(" Name: " + content.keyspaceName())); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } public static void 
checkKeyspaceExistence(KeyspacesClient keyClient, String keyspaceName) { try { GetKeyspaceRequest keyspaceRequest = GetKeyspaceRequest.builder() .keyspaceName(keyspaceName) Learn the basics 485 Amazon Keyspaces (for Apache Cassandra) Developer Guide .build(); GetKeyspaceResponse response = keyClient.getKeyspace(keyspaceRequest); String name = response.keyspaceName(); System.out.println("The " + name + " KeySpace is ready"); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } public static void createKeySpace(KeyspacesClient keyClient, String keyspaceName) { try { CreateKeyspaceRequest keyspaceRequest = CreateKeyspaceRequest.builder() .keyspaceName(keyspaceName) .build(); CreateKeyspaceResponse response = keyClient.createKeyspace(keyspaceRequest); System.out.println("The ARN of the KeySpace is " + response.resourceArn()); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } } • For API details, see the following topics in AWS SDK for Java 2.x API Reference. • CreateKeyspace • CreateTable • DeleteKeyspace • DeleteTable • GetKeyspace • GetTable Learn the basics 486 Amazon Keyspaces (for Apache Cassandra) Developer Guide • ListKeyspaces • ListTables • RestoreTable • UpdateTable Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /** Before running this Kotlin code example, set up your development environment, including your credentials. For more information, see the following documentation topic: https://docs.aws.amazon.com/sdk-for-kotlin/latest/developer-guide/setup.html This example uses a secure file format to hold certificate information for Kotlin applications. This is required to make a connection to Amazon Keyspaces. 
For more information, see the following documentation topic: https://docs.aws.amazon.com/keyspaces/latest/devguide/using_java_driver.html This Kotlin example performs the following tasks: 1. Create a keyspace. 2. Check for keyspace existence. 3. List keyspaces using a paginator. 4. Create a table with a simple movie data schema and enable point-in-time recovery. 5. Check for the table to be in an Active state. 6. List all tables in the keyspace. 7. Use a Cassandra driver to insert some records into the Movie table. Learn the basics 487 Amazon Keyspaces (for Apache Cassandra) Developer Guide 8. Get all records from the Movie table. 9. Get a specific Movie. 10. Get a UTC timestamp for the current time. 11. Update the table schema to add a ‘watched’ Boolean column. 12. Update an item as watched. 13. Query for items with watched = True. 14. Restore the table back to the previous state using the timestamp. 15. Check for completion of the restore action. 16. Delete the table. 17. Confirm that both tables are deleted. 18. Delete the keyspace. */ /* Usage: fileName - The name of the JSON file that contains movie data. (Get this file from the GitHub repo at resources/sample_file.) keyspaceName - The name of the keyspace to create. */ val DASHES: String = String(CharArray(80)).replace("\u0000", "-") suspend fun main() { val fileName = "<Replace with the JSON file that contains movie data>" val keyspaceName = "<Replace with the name of the keyspace to create>" val titleUpdate = "The Family" val yearUpdate = 2013 val tableName = "MovieKotlin" val tableNameRestore = "MovieRestore" val loader = DriverConfigLoader.fromClasspath("application.conf") val session = CqlSession .builder() .withConfigLoader(loader) .build() println(DASHES) println("Welcome to the Amazon Keyspaces example scenario.") println(DASHES) println(DASHES) println("1. 
Create a keyspace.") createKeySpace(keyspaceName) println(DASHES) Learn the basics 488 Amazon Keyspaces (for Apache Cassandra) Developer Guide println(DASHES) delay(5000) println("2. Check for keyspace existence.") checkKeyspaceExistence(keyspaceName) println(DASHES) println(DASHES) println("3. List keyspaces using a paginator.") listKeyspacesPaginator() println(DASHES) println(DASHES) println("4. Create a table with a simple movie data schema and enable point- in-time recovery.") createTable(keyspaceName, tableName) println(DASHES) println(DASHES) println("5. Check for the table to be in an Active state.") delay(6000) checkTable(keyspaceName, tableName) println(DASHES) println(DASHES) println("6. List all tables in the keyspace.") listTables(keyspaceName) println(DASHES) println(DASHES) println("7. Use a Cassandra driver to insert some records into the Movie table.") delay(6000) loadData(session, fileName, keyspaceName) println(DASHES) println(DASHES) println("8. Get all records from the Movie table.") getMovieData(session, keyspaceName) println(DASHES) println(DASHES) println("9. Get a specific Movie.") getSpecificMovie(session, keyspaceName) Learn the basics 489 Amazon Keyspaces (for Apache Cassandra) Developer Guide println(DASHES) println(DASHES) println("10. Get a UTC timestamp for the current time.") val utc = ZonedDateTime.now(ZoneOffset.UTC) println("DATETIME = ${Date.from(utc.toInstant())}") println(DASHES) println(DASHES) println("11. Update the table schema to add a watched Boolean column.") updateTable(keyspaceName, tableName) println(DASHES) println(DASHES) println("12.
AmazonKeyspaces-158
AmazonKeyspaces.pdf
158
delay(6000) checkTable(keyspaceName, tableName) println(DASHES) println(DASHES) println("6. List all tables in the keyspace.") listTables(keyspaceName) println(DASHES) println(DASHES) println("7. Use a Cassandra driver to insert some records into the Movie table.") delay(6000) loadData(session, fileName, keyspaceName) println(DASHES) println(DASHES) println("8. Get all records from the Movie table.") getMovieData(session, keyspaceName) println(DASHES) println(DASHES) println("9. Get a specific Movie.") getSpecificMovie(session, keyspaceName) Learn the basics 489 Amazon Keyspaces (for Apache Cassandra) Developer Guide println(DASHES) println(DASHES) println("10. Get a UTC timestamp for the current time.") val utc = ZonedDateTime.now(ZoneOffset.UTC) println("DATETIME = ${Date.from(utc.toInstant())}") println(DASHES) println(DASHES) println("11. Update the table schema to add a watched Boolean column.") updateTable(keyspaceName, tableName) println(DASHES) println(DASHES) println("12. Update an item as watched.") delay(10000) // Wait 10 seconds for the update. updateRecord(session, keyspaceName, titleUpdate, yearUpdate) println(DASHES) println(DASHES) println("13. Query for items with watched = True.") getWatchedData(session, keyspaceName) println(DASHES) println(DASHES) println("14. Restore the table back to the previous state using the timestamp.") println("Note that the restore operation can take up to 20 minutes.") restoreTable(keyspaceName, utc) println(DASHES) println(DASHES) println("15. Check for completion of the restore action.") delay(5000) checkRestoredTable(keyspaceName, "MovieRestore") println(DASHES) println(DASHES) println("16. Delete both tables.") deleteTable(keyspaceName, tableName) deleteTable(keyspaceName, tableNameRestore) println(DASHES) println(DASHES) Learn the basics 490 Amazon Keyspaces (for Apache Cassandra) Developer Guide println("17. 
Confirm that both tables are deleted.") checkTableDelete(keyspaceName, tableName) checkTableDelete(keyspaceName, tableNameRestore) println(DASHES) println(DASHES) println("18. Delete the keyspace.") deleteKeyspace(keyspaceName) println(DASHES) println(DASHES) println("The scenario has completed successfully.") println(DASHES) } suspend fun deleteKeyspace(keyspaceNameVal: String?) { val deleteKeyspaceRequest = DeleteKeyspaceRequest { keyspaceName = keyspaceNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient.deleteKeyspace(deleteKeyspaceRequest) } } suspend fun checkTableDelete( keyspaceNameVal: String?, tableNameVal: String?, ) { var status: String var response: GetTableResponse val tableRequest = GetTableRequest { keyspaceName = keyspaceNameVal tableName = tableNameVal } try { KeyspacesClient { region = "us-east-1" }.use { keyClient -> // Keep looping until the table cannot be found and a ResourceNotFoundException is thrown. while (true) { response = keyClient.getTable(tableRequest) Learn the basics 491 Amazon Keyspaces (for Apache Cassandra) Developer Guide status = response.status.toString() println(". The table status is $status") delay(500) } } } catch (e: ResourceNotFoundException) { println(e.message) } println("The table is deleted") } suspend fun deleteTable( keyspaceNameVal: String?, tableNameVal: String?, ) { val tableRequest = DeleteTableRequest { keyspaceName = keyspaceNameVal tableName = tableNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient.deleteTable(tableRequest) } } suspend fun checkRestoredTable( keyspaceNameVal: String?, tableNameVal: String?, ) { var tableStatus = false var status: String var response: GetTableResponse? 
= null val tableRequest = GetTableRequest { keyspaceName = keyspaceNameVal tableName = tableNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> while (!tableStatus) { response = keyClient.getTable(tableRequest) status = response!!.status.toString() Learn the basics 492 Amazon Keyspaces (for Apache Cassandra) Developer Guide println("The table status is $status") if (status.compareTo("ACTIVE") == 0) { tableStatus = true } delay(500) } val cols = response!!.schemaDefinition?.allColumns if (cols != null) { for (def in cols) { println("The column name is ${def.name}") println("The column type is ${def.type}") } } } } suspend fun restoreTable( keyspaceName: String?, utc: ZonedDateTime, ) { // Create an aws.smithy.kotlin.runtime.time.Instant value. val timeStamp = aws.smithy.kotlin.runtime.time .Instant(utc.toInstant()) val restoreTableRequest = RestoreTableRequest { restoreTimestamp = timeStamp sourceTableName = "MovieKotlin" targetKeyspaceName = keyspaceName targetTableName = "MovieRestore" sourceKeyspaceName = keyspaceName } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response = keyClient.restoreTable(restoreTableRequest) println("The ARN of the restored table is ${response.restoredTableArn}") } } fun getWatchedData( session: CqlSession, keyspaceName: String, Learn the basics 493 Amazon Keyspaces (for Apache Cassandra) Developer Guide ) { val resultSet = session.execute("SELECT * FROM \"$keyspaceName\". 
\"MovieKotlin\" WHERE watched = true ALLOW FILTERING;") resultSet.forEach { item: Row -> println("The Movie title is ${item.getString("title")}") println("The Movie year is ${item.getInt("year")}") println("The plot is ${item.getString("plot")}") } } fun updateRecord( session: CqlSession, keySpace: String, titleUpdate: String?, yearUpdate: Int, ) { val sqlStatement = "UPDATE \"$keySpace\".\"MovieKotlin\" SET watched=true WHERE title = :k0 AND year = :k1;" val builder = BatchStatement.builder(DefaultBatchType.UNLOGGED) builder.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM) val preparedStatement = session.prepare(sqlStatement) builder.addStatement( preparedStatement .boundStatementBuilder() .setString("k0", titleUpdate) .setInt("k1", yearUpdate) .build(), ) val batchStatement = builder.build() session.execute(batchStatement) } suspend fun updateTable( keySpace: String?, tableNameVal: String?, ) { val def = ColumnDefinition { name = "watched" type = "boolean" } val tableRequest = Learn the basics 494 Amazon Keyspaces (for Apache Cassandra) Developer Guide UpdateTableRequest { keyspaceName = keySpace tableName = tableNameVal addColumns = listOf(def) } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient.updateTable(tableRequest) } } fun getSpecificMovie( session: CqlSession, keyspaceName: String, ) { val resultSet = session.execute("SELECT * FROM \"$keyspaceName\".\"MovieKotlin\" WHERE title = 'The Family' ALLOW FILTERING ;") resultSet.forEach { item: Row -> println("The Movie title is ${item.getString("title")}") println("The Movie year is ${item.getInt("year")}") println("The plot is ${item.getString("plot")}") } } // Get records from the Movie table. fun getMovieData( session: CqlSession, keyspaceName: String, ) { val resultSet = session.execute("SELECT * FROM \"$keyspaceName\". 
\"MovieKotlin\";") resultSet.forEach { item: Row -> println("The Movie title is ${item.getString("title")}") println("The Movie year is ${item.getInt("year")}") println("The plot is ${item.getString("plot")}") } } // Load data into the table. fun loadData(
AmazonKeyspaces-159
AmazonKeyspaces.pdf
159
-> keyClient.updateTable(tableRequest) } } fun getSpecificMovie( session: CqlSession, keyspaceName: String, ) { val resultSet = session.execute("SELECT * FROM \"$keyspaceName\".\"MovieKotlin\" WHERE title = 'The Family' ALLOW FILTERING ;") resultSet.forEach { item: Row -> println("The Movie title is ${item.getString("title")}") println("The Movie year is ${item.getInt("year")}") println("The plot is ${item.getString("plot")}") } } // Get records from the Movie table. fun getMovieData( session: CqlSession, keyspaceName: String, ) { val resultSet = session.execute("SELECT * FROM \"$keyspaceName\". \"MovieKotlin\";") resultSet.forEach { item: Row -> println("The Movie title is ${item.getString("title")}") println("The Movie year is ${item.getInt("year")}") println("The plot is ${item.getString("plot")}") } } // Load data into the table. fun loadData( session: CqlSession, fileName: String, Learn the basics 495 Amazon Keyspaces (for Apache Cassandra) Developer Guide keySpace: String, ) { val sqlStatement = "INSERT INTO \"$keySpace\".\"MovieKotlin\" (title, year, plot) values (:k0, :k1, :k2)" val parser = JsonFactory().createParser(File(fileName)) val rootNode = ObjectMapper().readTree<JsonNode>(parser) val iter: Iterator<JsonNode> = rootNode.iterator() var currentNode: ObjectNode var t = 0 while (iter.hasNext()) { if (t == 50) { break } currentNode = iter.next() as ObjectNode val year = currentNode.path("year").asInt() val title = currentNode.path("title").asText() val info = currentNode.path("info").toString() // Insert the data into the Amazon Keyspaces table. 
val builder = BatchStatement.builder(DefaultBatchType.UNLOGGED) builder.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM) val preparedStatement: PreparedStatement = session.prepare(sqlStatement) builder.addStatement( preparedStatement .boundStatementBuilder() .setString("k0", title) .setInt("k1", year) .setString("k2", info) .build(), ) val batchStatement = builder.build() session.execute(batchStatement) t++ } } suspend fun listTables(keyspaceNameVal: String?) { val tablesRequest = ListTablesRequest { keyspaceName = keyspaceNameVal Learn the basics 496 Amazon Keyspaces (for Apache Cassandra) Developer Guide } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient .listTablesPaginated(tablesRequest) .transform { it.tables?.forEach { obj -> emit(obj) } } .collect { obj -> println(" ARN: ${obj.resourceArn} Table name: ${obj.tableName}") } } } suspend fun checkTable( keyspaceNameVal: String?, tableNameVal: String?, ) { var tableStatus = false var status: String var response: GetTableResponse? = null val tableRequest = GetTableRequest { keyspaceName = keyspaceNameVal tableName = tableNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> while (!tableStatus) { response = keyClient.getTable(tableRequest) status = response!!.status.toString() println(". The table status is $status") if (status.compareTo("ACTIVE") == 0) { tableStatus = true } delay(500) } val cols: List<ColumnDefinition>? = response!!.schemaDefinition?.allColumns if (cols != null) { for (def in cols) { println("The column name is ${def.name}") println("The column type is ${def.type}") } } } Learn the basics 497 Amazon Keyspaces (for Apache Cassandra) Developer Guide } suspend fun createTable( keySpaceVal: String?, tableNameVal: String?, ) { // Set the columns. 
val defTitle = ColumnDefinition { name = "title" type = "text" } val defYear = ColumnDefinition { name = "year" type = "int" } val defReleaseDate = ColumnDefinition { name = "release_date" type = "timestamp" } val defPlot = ColumnDefinition { name = "plot" type = "text" } val colList = ArrayList<ColumnDefinition>() colList.add(defTitle) colList.add(defYear) colList.add(defReleaseDate) colList.add(defPlot) // Set the keys. val yearKey = PartitionKey { name = "year" } val titleKey = Learn the basics 498 Amazon Keyspaces (for Apache Cassandra) Developer Guide PartitionKey { name = "title" } val keyList = ArrayList<PartitionKey>() keyList.add(yearKey) keyList.add(titleKey) val schemaDefinitionOb = SchemaDefinition { partitionKeys = keyList allColumns = colList } val timeRecovery = PointInTimeRecovery { status = PointInTimeRecoveryStatus.Enabled } val tableRequest = CreateTableRequest { keyspaceName = keySpaceVal tableName = tableNameVal schemaDefinition = schemaDefinitionOb pointInTimeRecovery = timeRecovery } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response = keyClient.createTable(tableRequest) println("The table ARN is ${response.resourceArn}") } } suspend fun listKeyspacesPaginator() { KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient .listKeyspacesPaginated(ListKeyspacesRequest {}) .transform { it.keyspaces?.forEach { obj -> emit(obj) } } .collect { obj -> println("Name: ${obj.keyspaceName}") } } } Learn the basics 499 Amazon Keyspaces (for Apache Cassandra) Developer Guide suspend fun checkKeyspaceExistence(keyspaceNameVal: String?) 
{ val keyspaceRequest = GetKeyspaceRequest { keyspaceName = keyspaceNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response: GetKeyspaceResponse = keyClient.getKeyspace(keyspaceRequest) val name = response.keyspaceName println("The $name KeySpace is ready") } } suspend fun createKeySpace(keyspaceNameVal: String) { val keyspaceRequest = CreateKeyspaceRequest { keyspaceName = keyspaceNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response = keyClient.createKeyspace(keyspaceRequest) println("The ARN of the KeySpace is ${response.resourceArn}") } } • For API details, see the following topics in AWS SDK for Kotlin API reference. • CreateKeyspace • CreateTable • DeleteKeyspace • DeleteTable • GetKeyspace • GetTable • ListKeyspaces • ListTables • RestoreTable • UpdateTable Learn the basics 500 Amazon Keyspaces (for Apache Cassandra) Developer Guide Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. Run an interactive scenario at a command prompt. class KeyspaceScenario: """Runs an interactive scenario that shows how to get started using Amazon Keyspaces.""" def __init__(self, ks_wrapper): """ :param ks_wrapper: An object that wraps Amazon Keyspace actions. """ self.ks_wrapper = ks_wrapper @demo_func def create_keyspace(self): """ 1. Creates a keyspace. 2. Lists up to 10 keyspaces in your
AmazonKeyspaces-160
AmazonKeyspaces.pdf
160
ListTables • RestoreTable • UpdateTable Learn the basics 500 Amazon Keyspaces (for Apache Cassandra) Developer Guide Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. Run an interactive scenario at a command prompt. class KeyspaceScenario: """Runs an interactive scenario that shows how to get started using Amazon Keyspaces.""" def __init__(self, ks_wrapper): """ :param ks_wrapper: An object that wraps Amazon Keyspace actions. """ self.ks_wrapper = ks_wrapper @demo_func def create_keyspace(self): """ 1. Creates a keyspace. 2. Lists up to 10 keyspaces in your account. """ print("Let's create a keyspace.") ks_name = q.ask( "Enter a name for your new keyspace.\nThe name can contain only letters, " "numbers and underscores: ", q.non_empty, ) if self.ks_wrapper.exists_keyspace(ks_name): print(f"A keyspace named {ks_name} exists.") else: ks_arn = self.ks_wrapper.create_keyspace(ks_name) ks_exists = False while not ks_exists: wait(3) ks_exists = self.ks_wrapper.exists_keyspace(ks_name) Learn the basics 501 Amazon Keyspaces (for Apache Cassandra) Developer Guide print(f"Created a new keyspace.\n\t{ks_arn}.") print("The first 10 keyspaces in your account are:\n") self.ks_wrapper.list_keyspaces(10) @demo_func def create_table(self): """ 1. Creates a table in the keyspace. The table is configured with a schema to hold movie data and has point-in-time recovery enabled. 2. Waits for the table to be in an active state. 3. Displays schema information for the table. 4. Lists tables in the keyspace. """ print("Let's create a table for movies in your keyspace.") table_name = q.ask("Enter a name for your table: ", q.non_empty) table = self.ks_wrapper.get_table(table_name) if table is not None: print( f"A table named {table_name} already exists in keyspace " f"{self.ks_wrapper.ks_name}." 
) else: table_arn = self.ks_wrapper.create_table(table_name) print(f"Created table {table_name}:\n\t{table_arn}") table = {"status": None} print("Waiting for your table to be ready...") while table["status"] != "ACTIVE": wait(5) table = self.ks_wrapper.get_table(table_name) print(f"Your table is {table['status']}. Its schema is:") pp(table["schemaDefinition"]) print("\nThe tables in your keyspace are:\n") self.ks_wrapper.list_tables() @demo_func def ensure_tls_cert(self): """ Ensures you have a TLS certificate available to use to secure the connection to the keyspace. This function downloads a default certificate or lets you specify your own. """ Learn the basics 502 Amazon Keyspaces (for Apache Cassandra) Developer Guide print("To connect to your keyspace, you must have a TLS certificate.") print("Checking for TLS certificate...") cert_path = os.path.join( os.path.dirname(__file__), QueryManager.DEFAULT_CERT_FILE ) if not os.path.exists(cert_path): cert_choice = q.ask( f"Press enter to download a certificate from {QueryManager.CERT_URL} " f"or enter the full path to the certificate you want to use: " ) if cert_choice: cert_path = cert_choice else: cert = requests.get(QueryManager.CERT_URL).text with open(cert_path, "w") as cert_file: cert_file.write(cert) else: q.ask(f"Certificate {cert_path} found. Press Enter to continue.") print( f"Certificate {cert_path} will be used to secure the connection to your keyspace." ) return cert_path @demo_func def query_table(self, qm, movie_file): """ 1. Adds movies to the table from a sample movie data file. 2. Gets a list of movies from the table and lets you select one. 3. Displays more information about the selected movie. 
""" qm.add_movies(self.ks_wrapper.table_name, movie_file) movies = qm.get_movies(self.ks_wrapper.table_name) print(f"Added {len(movies)} movies to the table:") sel = q.choose("Pick one to learn more about it: ", [m.title for m in movies]) movie_choice = qm.get_movie( self.ks_wrapper.table_name, movies[sel].title, movies[sel].year ) print(movie_choice.title) print(f"\tReleased: {movie_choice.release_date}") print(f"\tPlot: {movie_choice.plot}") Learn the basics 503 Amazon Keyspaces (for Apache Cassandra) Developer Guide @demo_func def update_and_restore_table(self, qm): """ 1. Updates the table by adding a column to track watched movies. 2. Marks some of the movies as watched. 3. Gets the list of watched movies from the table. 4. Restores to a movies_restored table at a previous point in time. 5. Gets the list of movies from the restored table. """ print("Let's add a column to record which movies you've watched.") pre_update_timestamp = datetime.utcnow() print( f"Recorded the current UTC time of {pre_update_timestamp} so we can restore the table later." ) self.ks_wrapper.update_table() print("Waiting for your table to update...") table = {"status": "UPDATING"} while table["status"] != "ACTIVE": wait(5) table = self.ks_wrapper.get_table(self.ks_wrapper.table_name) print("Column 'watched' added to table.") q.ask( "Let's mark some of the movies as watched. Press Enter when you're ready.\n" ) movies = qm.get_movies(self.ks_wrapper.table_name) for movie in movies[:10]: qm.watched_movie(self.ks_wrapper.table_name, movie.title, movie.year) print(f"Marked {movie.title} as watched.") movies = qm.get_movies(self.ks_wrapper.table_name, watched=True) print("-" * 88) print("The watched movies in our table are:\n") for movie in movies: print(movie.title) print("-" * 88) if q.ask( "Do you want to restore the table to the way it was before all of these\n" "updates? Keep in mind, this can take up to 20 minutes. 
(y/n) ", q.is_yesno, ): starting_table_name = self.ks_wrapper.table_name Learn the basics 504 Amazon Keyspaces (for Apache Cassandra) Developer Guide table_name_restored = self.ks_wrapper.restore_table(pre_update_timestamp) table = {"status": "RESTORING"} while table["status"] != "ACTIVE": wait(10) table = self.ks_wrapper.get_table(table_name_restored) print( f"Restored {starting_table_name} to {table_name_restored} " f"at a point in time of {pre_update_timestamp}." ) movies = qm.get_movies(table_name_restored) print("Now the movies in our table are:") for movie in movies: print(movie.title) def cleanup(self, cert_path): """ 1. Deletes the table and waits for it to be removed. 2. Deletes the keyspace.
AmazonKeyspaces-161
AmazonKeyspaces.pdf
161
the way it was before all of these\n" "updates? Keep in mind, this can take up to 20 minutes. (y/n) ", q.is_yesno, ): starting_table_name = self.ks_wrapper.table_name Learn the basics 504 Amazon Keyspaces (for Apache Cassandra) Developer Guide table_name_restored = self.ks_wrapper.restore_table(pre_update_timestamp) table = {"status": "RESTORING"} while table["status"] != "ACTIVE": wait(10) table = self.ks_wrapper.get_table(table_name_restored) print( f"Restored {starting_table_name} to {table_name_restored} " f"at a point in time of {pre_update_timestamp}." ) movies = qm.get_movies(table_name_restored) print("Now the movies in our table are:") for movie in movies: print(movie.title) def cleanup(self, cert_path): """ 1. Deletes the table and waits for it to be removed. 2. Deletes the keyspace. :param cert_path: The path of the TLS certificate used in the demo. If the certificate was downloaded during the demo, it is removed. """ if q.ask( f"Do you want to delete your {self.ks_wrapper.table_name} table and " f"{self.ks_wrapper.ks_name} keyspace? (y/n) ", q.is_yesno, ): table_name = self.ks_wrapper.table_name self.ks_wrapper.delete_table() table = self.ks_wrapper.get_table(table_name) print("Waiting for the table to be deleted.") while table is not None: wait(5) table = self.ks_wrapper.get_table(table_name) print("Table deleted.") self.ks_wrapper.delete_keyspace() print( "Keyspace deleted. If you chose to restore your table during the " "demo, the original table is also deleted." 
) Learn the basics 505 Amazon Keyspaces (for Apache Cassandra) Developer Guide if cert_path == os.path.join( os.path.dirname(__file__), QueryManager.DEFAULT_CERT_FILE ) and os.path.exists(cert_path): os.remove(cert_path) print("Removed certificate that was downloaded for this demo.") def run_scenario(self): logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") print("-" * 88) print("Welcome to the Amazon Keyspaces (for Apache Cassandra) demo.") print("-" * 88) self.create_keyspace() self.create_table() cert_file_path = self.ensure_tls_cert() # Use a context manager to ensure the connection to the keyspace is closed. with QueryManager( cert_file_path, boto3.DEFAULT_SESSION, self.ks_wrapper.ks_name ) as qm: self.query_table(qm, "../../../resources/sample_files/movies.json") self.update_and_restore_table(qm) self.cleanup(cert_file_path) print("\nThanks for watching!") print("-" * 88) if __name__ == "__main__": try: scenario = KeyspaceScenario(KeyspaceWrapper.from_client()) scenario.run_scenario() except Exception: logging.exception("Something went wrong with the demo.") Define a class that wraps keyspace and table actions. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" Learn the basics 506 Amazon Keyspaces (for Apache Cassandra) Developer Guide def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def create_keyspace(self, name): """ Creates a keyspace. :param name: The name to give the keyspace. :return: The Amazon Resource Name (ARN) of the new keyspace. 
""" try: response = self.keyspaces_client.create_keyspace(keyspaceName=name) self.ks_name = name self.ks_arn = response["resourceArn"] except ClientError as err: logger.error( "Couldn't create %s. Here's why: %s: %s", name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise else: return self.ks_arn def exists_keyspace(self, name): """ Checks whether a keyspace exists. :param name: The name of the keyspace to look up. Learn the basics 507 Amazon Keyspaces (for Apache Cassandra) Developer Guide :return: True when the keyspace exists. Otherwise, False. """ try: response = self.keyspaces_client.get_keyspace(keyspaceName=name) self.ks_name = response["keyspaceName"] self.ks_arn = response["resourceArn"] exists = True except ClientError as err: if err.response["Error"]["Code"] == "ResourceNotFoundException": logger.info("Keyspace %s does not exist.", name) exists = False else: logger.error( "Couldn't verify %s exists. Here's why: %s: %s", name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise return exists def list_keyspaces(self, limit): """ Lists the keyspaces in your account. :param limit: The maximum number of keyspaces to list. """ try: ks_paginator = self.keyspaces_client.get_paginator("list_keyspaces") for page in ks_paginator.paginate(PaginationConfig={"MaxItems": limit}): for ks in page["keyspaces"]: print(ks["keyspaceName"]) print(f"\t{ks['resourceArn']}") except ClientError as err: logger.error( "Couldn't list keyspaces. Here's why: %s: %s", err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise Learn the basics 508 Amazon Keyspaces (for Apache Cassandra) Developer Guide def create_table(self, table_name): """ Creates a table in the keyspace. The table is created with a schema for storing movie data and has point-in-time recovery enabled. :param table_name: The name to give the table. :return: The ARN of the new table. 
""" try: response = self.keyspaces_client.create_table( keyspaceName=self.ks_name, tableName=table_name, schemaDefinition={ "allColumns": [ {"name": "title", "type": "text"}, {"name": "year", "type": "int"}, {"name": "release_date", "type": "timestamp"}, {"name": "plot", "type": "text"}, ], "partitionKeys": [{"name": "year"}, {"name": "title"}], }, pointInTimeRecovery={"status": "ENABLED"}, ) except ClientError as err: logger.error( "Couldn't create table %s. Here's why: %s: %s", table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise else: return response["resourceArn"] def get_table(self, table_name): """ Gets data about a table in the keyspace. :param table_name: The name of the table to look up. :return: Data about the table. """ try: Learn the basics 509 Amazon Keyspaces (for Apache Cassandra) Developer Guide response = self.keyspaces_client.get_table( keyspaceName=self.ks_name, tableName=table_name ) self.table_name = table_name except ClientError as err: if err.response["Error"]["Code"] == "ResourceNotFoundException": logger.info("Table %s does not exist.", table_name) self.table_name = None response = None else: logger.error( "Couldn't verify %s exists. Here's why: %s: %s", table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise return response def list_tables(self): """ Lists the tables in the keyspace. """ try: table_paginator = self.keyspaces_client.get_paginator("list_tables") for page in table_paginator.paginate(keyspaceName=self.ks_name): for table in page["tables"]: print(table["tableName"]) print(f"\t{table['resourceArn']}") except ClientError as err: logger.error( "Couldn't list tables in keyspace %s. Here's why: %s: %s", self.ks_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise def update_table(self): """ Updates the schema of the table. Learn the basics
AmazonKeyspaces-162
AmazonKeyspaces.pdf
162
self.keyspaces_client.get_table( keyspaceName=self.ks_name, tableName=table_name ) self.table_name = table_name except ClientError as err: if err.response["Error"]["Code"] == "ResourceNotFoundException": logger.info("Table %s does not exist.", table_name) self.table_name = None response = None else: logger.error( "Couldn't verify %s exists. Here's why: %s: %s", table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise return response def list_tables(self): """ Lists the tables in the keyspace. """ try: table_paginator = self.keyspaces_client.get_paginator("list_tables") for page in table_paginator.paginate(keyspaceName=self.ks_name): for table in page["tables"]: print(table["tableName"]) print(f"\t{table['resourceArn']}") except ClientError as err: logger.error( "Couldn't list tables in keyspace %s. Here's why: %s: %s", self.ks_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise def update_table(self): """ Updates the schema of the table. Learn the basics 510 Amazon Keyspaces (for Apache Cassandra) Developer Guide This example updates a table of movie data by adding a new column that tracks whether the movie has been watched. """ try: self.keyspaces_client.update_table( keyspaceName=self.ks_name, tableName=self.table_name, addColumns=[{"name": "watched", "type": "boolean"}], ) except ClientError as err: logger.error( "Couldn't update table %s. Here's why: %s: %s", self.table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise def restore_table(self, restore_timestamp): """ Restores the table to a previous point in time. The table is restored to a new table in the same keyspace. :param restore_timestamp: The point in time to restore the table. This time must be in UTC format. :return: The name of the restored table. 
""" try: restored_table_name = f"{self.table_name}_restored" self.keyspaces_client.restore_table( sourceKeyspaceName=self.ks_name, sourceTableName=self.table_name, targetKeyspaceName=self.ks_name, targetTableName=restored_table_name, restoreTimestamp=restore_timestamp, ) except ClientError as err: logger.error( "Couldn't restore table %s. Here's why: %s: %s", restore_timestamp, err.response["Error"]["Code"], err.response["Error"]["Message"], Learn the basics 511 Amazon Keyspaces (for Apache Cassandra) Developer Guide ) raise else: return restored_table_name def delete_table(self): """ Deletes the table from the keyspace. """ try: self.keyspaces_client.delete_table( keyspaceName=self.ks_name, tableName=self.table_name ) self.table_name = None except ClientError as err: logger.error( "Couldn't delete table %s. Here's why: %s: %s", self.table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise def delete_keyspace(self): """ Deletes the keyspace. """ try: self.keyspaces_client.delete_keyspace(keyspaceName=self.ks_name) self.ks_name = None except ClientError as err: logger.error( "Couldn't delete keyspace %s. Here's why: %s: %s", self.ks_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise Learn the basics 512 Amazon Keyspaces (for Apache Cassandra) Developer Guide Define a class that creates a TLS connection to a keyspace, authenticates with SigV4, and sends CQL queries to a table in the keyspace. class QueryManager: """ Manages queries to an Amazon Keyspaces (for Apache Cassandra) keyspace. Queries are secured by TLS and authenticated by using the Signature V4 (SigV4) AWS signing protocol. This is more secure than sending username and password with a plain-text authentication provider. This example downloads a default certificate to secure TLS, or lets you specify your own. This example uses a table of movie data to demonstrate basic queries. 
""" DEFAULT_CERT_FILE = "sf-class2-root.crt" CERT_URL = f"https://certs.secureserver.net/repository/sf-class2-root.crt" def __init__(self, cert_file_path, boto_session, keyspace_name): """ :param cert_file_path: The path and file name of the certificate used for TLS. :param boto_session: A Boto3 session. This is used to acquire your AWS credentials. :param keyspace_name: The name of the keyspace to connect. """ self.cert_file_path = cert_file_path self.boto_session = boto_session self.ks_name = keyspace_name self.cluster = None self.session = None def __enter__(self): """ Creates a session connection to the keyspace that is secured by TLS and authenticated by SigV4. """ ssl_context = SSLContext(PROTOCOL_TLSv1_2) Learn the basics 513 Amazon Keyspaces (for Apache Cassandra) Developer Guide ssl_context.load_verify_locations(self.cert_file_path) ssl_context.verify_mode = CERT_REQUIRED auth_provider = SigV4AuthProvider(self.boto_session) contact_point = f"cassandra. {self.boto_session.region_name}.amazonaws.com" exec_profile = ExecutionProfile( consistency_level=ConsistencyLevel.LOCAL_QUORUM, load_balancing_policy=DCAwareRoundRobinPolicy(), ) self.cluster = Cluster( [contact_point], ssl_context=ssl_context, auth_provider=auth_provider, port=9142, execution_profiles={EXEC_PROFILE_DEFAULT: exec_profile}, protocol_version=4, ) self.cluster.__enter__() self.session = self.cluster.connect(self.ks_name) return self def __exit__(self, *args): """ Exits the cluster. This shuts down all existing session connections. """ self.cluster.__exit__(*args) def add_movies(self, table_name, movie_file_path): """ Gets movies from a JSON file and adds them to a table in the keyspace. :param table_name: The name of the table. :param movie_file_path: The path and file name of a JSON file that contains movie data. 
""" with open(movie_file_path, "r") as movie_file: movies = json.loads(movie_file.read()) stmt = self.session.prepare( f"INSERT INTO {table_name} (year, title, release_date, plot) VALUES (?, ?, ?, ?);" ) for movie in movies[:20]: self.session.execute( stmt, Learn the basics 514 Amazon Keyspaces (for Apache Cassandra) Developer Guide parameters=[ movie["year"], movie["title"], date.fromisoformat(movie["info"] ["release_date"].partition("T")[0]), movie["info"]["plot"], ], ) def get_movies(self, table_name, watched=None): """ Gets the title and year of the full list of movies from the table. :param table_name: The name of the movie table. :param watched: When specified, the returned list of movies is filtered to either movies that have been watched or movies that have not been watched. Otherwise, all movies are returned. :return: A list of movies in the table. """ if watched is None: stmt = SimpleStatement(f"SELECT title, year from {table_name}") params = None else: stmt = SimpleStatement( f"SELECT title, year from {table_name} WHERE watched = %s ALLOW FILTERING" ) params = [watched] return self.session.execute(stmt, parameters=params).all() def get_movie(self, table_name, title, year): """ Gets a single movie from the table, by title and year. :param table_name: The name of the movie table. :param title: The title of the movie. :param year: The year of
AmazonKeyspaces-163
AmazonKeyspaces.pdf
163
either movies that have been watched or movies that have not been watched. Otherwise, all movies are returned. :return: A list of movies in the table. """ if watched is None: stmt = SimpleStatement(f"SELECT title, year from {table_name}") params = None else: stmt = SimpleStatement( f"SELECT title, year from {table_name} WHERE watched = %s ALLOW FILTERING" ) params = [watched] return self.session.execute(stmt, parameters=params).all() def get_movie(self, table_name, title, year): """ Gets a single movie from the table, by title and year. :param table_name: The name of the movie table. :param title: The title of the movie. :param year: The year of the movie's release. :return: The requested movie. """ return self.session.execute( SimpleStatement( f"SELECT * from {table_name} WHERE title = %s AND year = %s" Learn the basics 515 Amazon Keyspaces (for Apache Cassandra) Developer Guide ), parameters=[title, year], ).one() def watched_movie(self, table_name, title, year): """ Updates a movie as having been watched. :param table_name: The name of the movie table. :param title: The title of the movie. :param year: The year of the movie's release. """ self.session.execute( SimpleStatement( f"UPDATE {table_name} SET watched=true WHERE title = %s AND year = %s" ), parameters=[title, year], ) • For API details, see the following topics in AWS SDK for Python (Boto3) API Reference. • CreateKeyspace • CreateTable • DeleteKeyspace • DeleteTable • GetKeyspace • GetTable • ListKeyspaces • ListTables • RestoreTable • UpdateTable For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Learn the basics 516 Amazon Keyspaces (for Apache Cassandra) Developer Guide Actions for Amazon Keyspaces using AWS SDKs The following code examples demonstrate how to perform individual Amazon Keyspaces actions with AWS SDKs. 
Each example includes a link to GitHub, where you can find instructions for setting up and running the code. The following examples include only the most commonly used actions. For a complete list, see the Amazon Keyspaces (for Apache Cassandra) API Reference. Examples • Use CreateKeyspace with an AWS SDK • Use CreateTable with an AWS SDK • Use DeleteKeyspace with an AWS SDK • Use DeleteTable with an AWS SDK • Use GetKeyspace with an AWS SDK • Use GetTable with an AWS SDK • Use ListKeyspaces with an AWS SDK • Use ListTables with an AWS SDK • Use RestoreTable with an AWS SDK • Use UpdateTable with an AWS SDK Use CreateKeyspace with an AWS SDK The following code examples show how to use CreateKeyspace. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics Actions 517 Amazon Keyspaces (for Apache Cassandra) Developer Guide .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Create a new keyspace. /// </summary> /// <param name="keyspaceName">The name for the new keyspace.</param> /// <returns>The Amazon Resource Name (ARN) of the new keyspace.</returns> public async Task<string> CreateKeyspace(string keyspaceName) { var response = await _amazonKeyspaces.CreateKeyspaceAsync( new CreateKeyspaceRequest { KeyspaceName = keyspaceName }); return response.ResourceArn; } • For API details, see CreateKeyspace in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. 
public static void createKeySpace(KeyspacesClient keyClient, String keyspaceName) { try { Actions 518 Amazon Keyspaces (for Apache Cassandra) Developer Guide CreateKeyspaceRequest keyspaceRequest = CreateKeyspaceRequest.builder() .keyspaceName(keyspaceName) .build(); CreateKeyspaceResponse response = keyClient.createKeyspace(keyspaceRequest); System.out.println("The ARN of the KeySpace is " + response.resourceArn()); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see CreateKeyspace in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun createKeySpace(keyspaceNameVal: String) { val keyspaceRequest = CreateKeyspaceRequest { keyspaceName = keyspaceNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response = keyClient.createKeyspace(keyspaceRequest) println("The ARN of the KeySpace is ${response.resourceArn}") } } Actions 519 Amazon Keyspaces (for Apache Cassandra) Developer Guide • For API details, see CreateKeyspace in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def create_keyspace(self, name):
AmazonKeyspaces-164
AmazonKeyspaces.pdf
164
${response.resourceArn}") } } Actions 519 Amazon Keyspaces (for Apache Cassandra) Developer Guide • For API details, see CreateKeyspace in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def create_keyspace(self, name): """ Creates a keyspace. :param name: The name to give the keyspace. :return: The Amazon Resource Name (ARN) of the new keyspace. """ try: response = self.keyspaces_client.create_keyspace(keyspaceName=name) self.ks_name = name self.ks_arn = response["resourceArn"] Actions 520 Amazon Keyspaces (for Apache Cassandra) Developer Guide except ClientError as err: logger.error( "Couldn't create %s. Here's why: %s: %s", name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise else: return self.ks_arn • For API details, see CreateKeyspace in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use CreateTable with an AWS SDK The following code examples show how to use CreateTable. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. 
/// <summary> /// Create a new Amazon Keyspaces table. Actions 521 Amazon Keyspaces (for Apache Cassandra) /// </summary> Developer Guide /// <param name="keyspaceName">The keyspace where the table will be created.</param> /// <param name="schema">The schema for the new table.</param> /// <param name="tableName">The name of the new table.</param> /// <returns>The Amazon Resource Name (ARN) of the new table.</returns> public async Task<string> CreateTable(string keyspaceName, SchemaDefinition schema, string tableName) { var request = new CreateTableRequest { KeyspaceName = keyspaceName, SchemaDefinition = schema, TableName = tableName, PointInTimeRecovery = new PointInTimeRecovery { Status = PointInTimeRecoveryStatus.ENABLED } }; var response = await _amazonKeyspaces.CreateTableAsync(request); return response.ResourceArn; } • For API details, see CreateTable in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void createTable(KeyspacesClient keyClient, String keySpace, String tableName) { try { // Set the columns. ColumnDefinition defTitle = ColumnDefinition.builder() .name("title") Actions 522 Amazon Keyspaces (for Apache Cassandra) Developer Guide .type("text") .build(); ColumnDefinition defYear = ColumnDefinition.builder() .name("year") .type("int") .build(); ColumnDefinition defReleaseDate = ColumnDefinition.builder() .name("release_date") .type("timestamp") .build(); ColumnDefinition defPlot = ColumnDefinition.builder() .name("plot") .type("text") .build(); List<ColumnDefinition> colList = new ArrayList<>(); colList.add(defTitle); colList.add(defYear); colList.add(defReleaseDate); colList.add(defPlot); // Set the keys. 
PartitionKey yearKey = PartitionKey.builder() .name("year") .build(); PartitionKey titleKey = PartitionKey.builder() .name("title") .build(); List<PartitionKey> keyList = new ArrayList<>(); keyList.add(yearKey); keyList.add(titleKey); SchemaDefinition schemaDefinition = SchemaDefinition.builder() .partitionKeys(keyList) .allColumns(colList) .build(); PointInTimeRecovery timeRecovery = PointInTimeRecovery.builder() .status(PointInTimeRecoveryStatus.ENABLED) Actions 523 Amazon Keyspaces (for Apache Cassandra) Developer Guide .build(); CreateTableRequest tableRequest = CreateTableRequest.builder() .keyspaceName(keySpace) .tableName(tableName) .schemaDefinition(schemaDefinition) .pointInTimeRecovery(timeRecovery) .build(); CreateTableResponse response = keyClient.createTable(tableRequest); System.out.println("The table ARN is " + response.resourceArn()); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see CreateTable in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun createTable( keySpaceVal: String?, tableNameVal: String?, ) { // Set the columns. val defTitle = ColumnDefinition { name = "title" type = "text" } Actions 524 Amazon Keyspaces (for Apache Cassandra) Developer Guide val defYear = ColumnDefinition { name = "year" type = "int" } val defReleaseDate = ColumnDefinition { name = "release_date" type = "timestamp" } val defPlot = ColumnDefinition { name = "plot" type = "text" } val colList = ArrayList<ColumnDefinition>() colList.add(defTitle) colList.add(defYear) colList.add(defReleaseDate) colList.add(defPlot) // Set the keys. 
val yearKey = PartitionKey { name = "year" } val titleKey = PartitionKey { name = "title" } val keyList = ArrayList<PartitionKey>() keyList.add(yearKey) keyList.add(titleKey) val schemaDefinitionOb = SchemaDefinition { partitionKeys = keyList allColumns = colList } Actions 525 Amazon Keyspaces (for Apache Cassandra) Developer Guide val timeRecovery = PointInTimeRecovery { status = PointInTimeRecoveryStatus.Enabled } val tableRequest = CreateTableRequest { keyspaceName = keySpaceVal tableName = tableNameVal schemaDefinition = schemaDefinitionOb pointInTimeRecovery = timeRecovery } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response = keyClient.createTable(tableRequest) println("The table ARN is ${response.resourceArn}") } } • For API details, see CreateTable in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example
AmazonKeyspaces-165
AmazonKeyspaces.pdf
165
keyList = ArrayList<PartitionKey>() keyList.add(yearKey) keyList.add(titleKey) val schemaDefinitionOb = SchemaDefinition { partitionKeys = keyList allColumns = colList } Actions 525 Amazon Keyspaces (for Apache Cassandra) Developer Guide val timeRecovery = PointInTimeRecovery { status = PointInTimeRecoveryStatus.Enabled } val tableRequest = CreateTableRequest { keyspaceName = keySpaceVal tableName = tableNameVal schemaDefinition = schemaDefinitionOb pointInTimeRecovery = timeRecovery } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response = keyClient.createTable(tableRequest) println("The table ARN is ${response.resourceArn}") } } • For API details, see CreateTable in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client Actions 526 Amazon Keyspaces (for Apache Cassandra) Developer Guide self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def create_table(self, table_name): """ Creates a table in the keyspace. The table is created with a schema for storing movie data and has point-in-time recovery enabled. :param table_name: The name to give the table. :return: The ARN of the new table. 
""" try: response = self.keyspaces_client.create_table( keyspaceName=self.ks_name, tableName=table_name, schemaDefinition={ "allColumns": [ {"name": "title", "type": "text"}, {"name": "year", "type": "int"}, {"name": "release_date", "type": "timestamp"}, {"name": "plot", "type": "text"}, ], "partitionKeys": [{"name": "year"}, {"name": "title"}], }, pointInTimeRecovery={"status": "ENABLED"}, ) except ClientError as err: logger.error( "Couldn't create table %s. Here's why: %s: %s", table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise else: return response["resourceArn"] Actions 527 Amazon Keyspaces (for Apache Cassandra) Developer Guide • For API details, see CreateTable in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use DeleteKeyspace with an AWS SDK The following code examples show how to use DeleteKeyspace. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Delete an existing keyspace. /// </summary> /// <param name="keyspaceName"></param> /// <returns>A Boolean value indicating the success of the action.</returns> public async Task<bool> DeleteKeyspace(string keyspaceName) { var response = await _amazonKeyspaces.DeleteKeyspaceAsync( new DeleteKeyspaceRequest { KeyspaceName = keyspaceName }); return response.HttpStatusCode == HttpStatusCode.OK; } Actions 528 Amazon Keyspaces (for Apache Cassandra) Developer Guide • For API details, see DeleteKeyspace in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. 
Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void deleteKeyspace(KeyspacesClient keyClient, String keyspaceName) { try { DeleteKeyspaceRequest deleteKeyspaceRequest = DeleteKeyspaceRequest.builder() .keyspaceName(keyspaceName) .build(); keyClient.deleteKeyspace(deleteKeyspaceRequest); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see DeleteKeyspace in AWS SDK for Java 2.x API Reference. Actions 529 Amazon Keyspaces (for Apache Cassandra) Developer Guide Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun deleteKeyspace(keyspaceNameVal: String?) { val deleteKeyspaceRequest = DeleteKeyspaceRequest { keyspaceName = keyspaceNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient.deleteKeyspace(deleteKeyspaceRequest) } } • For API details, see DeleteKeyspace in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ Actions 530 Amazon Keyspaces (for Apache Cassandra) Developer Guide :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def delete_keyspace(self): """ Deletes the keyspace. """ try: self.keyspaces_client.delete_keyspace(keyspaceName=self.ks_name) self.ks_name = None except ClientError as err: logger.error( "Couldn't delete keyspace %s. 
Here's why: %s: %s", self.ks_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise • For API details, see DeleteKeyspace in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use DeleteTable with an AWS SDK The following code examples show how to use DeleteTable. Actions 531 Amazon Keyspaces (for Apache Cassandra) Developer Guide Action examples are code excerpts from larger programs and must be run in
AmazonKeyspaces-166
AmazonKeyspaces.pdf
166
err: logger.error( "Couldn't delete keyspace %s. Here's why: %s: %s", self.ks_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise • For API details, see DeleteKeyspace in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use DeleteTable with an AWS SDK The following code examples show how to use DeleteTable. Actions 531 Amazon Keyspaces (for Apache Cassandra) Developer Guide Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Delete an Amazon Keyspaces table. /// </summary> /// <param name="keyspaceName">The keyspace containing the table.</param> /// <param name="tableName">The name of the table to delete.</param> /// <returns>A Boolean value indicating the success of the action.</returns> public async Task<bool> DeleteTable(string keyspaceName, string tableName) { var response = await _amazonKeyspaces.DeleteTableAsync( new DeleteTableRequest { KeyspaceName = keyspaceName, TableName = tableName }); return response.HttpStatusCode == HttpStatusCode.OK; } • For API details, see DeleteTable in AWS SDK for .NET API Reference. Actions 532 Amazon Keyspaces (for Apache Cassandra) Developer Guide Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. 
public static void deleteTable(KeyspacesClient keyClient, String keyspaceName, String tableName) { try { DeleteTableRequest tableRequest = DeleteTableRequest.builder() .keyspaceName(keyspaceName) .tableName(tableName) .build(); keyClient.deleteTable(tableRequest); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see DeleteTable in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun deleteTable( Actions 533 Amazon Keyspaces (for Apache Cassandra) Developer Guide keyspaceNameVal: String?, tableNameVal: String?, ) { val tableRequest = DeleteTableRequest { keyspaceName = keyspaceNameVal tableName = tableNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient.deleteTable(tableRequest) } } • For API details, see DeleteTable in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): Actions 534 Amazon Keyspaces (for Apache Cassandra) Developer Guide keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def delete_table(self): """ Deletes the table from the keyspace. """ try: self.keyspaces_client.delete_table( keyspaceName=self.ks_name, tableName=self.table_name ) self.table_name = None except ClientError as err: logger.error( "Couldn't delete table %s. 
Here's why: %s: %s", self.table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise • For API details, see DeleteTable in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use GetKeyspace with an AWS SDK The following code examples show how to use GetKeyspace. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics Actions 535 Amazon Keyspaces (for Apache Cassandra) Developer Guide .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Get data about a keyspace. /// </summary> /// <param name="keyspaceName">The name of the keyspace.</param> /// <returns>The Amazon Resource Name (ARN) of the keyspace.</returns> public async Task<string> GetKeyspace(string keyspaceName) { var response = await _amazonKeyspaces.GetKeyspaceAsync( new GetKeyspaceRequest { KeyspaceName = keyspaceName }); return response.ResourceArn; } • For API details, see GetKeyspace in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. 
public static void checkKeyspaceExistence(KeyspacesClient keyClient, String keyspaceName) { try { GetKeyspaceRequest keyspaceRequest = GetKeyspaceRequest.builder() Actions 536 Amazon Keyspaces (for Apache Cassandra) Developer Guide .keyspaceName(keyspaceName) .build(); GetKeyspaceResponse response = keyClient.getKeyspace(keyspaceRequest); String name = response.keyspaceName(); System.out.println("The " + name + " KeySpace is ready"); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see GetKeyspace in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set
AmazonKeyspaces-167
AmazonKeyspaces.pdf
167
the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void checkKeyspaceExistence(KeyspacesClient keyClient, String keyspaceName) { try { GetKeyspaceRequest keyspaceRequest = GetKeyspaceRequest.builder() Actions 536 Amazon Keyspaces (for Apache Cassandra) Developer Guide .keyspaceName(keyspaceName) .build(); GetKeyspaceResponse response = keyClient.getKeyspace(keyspaceRequest); String name = response.keyspaceName(); System.out.println("The " + name + " KeySpace is ready"); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see GetKeyspace in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun checkKeyspaceExistence(keyspaceNameVal: String?) { val keyspaceRequest = GetKeyspaceRequest { keyspaceName = keyspaceNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response: GetKeyspaceResponse = keyClient.getKeyspace(keyspaceRequest) val name = response.keyspaceName println("The $name KeySpace is ready") } } Actions 537 Amazon Keyspaces (for Apache Cassandra) Developer Guide • For API details, see GetKeyspace in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def exists_keyspace(self, name): """ Checks whether a keyspace exists. 
:param name: The name of the keyspace to look up. :return: True when the keyspace exists. Otherwise, False. """ try: response = self.keyspaces_client.get_keyspace(keyspaceName=name) self.ks_name = response["keyspaceName"] self.ks_arn = response["resourceArn"] Actions 538 Amazon Keyspaces (for Apache Cassandra) Developer Guide exists = True except ClientError as err: if err.response["Error"]["Code"] == "ResourceNotFoundException": logger.info("Keyspace %s does not exist.", name) exists = False else: logger.error( "Couldn't verify %s exists. Here's why: %s: %s", name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise return exists • For API details, see GetKeyspace in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use GetTable with an AWS SDK The following code examples show how to use GetTable. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. Actions 539 Amazon Keyspaces (for Apache Cassandra) Developer Guide /// <summary> /// Get information about an Amazon Keyspaces table. 
/// </summary> /// <param name="keyspaceName">The keyspace containing the table.</param> /// <param name="tableName">The name of the Amazon Keyspaces table.</param> /// <returns>The response containing data about the table.</returns> public async Task<GetTableResponse> GetTable(string keyspaceName, string tableName) { var response = await _amazonKeyspaces.GetTableAsync( new GetTableRequest { KeyspaceName = keyspaceName, TableName = tableName }); return response; } • For API details, see GetTable in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void checkTable(KeyspacesClient keyClient, String keyspaceName, String tableName) throws InterruptedException { try { boolean tableStatus = false; String status; GetTableResponse response = null; GetTableRequest tableRequest = GetTableRequest.builder() .keyspaceName(keyspaceName) .tableName(tableName) .build(); Actions 540 Amazon Keyspaces (for Apache Cassandra) Developer Guide while (!tableStatus) { response = keyClient.getTable(tableRequest); status = response.statusAsString(); System.out.println(". The table status is " + status); if (status.compareTo("ACTIVE") == 0) { tableStatus = true; } Thread.sleep(500); } List<ColumnDefinition> cols = response.schemaDefinition().allColumns(); for (ColumnDefinition def : cols) { System.out.println("The column name is " + def.name()); System.out.println("The column type is " + def.type()); } } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see GetTable in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. 
suspend fun checkTable( keyspaceNameVal: String?, tableNameVal: String?, ) { var tableStatus = false Actions 541 Amazon Keyspaces (for Apache Cassandra) Developer Guide var status: String var response: GetTableResponse? = null val tableRequest = GetTableRequest { keyspaceName = keyspaceNameVal tableName = tableNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> while (!tableStatus) { response = keyClient.getTable(tableRequest) status = response!!.status.toString() println(". The table status is $status") if (status.compareTo("ACTIVE") == 0) { tableStatus = true } delay(500) } val cols: List<ColumnDefinition>? = response!!.schemaDefinition?.allColumns if (cols != null) { for (def in cols) { println("The column name is ${def.name}") println("The column
AmazonKeyspaces-168
AmazonKeyspaces.pdf
168
Repository. suspend fun checkTable( keyspaceNameVal: String?, tableNameVal: String?, ) { var tableStatus = false Actions 541 Amazon Keyspaces (for Apache Cassandra) Developer Guide var status: String var response: GetTableResponse? = null val tableRequest = GetTableRequest { keyspaceName = keyspaceNameVal tableName = tableNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> while (!tableStatus) { response = keyClient.getTable(tableRequest) status = response!!.status.toString() println(". The table status is $status") if (status.compareTo("ACTIVE") == 0) { tableStatus = true } delay(500) } val cols: List<ColumnDefinition>? = response!!.schemaDefinition?.allColumns if (cols != null) { for (def in cols) { println("The column name is ${def.name}") println("The column type is ${def.type}") } } } } • For API details, see GetTable in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. Actions 542 Amazon Keyspaces (for Apache Cassandra) Developer Guide class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def get_table(self, table_name): """ Gets data about a table in the keyspace. :param table_name: The name of the table to look up. :return: Data about the table. 
""" try: response = self.keyspaces_client.get_table( keyspaceName=self.ks_name, tableName=table_name ) self.table_name = table_name except ClientError as err: if err.response["Error"]["Code"] == "ResourceNotFoundException": logger.info("Table %s does not exist.", table_name) self.table_name = None response = None else: logger.error( "Couldn't verify %s exists. Here's why: %s: %s", table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) Actions 543 Amazon Keyspaces (for Apache Cassandra) Developer Guide raise return response • For API details, see GetTable in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use ListKeyspaces with an AWS SDK The following code examples show how to use ListKeyspaces. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Lists all keyspaces for the account. /// </summary> /// <returns>Async task.</returns> public async Task ListKeyspaces() { var paginator = _amazonKeyspaces.Paginators.ListKeyspaces(new ListKeyspacesRequest()); Console.WriteLine("{0, -30}\t{1}", "Keyspace name", "Keyspace ARN"); Actions 544 Amazon Keyspaces (for Apache Cassandra) Developer Guide Console.WriteLine(new string('-', Console.WindowWidth)); await foreach (var keyspace in paginator.Keyspaces) { Console.WriteLine($"{keyspace.KeyspaceName,-30}\t{keyspace.ResourceArn}"); } } • For API details, see ListKeyspaces in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. 
Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void listKeyspacesPaginator(KeyspacesClient keyClient) { try { ListKeyspacesRequest keyspacesRequest = ListKeyspacesRequest.builder() .maxResults(10) .build(); ListKeyspacesIterable listRes = keyClient.listKeyspacesPaginator(keyspacesRequest); listRes.stream() .flatMap(r -> r.keyspaces().stream()) .forEach(content -> System.out.println(" Name: " + content.keyspaceName())); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } Actions 545 Amazon Keyspaces (for Apache Cassandra) Developer Guide • For API details, see ListKeyspaces in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun listKeyspacesPaginator() { KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient .listKeyspacesPaginated(ListKeyspacesRequest {}) .transform { it.keyspaces?.forEach { obj -> emit(obj) } } .collect { obj -> println("Name: ${obj.keyspaceName}") } } } • For API details, see ListKeyspaces in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: Actions 546 Amazon Keyspaces (for Apache Cassandra) Developer Guide """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def list_keyspaces(self, limit): """ Lists the keyspaces in your account. 
:param limit: The maximum number of keyspaces to list. """ try: ks_paginator = self.keyspaces_client.get_paginator("list_keyspaces") for page in ks_paginator.paginate(PaginationConfig={"MaxItems": limit}): for ks in page["keyspaces"]: print(ks["keyspaceName"]) print(f"\t{ks['resourceArn']}") except ClientError as err: logger.error( "Couldn't list keyspaces. Here's why: %s: %s", err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise • For API details, see ListKeyspaces in AWS SDK for Python (Boto3) API Reference. Actions 547 Amazon Keyspaces (for Apache Cassandra) Developer Guide For a complete list of AWS SDK developer guides and code examples,
AmazonKeyspaces-169
AmazonKeyspaces.pdf
169
self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def list_keyspaces(self, limit): """ Lists the keyspaces in your account. :param limit: The maximum number of keyspaces to list. """ try: ks_paginator = self.keyspaces_client.get_paginator("list_keyspaces") for page in ks_paginator.paginate(PaginationConfig={"MaxItems": limit}): for ks in page["keyspaces"]: print(ks["keyspaceName"]) print(f"\t{ks['resourceArn']}") except ClientError as err: logger.error( "Couldn't list keyspaces. Here's why: %s: %s", err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise • For API details, see ListKeyspaces in AWS SDK for Python (Boto3) API Reference. Actions 547 Amazon Keyspaces (for Apache Cassandra) Developer Guide For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use ListTables with an AWS SDK The following code examples show how to use ListTables. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Lists the Amazon Keyspaces tables in a keyspace. 
/// </summary> /// <param name="keyspaceName">The name of the keyspace.</param> /// <returns>A list of TableSummary objects.</returns> public async Task<List<TableSummary>> ListTables(string keyspaceName) { var response = await _amazonKeyspaces.ListTablesAsync(new ListTablesRequest { KeyspaceName = keyspaceName }); response.Tables.ForEach(table => { Console.WriteLine($"{table.KeyspaceName}\t{table.TableName}\t{table.ResourceArn}"); }); return response.Tables; } Actions 548 Amazon Keyspaces (for Apache Cassandra) Developer Guide • For API details, see ListTables in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void listTables(KeyspacesClient keyClient, String keyspaceName) { try { ListTablesRequest tablesRequest = ListTablesRequest.builder() .keyspaceName(keyspaceName) .build(); ListTablesIterable listRes = keyClient.listTablesPaginator(tablesRequest); listRes.stream() .flatMap(r -> r.tables().stream()) .forEach(content -> System.out.println(" ARN: " + content.resourceArn() + " Table name: " + content.tableName())); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see ListTables in AWS SDK for Java 2.x API Reference. Actions 549 Amazon Keyspaces (for Apache Cassandra) Developer Guide Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun listTables(keyspaceNameVal: String?) 
{ val tablesRequest = ListTablesRequest { keyspaceName = keyspaceNameVal } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient .listTablesPaginated(tablesRequest) .transform { it.tables?.forEach { obj -> emit(obj) } } .collect { obj -> println(" ARN: ${obj.resourceArn} Table name: ${obj.tableName}") } } } • For API details, see ListTables in AWS SDK for Kotlin API reference. Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: Actions 550 Amazon Keyspaces (for Apache Cassandra) Developer Guide """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def list_tables(self): """ Lists the tables in the keyspace. """ try: table_paginator = self.keyspaces_client.get_paginator("list_tables") for page in table_paginator.paginate(keyspaceName=self.ks_name): for table in page["tables"]: print(table["tableName"]) print(f"\t{table['resourceArn']}") except ClientError as err: logger.error( "Couldn't list tables in keyspace %s. Here's why: %s: %s", self.ks_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise • For API details, see ListTables in AWS SDK for Python (Boto3) API Reference. Actions 551 Amazon Keyspaces (for Apache Cassandra) Developer Guide For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use RestoreTable with an AWS SDK The following code examples show how to use RestoreTable. 
Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Restores the specified table to the specified point in time. /// </summary> /// <param name="keyspaceName">The keyspace containing the table.</param> /// <param name="tableName">The name of the table to restore.</param> /// <param name="restoredTableName">The name of the new table created by the restore.</param> /// <param name="timestamp">The time to which the table will be restored.</param> /// <returns>The Amazon Resource Name (ARN) of the restored table.</returns> public async Task<string> RestoreTable(string keyspaceName, string tableName, string restoredTableName, DateTime timestamp) { var request = new RestoreTableRequest { RestoreTimestamp = timestamp, SourceKeyspaceName = keyspaceName, SourceTableName = tableName, TargetKeyspaceName = keyspaceName, TargetTableName = restoredTableName Actions 552 Amazon Keyspaces (for Apache Cassandra) Developer
AmazonKeyspaces-170
AmazonKeyspaces.pdf
170
in the AWS Code Examples Repository. /// <summary> /// Restores the specified table to the specified point in time. /// </summary> /// <param name="keyspaceName">The keyspace containing the table.</param> /// <param name="tableName">The name of the table to restore.</param> /// <param name="restoredTableName">The name of the new table created by the restore.</param> /// <param name="timestamp">The time to which the table will be restored.</param> /// <returns>The Amazon Resource Name (ARN) of the restored table.</returns> public async Task<string> RestoreTable(string keyspaceName, string tableName, string restoredTableName, DateTime timestamp) { var request = new RestoreTableRequest { RestoreTimestamp = timestamp, SourceKeyspaceName = keyspaceName, SourceTableName = tableName, TargetKeyspaceName = keyspaceName, TargetTableName = restoredTableName Actions 552 Amazon Keyspaces (for Apache Cassandra) Developer Guide }; var response = await _amazonKeyspaces.RestoreTableAsync(request); return response.RestoredTableARN; } • For API details, see RestoreTable in AWS SDK for .NET API Reference. Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void restoreTable(KeyspacesClient keyClient, String keyspaceName, ZonedDateTime utc) { try { Instant myTime = utc.toInstant(); RestoreTableRequest restoreTableRequest = RestoreTableRequest.builder() .restoreTimestamp(myTime) .sourceTableName("Movie") .targetKeyspaceName(keyspaceName) .targetTableName("MovieRestore") .sourceKeyspaceName(keyspaceName) .build(); RestoreTableResponse response = keyClient.restoreTable(restoreTableRequest); System.out.println("The ARN of the restored table is " + response.restoredTableARN()); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } Actions 553 Amazon Keyspaces (for Apache Cassandra) Developer Guide } • For API details, see RestoreTable in AWS SDK for Java 2.x API Reference. Kotlin SDK for Kotlin Note There's more on GitHub. 
Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun restoreTable( keyspaceName: String?, utc: ZonedDateTime, ) { // Create an aws.smithy.kotlin.runtime.time.Instant value. val timeStamp = aws.smithy.kotlin.runtime.time .Instant(utc.toInstant()) val restoreTableRequest = RestoreTableRequest { restoreTimestamp = timeStamp sourceTableName = "MovieKotlin" targetKeyspaceName = keyspaceName targetTableName = "MovieRestore" sourceKeyspaceName = keyspaceName } KeyspacesClient { region = "us-east-1" }.use { keyClient -> val response = keyClient.restoreTable(restoreTableRequest) println("The ARN of the restored table is ${response.restoredTableArn}") } } • For API details, see RestoreTable in AWS SDK for Kotlin API reference. Actions 554 Amazon Keyspaces (for Apache Cassandra) Developer Guide Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def restore_table(self, restore_timestamp): """ Restores the table to a previous point in time. The table is restored to a new table in the same keyspace. :param restore_timestamp: The point in time to restore the table. This time must be in UTC format. :return: The name of the restored table. 
""" try: restored_table_name = f"{self.table_name}_restored" self.keyspaces_client.restore_table( Actions 555 Amazon Keyspaces (for Apache Cassandra) Developer Guide sourceKeyspaceName=self.ks_name, sourceTableName=self.table_name, targetKeyspaceName=self.ks_name, targetTableName=restored_table_name, restoreTimestamp=restore_timestamp, ) except ClientError as err: logger.error( "Couldn't restore table %s. Here's why: %s: %s", restore_timestamp, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise else: return restored_table_name • For API details, see RestoreTable in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Use UpdateTable with an AWS SDK The following code examples show how to use UpdateTable. Action examples are code excerpts from larger programs and must be run in context. You can see this action in context in the following code example: • Learn the basics Actions 556 Amazon Keyspaces (for Apache Cassandra) Developer Guide .NET SDK for .NET Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. /// <summary> /// Updates the movie table to add a boolean column named watched. 
/// </summary> /// <param name="keyspaceName">The keyspace containing the table.</param> /// <param name="tableName">The name of the table to change.</param> /// <returns>The Amazon Resource Name (ARN) of the updated table.</returns> public async Task<string> UpdateTable(string keyspaceName, string tableName) { var newColumn = new ColumnDefinition { Name = "watched", Type = "boolean" }; var request = new UpdateTableRequest { KeyspaceName = keyspaceName, TableName = tableName, AddColumns = new List<ColumnDefinition> { newColumn } }; var response = await _amazonKeyspaces.UpdateTableAsync(request); return response.ResourceArn; } • For API details, see UpdateTable in AWS SDK for .NET API Reference. Actions 557 Amazon Keyspaces (for Apache Cassandra) Developer Guide Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void updateTable(KeyspacesClient keyClient, String keySpace, String tableName) { try { ColumnDefinition def = ColumnDefinition.builder() .name("watched") .type("boolean") .build(); UpdateTableRequest tableRequest = UpdateTableRequest.builder() .keyspaceName(keySpace) .tableName(tableName) .addColumns(def) .build(); keyClient.updateTable(tableRequest); } catch (KeyspacesException e)
AmazonKeyspaces-171
AmazonKeyspaces.pdf
171
= tableName, AddColumns = new List<ColumnDefinition> { newColumn } }; var response = await _amazonKeyspaces.UpdateTableAsync(request); return response.ResourceArn; } • For API details, see UpdateTable in AWS SDK for .NET API Reference. Actions 557 Amazon Keyspaces (for Apache Cassandra) Developer Guide Java SDK for Java 2.x Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. public static void updateTable(KeyspacesClient keyClient, String keySpace, String tableName) { try { ColumnDefinition def = ColumnDefinition.builder() .name("watched") .type("boolean") .build(); UpdateTableRequest tableRequest = UpdateTableRequest.builder() .keyspaceName(keySpace) .tableName(tableName) .addColumns(def) .build(); keyClient.updateTable(tableRequest); } catch (KeyspacesException e) { System.err.println(e.awsErrorDetails().errorMessage()); System.exit(1); } } • For API details, see UpdateTable in AWS SDK for Java 2.x API Reference. Actions 558 Amazon Keyspaces (for Apache Cassandra) Developer Guide Kotlin SDK for Kotlin Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. suspend fun updateTable( keySpace: String?, tableNameVal: String?, ) { val def = ColumnDefinition { name = "watched" type = "boolean" } val tableRequest = UpdateTableRequest { keyspaceName = keySpace tableName = tableNameVal addColumns = listOf(def) } KeyspacesClient { region = "us-east-1" }.use { keyClient -> keyClient.updateTable(tableRequest) } } • For API details, see UpdateTable in AWS SDK for Kotlin API reference. Actions 559 Amazon Keyspaces (for Apache Cassandra) Developer Guide Python SDK for Python (Boto3) Note There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository. 
class KeyspaceWrapper: """Encapsulates Amazon Keyspaces (for Apache Cassandra) keyspace and table actions.""" def __init__(self, keyspaces_client): """ :param keyspaces_client: A Boto3 Amazon Keyspaces client. """ self.keyspaces_client = keyspaces_client self.ks_name = None self.ks_arn = None self.table_name = None @classmethod def from_client(cls): keyspaces_client = boto3.client("keyspaces") return cls(keyspaces_client) def update_table(self): """ Updates the schema of the table. This example updates a table of movie data by adding a new column that tracks whether the movie has been watched. """ try: self.keyspaces_client.update_table( keyspaceName=self.ks_name, tableName=self.table_name, addColumns=[{"name": "watched", "type": "boolean"}], ) Actions 560 Amazon Keyspaces (for Apache Cassandra) Developer Guide except ClientError as err: logger.error( "Couldn't update table %s. Here's why: %s: %s", self.table_name, err.response["Error"]["Code"], err.response["Error"]["Message"], ) raise • For API details, see UpdateTable in AWS SDK for Python (Boto3) API Reference. For a complete list of AWS SDK developer guides and code examples, see Using this service with an AWS SDK. This topic also includes information about getting started and details about previous SDK versions. Actions 561 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces (for Apache Cassandra) libraries and tools This section provides information about Amazon Keyspaces (for Apache Cassandra) libraries, code examples, and tools. Topics • Libraries and examples • Highlighted sample and developer tool repos Libraries and examples You can find Amazon Keyspaces open-source libraries and developer tools on GitHub in the AWS and AWS samples repos. Amazon Keyspaces (for Apache Cassandra) developer toolkit This repository provides a docker image with helpful developer tools for Amazon Keyspaces. 
For example, it includes a CQLSHRC file with best practices, an optional AWS authentication expansion for cqlsh, and helper tools to perform common tasks. The toolkit is optimized for Amazon Keyspaces, but also works with Apache Cassandra clusters. https://github.com/aws-samples/amazon-keyspaces-toolkit. Amazon Keyspaces (for Apache Cassandra) examples This repo is our official list of Amazon Keyspaces example code. The repo is subdivided into sections by language (see Examples). Each language has its own subsection of examples. These examples demonstrate common Amazon Keyspaces service implementations and patterns that you can use when building applications. https://github.com/aws-samples/amazon-keyspaces-examples/. AWS Signature Version 4 (SigV4) authentication plugins The plugins enable you to manage access to Amazon Keyspaces by using AWS Identity and Access Management (IAM) users and roles. Libraries and examples 562 Amazon Keyspaces (for Apache Cassandra) Developer Guide Java: https://github.com/aws/aws-sigv4-auth-cassandra-java-driver-plugin. Node.js: https://github.com/aws/aws-sigv4-auth-cassandra-nodejs-driver-plugin. Python: https://github.com/aws/aws-sigv4-auth-cassandra-python-driver-plugin. Go: https://github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin. Highlighted sample and developer tool repos Below are a selection of helpful community tools for Amazon Keyspaces (for Apache Cassandra). Amazon Keyspaces Protocol Buffers You can use Protocol Buffers (Protobuf) with Amazon Keyspaces to provide an alternative to Apache Cassandra User Defined Types (UDTs). Protobuf is a free and open-source cross-platform data format which is used to serialize structured data. You can store Protobuf data using the CQL BLOB data type and refactor UDTs while preserving structured data across applications and programming languages. 
This repository provides a code example that connects to Amazon Keyspaces, creates a new table, and inserts a row containing a Protobuf message. Then the row is read with strong consistency. https://github.com/aws-samples/amazon-keyspaces-examples/tree/main/java/datastax-v4/ protobuf-user-defined-types AWS CloudFormation template to create Amazon CloudWatch dashboard for Amazon Keyspaces (for Apache Cassandra) metrics This repository provides AWS CloudFormation templates to quickly set up CloudWatch metrics for Amazon Keyspaces. Using this template will allow you to get started more easily by providing deployable prebuilt CloudWatch dashboards
AmazonKeyspaces-172
AmazonKeyspaces.pdf
172
Protobuf data using the CQL BLOB data type and refactor UDTs while preserving structured data across applications and programming languages. This repository provides a code example that connects to Amazon Keyspaces, creates a new table, and inserts a row containing a Protobuf message. Then the row is read with strong consistency. https://github.com/aws-samples/amazon-keyspaces-examples/tree/main/java/datastax-v4/ protobuf-user-defined-types AWS CloudFormation template to create Amazon CloudWatch dashboard for Amazon Keyspaces (for Apache Cassandra) metrics This repository provides AWS CloudFormation templates to quickly set up CloudWatch metrics for Amazon Keyspaces. Using this template will allow you to get started more easily by providing deployable prebuilt CloudWatch dashboards with commonly used metrics. https://github.com/aws-samples/amazon-keyspaces-cloudwatch-cloudformation-templates. Using Amazon Keyspaces (for Apache Cassandra) with AWS Lambda The repository contains examples that show how to connect to Amazon Keyspaces from Lambda. Below are some examples. Highlighted sample and developer tool repos 563 Amazon Keyspaces (for Apache Cassandra) Developer Guide C#/.NET: https://github.com/aws-samples/amazon-keyspaces-examples/tree/main/dotnet/ datastax-v3/connection-lambda. Java: https://github.com/aws-samples/amazon-keyspaces-examples/tree/main/java/datastax-v4/ connection-lambda. Another Lambda example that shows how to deploy and use Amazon Keyspaces from a Python Lambda is available from the following repo. https://github.com/aws-samples/aws-keyspaces-lambda-python Using Amazon Keyspaces (for Apache Cassandra) with Spring This is an example that shows you how to use Amazon Keyspaces with Spring Boot. 
https://github.com/aws-samples/amazon-keyspaces-examples/tree/main/java/datastax-v4/spring Using Amazon Keyspaces (for Apache Cassandra) with Scala This is an example that shows how to connect to Amazon Keyspaces using the SigV4 authentication plugin with Scala. https://github.com/aws-samples/amazon-keyspaces-examples/tree/main/scala/datastax-v4/ connection-sigv4 Using Amazon Keyspaces (for Apache Cassandra) with AWS Glue This is an example that shows how to use Amazon Keyspaces with AWS Glue. https://github.com/aws-samples/amazon-keyspaces-examples/tree/main/scala/datastax-v4/aws- glue Amazon Keyspaces (for Apache Cassandra) Cassandra query language (CQL) to AWS CloudFormation converter This package implements a command-line tool for converting Apache Cassandra Query Language (CQL) scripts to AWS CloudFormation (CloudFormation) templates, which allows Amazon Keyspaces schemas to be easily managed in CloudFormation stacks. https://github.com/aws/amazon-keyspaces-cql-to-cfn-converter. Using Amazon Keyspaces (for Apache Cassandra) with Spring 564 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces (for Apache Cassandra) helpers for Apache Cassandra driver for Java This repository contains driver policies, examples, and best practices when using the DataStax Java Driver with Amazon Keyspaces (for Apache Cassandra). https://github.com/aws-samples/amazon-keyspaces-java-driver-helpers. Amazon Keyspaces (for Apache Cassandra) snappy compression demo This repository demonstrates how to compress, store, and read/write large objects for faster performance and lower throughput and storage costs. https://github.com/aws-samples/amazon-keyspaces-compression-example. Amazon Keyspaces (for Apache Cassandra) and Amazon S3 codec demo Custom Amazon S3 Codec supports transparent, user-configurable mapping of UUID pointers to Amazon S3 objects. https://github.com/aws-samples/amazon-keyspaces-large-object-s3-demo. 
Amazon Keyspaces (for Apache Cassandra) helpers for Apache Cassandra driver for Java 565 Amazon Keyspaces (for Apache Cassandra) Developer Guide Best practices for designing and architecting with Amazon Keyspaces (for Apache Cassandra) Use this section to quickly find recommendations for maximizing performance and minimizing throughput costs when working with Amazon Keyspaces. Contents • Key differences and design principles of NoSQL design • Differences between relational data design and NoSQL • Two key concepts for NoSQL design • Approaching NoSQL design • Optimize client driver connections for the serverless environment • How connections work in Amazon Keyspaces • How to configure connections in Amazon Keyspaces • How to configure the retry policy for connections in Amazon Keyspaces • How to configure connections over VPC endpoints in Amazon Keyspaces • How to monitor connections in Amazon Keyspaces • How to handle connection errors in Amazon Keyspaces • Data modeling best practices: recommendations for designing data models • How to use partition keys effectively in Amazon Keyspaces • Use write sharding to evenly distribute workloads across partitions • Sharding using compound partition keys and random values • Sharding using compound partition keys and calculated values • Optimizing costs of Amazon Keyspaces tables • Evaluate your costs at the table level • How to view the costs of a single Amazon Keyspaces table • Cost Explorer's default view • How to use and apply table tags in Cost Explorer • Evaluate your table's capacity mode • What table capacity modes are available • When to select on-demand capacity mode 566 Amazon Keyspaces (for Apache Cassandra) Developer Guide • When to select provisioned capacity mode • Additional factors to consider when choosing a table capacity mode • Evaluate your table's Application Auto Scaling settings • Understanding your Application Auto Scaling settings • How to identify tables with low target utilization (<=50%) • How to 
address workloads with seasonal variance • How to address spiky workloads with unknown patterns • How to address workloads with linked applications • Identify your unused resources to optimize costs in Amazon Keyspaces • How to identify unused resources • Identifying unused table resources • Cleaning up unused table resources • Cleaning up unused point-in-time recovery (PITR) backups • Evaluate your table usage patterns to optimize performance and cost • Perform fewer strongly-consistent read operations • Enable Time to Live (TTL) • Evaluate your provisioned capacity for right-sized
AmazonKeyspaces-173
AmazonKeyspaces.pdf
173
How to identify tables with low target utilization (<=50%) • How to address workloads with seasonal variance • How to address spiky workloads with unknown patterns • How to address workloads with linked applications • Identify your unused resources to optimize costs in Amazon Keyspaces • How to identify unused resources • Identifying unused table resources • Cleaning up unused table resources • Cleaning up unused point-in-time recovery (PITR) backups • Evaluate your table usage patterns to optimize performance and cost • Perform fewer strongly-consistent read operations • Enable Time to Live (TTL) • Evaluate your provisioned capacity for right-sized provisioning • How to retrieve consumption metrics from your Amazon Keyspaces tables • How to identify under-provisioned Amazon Keyspaces tables • How to identify over-provisioned Amazon Keyspaces tables Key differences and design principles of NoSQL design NoSQL database systems like Amazon Keyspaces use alternative models for data management, such as key-value pairs or document storage. When you switch from a relational database management system to a NoSQL database system like Amazon Keyspaces, it's important to understand the key differences and specific design approaches. Topics • Differences between relational data design and NoSQL • Two key concepts for NoSQL design • Approaching NoSQL design NoSQL design 567 Amazon Keyspaces (for Apache Cassandra) Developer Guide Differences between relational data design and NoSQL Relational database systems (RDBMS) and NoSQL databases have different strengths and weaknesses: • In RDBMS, data can be queried flexibly, but queries are relatively expensive and don't scale well in high-traffic situations (see the section called “Data modeling”). • In a NoSQL database such as Amazon Keyspaces, data can be queried efficiently in a limited number of ways, outside of which queries can be expensive and slow. 
These differences make database design different between the two systems: • In RDBMS, you design for flexibility without worrying about implementation details or performance. Query optimization generally doesn't affect schema design, but normalization is important. • In Amazon Keyspaces, you design your schema specifically to make the most common and important queries as fast and as inexpensive as possible. Your data structures are tailored to the specific requirements of your business use cases. Two key concepts for NoSQL design NoSQL design requires a different mindset than RDBMS design. For an RDBMS, you can go ahead and create a normalized data model without thinking about access patterns. You can then extend it later when new questions and query requirements arise. You can organize each type of data into its own table. How NoSQL design is different • By contrast, you shouldn't start designing your schema for Amazon Keyspaces until you know the questions it needs to answer. Understanding the business problems and the application use cases up front is essential. • You should maintain as few tables as possible in an Amazon Keyspaces application. Having fewer tables keeps things more scalable, requires less permissions management, and reduces overhead for your Amazon Keyspaces application. It can also help keep backup costs lower overall. NoSQL vs. RDBMS 568 Amazon Keyspaces (for Apache Cassandra) Developer Guide Approaching NoSQL design The first step in designing your Amazon Keyspaces application is to identify the specific query patterns that the system must satisfy. In particular, it is important to understand three fundamental properties of your application's access patterns before you begin: • Data size: Knowing how much data will be stored and requested at one time helps to determine the most effective way to partition the data. 
• Data shape: Instead of reshaping data when a query is processed (as an RDBMS system does), a NoSQL database organizes data so that its shape in the database corresponds with what will be queried. This is a key factor in increasing speed and scalability. • Data velocity: Amazon Keyspaces scales by increasing the number of physical partitions that are available to process queries, and by efficiently distributing data across those partitions. Knowing in advance what the peak query loads will be might help determine how to partition data to best use I/O capacity. After you identify specific query requirements, you can organize data according to general principles that govern performance: • Keep related data together. Research on routing-table optimization 20 years ago found that "locality of reference" was the single most important factor in speeding up response time: keeping related data together in one place. This is equally true in NoSQL systems today, where keeping related data in close proximity has a major impact on cost and performance. Instead of distributing related data items across multiple tables, you should keep related items in your NoSQL system as close together as possible. As a general rule, you should maintain as few tables as possible in an Amazon Keyspaces application. Exceptions are cases where high-volume time series data are involved, or datasets that have
AmazonKeyspaces-174
AmazonKeyspaces.pdf
174
found that "locality of reference" was the single most important factor in speeding up response time: keeping related data together in one place. This is equally true in NoSQL systems today, where keeping related data in close proximity has a major impact on cost and performance. Instead of distributing related data items across multiple tables, you should keep related items in your NoSQL system as close together as possible. As a general rule, you should maintain as few tables as possible in an Amazon Keyspaces application. Exceptions are cases where high-volume time series data are involved, or datasets that have very different access patterns. A single table with inverted indexes can usually enable simple queries to create and retrieve the complex hierarchical data structures required by your application. • Use sort order. Related items can be grouped together and queried efficiently if their key design causes them to sort together. This is an important NoSQL design strategy. General approach 569 Amazon Keyspaces (for Apache Cassandra) Developer Guide • Distribute queries. It is also important that a high volume of queries not be focused on one part of the database, where they can exceed I/O capacity. Instead, you should design data keys to distribute traffic evenly across partitions as much as possible, avoiding "hot spots." These general principles translate into some common design patterns that you can use to model data efficiently in Amazon Keyspaces. Optimize client driver connections for the serverless environment To communicate with Amazon Keyspaces, you can use any of the existing Apache Cassandra client drivers of your choice. Because Amazon Keyspaces is a serverless service, we recommend that you optimize the connection configuration of your client driver for the throughput needs of your application. 
This topic introduces best practices including how to calculate how many connections your application requires, as well as monitoring and error handling of connections. Topics • How connections work in Amazon Keyspaces • How to configure connections in Amazon Keyspaces • How to configure the retry policy for connections in Amazon Keyspaces • How to configure connections over VPC endpoints in Amazon Keyspaces • How to monitor connections in Amazon Keyspaces • How to handle connection errors in Amazon Keyspaces How connections work in Amazon Keyspaces This section gives an overview of how client driver connections work in Amazon Keyspaces. Because Cassandra client driver misconfiguration can result in PerConnectionRequestExceeded events in Amazon Keyspaces, configuring the right number of connections in the client driver configuration is required to avoid these and similar connection errors. When connecting to Amazon Keyspaces, the driver requires a seed endpoint to establish an initial connection. Amazon Keyspaces uses DNS to route the initial connection to one of the many available endpoints. The endpoints are attached to network load balancers that in turn establish Connections 570 Amazon Keyspaces (for Apache Cassandra) Developer Guide a connection to one of the request handlers in the fleet. After the initial connection is established, the client driver gathers information about all available endpoints from the system.peers table. With this information, the client driver can create additional connections to the listed endpoints. The number of connections the client driver can create is limited by the number of local connections specified in the client driver settings. By default, most client drivers establish one connection per endpoint and establish a connection pool to Cassandra and load balance queries over that pool of connections. 
Although multiple connections can be established to the same endpoint, behind the network load balancer they may be connected to many different request handlers. When connecting through the public endpoint, establishing one connection to each of the nine endpoints listed in the system.peers table results in nine connections to different request handlers. How to configure connections in Amazon Keyspaces Amazon Keyspaces supports up to 3,000 CQL queries per TCP connection per second. Because there's no limit on the number of connections a driver can establish, we recommend to target only 500 CQL requests per second per connection to allow for overhead, traffic bursts, and better load balancing. Follow these steps to ensure that your driver's connection is correctly configured for the needs of your application. Increase the number of connections per IP address your driver is maintaining in its connection pool. • Most Cassandra drivers establish a connection pool to Cassandra and load balance queries over that pool of connections. The default behavior of most drivers is to establish a single connection to each endpoint. Amazon Keyspaces exposes nine peer IP addresses to drivers, so How to configure connections 571 Amazon Keyspaces (for Apache Cassandra) Developer Guide based on the default behavior of most drivers, this results in 9 connections. Amazon Keyspaces supports up to 3,000 CQL queries per TCP connection per second, therefore, the maximum CQL query throughput of a driver using the
AmazonKeyspaces-175
AmazonKeyspaces.pdf
175
driver is maintaining in its connection pool. • Most Cassandra drivers establish a connection pool to Cassandra and load balance queries over that pool of connections. The default behavior of most drivers is to establish a single connection to each endpoint. Amazon Keyspaces exposes nine peer IP addresses to drivers, so How to configure connections 571 Amazon Keyspaces (for Apache Cassandra) Developer Guide based on the default behavior of most drivers, this results in 9 connections. Amazon Keyspaces supports up to 3,000 CQL queries per TCP connection per second, therefore, the maximum CQL query throughput of a driver using the default settings is 27,000 CQL queries per second. If you use the driver's default settings, a single connection may have to process more than the maximum CQL query throughput of 3,000 CQL queries per second. This could result in PerConnectionRequestExceeded events. • To avoid PerConnectionRequestExceeded events, you must configure the driver to create additional connections per endpoint to distribute the throughput. • As a best practice in Amazon Keyspaces, assume that each connection can support 500 CQL queries per second. • That means that for a production application that needs to support an estimated 27,000 CQL queries per second distributed over the nine available endpoints, you must configure six connections per endpoint. This ensures that each connection processes no more than 500 requests per second. Calculate the number of connections per IP address you need to configure for your driver based on the needs of your application. To determine the number of connections you need to configure per endpoint for your application, consider the following example. You have an application that needs to support 20,000 CQL queries per second consisting of 10,000 INSERT, 5,000 SELECT, and 5,000 DELETE operations. 
The Java application is running on three instances on Amazon Elastic Container Service (Amazon ECS) where each instance establishes a single session to Amazon Keyspaces. The calculation you can use to estimate how many connections you need to configure for your driver uses the following input. 1. The number of requests per second your application needs to support. 2. The number of the available instances with one subtracted to account for maintenance or failure. 3. The number of available endpoints. If you're connecting over public endpoints, you have nine available endpoints. If you're using VPC endpoints, you have between two and five available endpoints, depending on the Region. 4. Use 500 CQL queries per second per connection as a best practice for Amazon Keyspaces. 5. Round up the result. For this example, the formula looks like this. How to configure connections 572 Amazon Keyspaces (for Apache Cassandra) Developer Guide 20,000 CQL queries / (3 instances - 1 failure) / 9 public endpoints / 500 CQL queries per second = ROUND(2.22) = 3 Based on this calculation, you need to specify three local connections per endpoint in the driver configuration. For remote connections, configure only one connection per endpoint. How to configure the retry policy for connections in Amazon Keyspaces When configuring the retry policy for a connection to Amazon Keyspaces, we recommend that you implement the Amazon Keyspaces retry policy AmazonKeyspacesExponentialRetryPolicy. This retry policy is better suited to retry across different connections to Amazon Keyspaces than the driver's DefaultRetryPolicy. With the AmazonKeyspacesExponentialRetryPolicy, you can configure the number of retry attempts for the connection that meet your needs. By default, the number of retry attempts for the AmazonKeyspacesExponentialRetryPolicy is set to 3. 
An additional advantage is that the Amazon Keyspaces retry policy passes back the original exception returned by the service, which indicates why the request attempt failed. The default retry policy only returns the generic NoHostAvailableException, which might hide insights into the request failure. To configure the request retry policy using the AmazonKeyspacesExponentialRetryPolicy, we recommend that you configure a small number of retries, and handle any returned exceptions in your application code. For code examples implementing retry policies, see Amazon Keyspaces retry policies on GitHub. How to configure connections over VPC endpoints in Amazon Keyspaces When connecting over private VPC endpoints, you most likely have three endpoints available. The number of VPC endpoints can be different per Region, based on the number of Availability Zones, and the number of subnets in the assigned VPC. US East (N. Virginia) Region has five Availability Zones and you can have up to five Amazon Keyspaces endpoints. US West (N. California) Region has two Availability Zones and you can have up to two Amazon Keyspaces endpoints. The number of endpoints does not impact scale, but it does increase the number of connections you need to establish in the driver configuration. Consider the following example. Your application needs to support 20,000 CQL queries per second and is running on three instances on Amazon ECS where each instance How to configure retry policies 573 Amazon Keyspaces (for Apache Cassandra)
AmazonKeyspaces-176
AmazonKeyspaces.pdf
176
the assigned VPC. US East (N. Virginia) Region has five Availability Zones and you can have up to five Amazon Keyspaces endpoints. US West (N. California) Region has two Availability Zones and you can have up to two Amazon Keyspaces endpoints. The number of endpoints does not impact scale, but it does increase the number of connections you need to establish in the driver configuration. Consider the following example. Your application needs to support 20,000 CQL queries and is running on three instances on Amazon ECS where each instance How to configure retry policies 573 Amazon Keyspaces (for Apache Cassandra) Developer Guide establishes a single session to Amazon Keyspaces. The only difference is how many endpoints are available in the different AWS Regions. Connections required in the US East (N. Virginia) Region: 20,000 CQL queries / (3 instances - 1 failure) / 5 private VPC endpoints / 500 CQL queries per second = 4 local connections Connections required in the US West (N. California) Region: 20,000 CQL queries / (3 instances - 1 failure) / 2 private VPC endpoints / 500 CQL queries per second = 10 local connections Important When using private VPC endpoints, additional permissions are required for Amazon Keyspaces to discover the available VPC endpoints dynamically and populate the system.peers table. For more information, see the section called “Populating system.peers table entries with interface VPC endpoint information”. When accessing Amazon Keyspaces through a private VPC endpoint using a different AWS account, it’s likely that you only see a single Amazon Keyspaces endpoint. Again this doesn't impact the scale of possible throughput to Amazon Keyspaces, but it may require you to increase the number of connections in your driver configuration. This example shows the same calculation for a single available endpoint. 
20,000 CQL queries / (3 instances - 1 failure) / 1 private VPC endpoints / 500 CQL queries per second = 20 local connections To learn more about cross-account access to Amazon Keyspaces using a shared VPC, see the section called “Configure cross-account access in a shared VPC”. How to monitor connections in Amazon Keyspaces To help identify the number of endpoints your application is connected to, you can log the number of peers discovered in the system.peers table. The following example is an example of Java code which prints the number of peers after the connection has been established. How to monitor connections 574 Amazon Keyspaces (for Apache Cassandra) Developer Guide ResultSet result = session.execute(new SimpleStatement("SELECT * FROM system.peers")); logger.info("number of Amazon Keyspaces endpoints:" + result.all().stream().count()); Note The CQL console or AWS console are not deployed within a VPC and therefore use the public endpoint. As a result, running the system.peers query from applications located outside of the VPCE often results in 9 peers. It may also be helpful to print the IP addresses of each peer. You can also observe the number of peers when using a VPC endpoint by setting up VPCE Amazon CloudWatch metrics. In CloudWatch, you can see the number of connections established to the VPC endpoint. The Cassandra drivers establish a connection for each endpoint to send CQL queries and a control connection to gather system table information. The image below shows the VPC endpoint CloudWatch metrics after connecting to Amazon Keyspaces with 1 connection configured in the driver settings. The metric is showing six active connections consisting of one control connection and five connections (1 per endpoint across Availability Zones). To get started with monitoring the number of connections using a CloudWatch graph, you can deploy this AWS CloudFormation template available on GitHub in the Amazon Keyspaces template repository. 
How to handle connection errors in Amazon Keyspaces When exceeding the 3,000 requests per connection quota, Amazon Keyspaces returns a PerConnectionRequestExceeded event and the Cassandra driver receives a WriteTimeout or ReadTimeout exception. You should retry this exception with exponential backoff in your Cassandra retry policy or in your application. You should provide exponential backoff to avoid sending additional requests. How to handle connection errors 575 Amazon Keyspaces (for Apache Cassandra) Developer Guide The default retry policy attempts to try the next host in the query plan. Because Amazon Keyspaces may have one to three available endpoints when connecting to the VPC endpoint, you may also see the NoHostAvailableException in addition to the WriteTimeout and ReadTimeout exceptions in your application logs. You can use Amazon Keyspaces provided retry policies, which retry on the same endpoint but across different connections. You can find examples for exponential retry policies for Java on GitHub in the Amazon Keyspaces Java code examples repository. You can find additional language examples on GitHub in the Amazon Keyspaces code examples repository. Data modeling best practices: recommendations for designing data models Effective data modeling is crucial for optimizing performance and minimizing costs when working with Amazon Keyspaces (for Apache Cassandra). This topic
AmazonKeyspaces-177
AmazonKeyspaces.pdf
177
may also see the NoHostAvailableException in addition to the WriteTimeout and ReadTimeout exceptions in your application logs. You can use Amazon Keyspaces provided retry policies, which retry on the same endpoint but across different connections. You can find examples for exponential retry policies for Java on GitHub in the Amazon Keyspaces Java code examples repository. You can find additional language examples on Github in the Amazon Keyspaces code examples repository. Data modeling best practices: recommendations for designing data models Effective data modeling is crucial for optimizing performance and minimizing costs when working with Amazon Keyspaces (for Apache Cassandra). This topic covers key considerations and recommendations for designing data models that suit your application's data access patterns. • Partition Key Design – The partition key plays a critical role in determining how data is distributed across partitions in Amazon Keyspaces. Choosing an appropriate partition key can significantly impact query performance and throughput costs. This section discusses strategies for designing partition keys that promote even distribution of read and write activity across partitions. • Key Considerations: • Uniform activity distribution – Aim for uniform read and write activity across all partitions to minimize throughput costs and leverage burst capacity effectively. • Access patterns – Align your partition key design with your application's primary data access patterns. • Partition size – Avoid creating partitions that grow too large, as this can impact performance and increase costs. To visualize and design data models more easily, you can use the NoSQL Workbench. 
Topics • How to use partition keys effectively in Amazon Keyspaces Data modeling 576 Amazon Keyspaces (for Apache Cassandra) Developer Guide How to use partition keys effectively in Amazon Keyspaces The primary key that uniquely identifies each row in an Amazon Keyspaces table can consist of one or multiple partition key columns, which determine which partitions the data is stored in, and one or more optional clustering columns, which define how data is clustered and sorted within a partition. Because the partition key establishes the number of partitions your data is stored in and how the data is distributed across these partitions, how you choose your partition key can have a significant impact upon the performance of your queries. In general, you should design your application for uniform activity across all partitions on disk. Distributing read and write activity of your application evenly across all partitions helps to minimize throughput costs and this applies to on-demand as well as provisioned read/write capacity modes. For example, if you are using provisioned capacity mode, you can determine the access patterns that your application needs, and estimate the total read capacity units (RCU) and write capacity units (WCU) that each table requires. Amazon Keyspaces supports your access patterns using the throughput that you provisioned as long as the traffic against a given partition does not exceed 3,000 RCUs and 1,000 WCUs. Amazon Keyspaces offers additional flexibility in your per-partition throughput provisioning by providing burst capacity, for more information see the section called “Use burst capacity”. Topics • Use write sharding to evenly distribute workloads across partitions Use write sharding to evenly distribute workloads across partitions One way to better distribute writes across a partition in Amazon Keyspaces is to expand the space. You can do this in several different ways. 
You can add an additional partition key column to which you write random numbers to distribute the rows among partitions. Or you can use a number that is calculated based on something that you're querying on. Sharding using compound partition keys and random values One strategy for distributing loads more evenly across a partition is to add an additional partition key column to which you write random numbers. Then you randomize the writes across the larger space. For example, consider the following table which has a single partition key representing a date. Partition key design 577 Amazon Keyspaces (for Apache Cassandra) Developer Guide CREATE TABLE IF NOT EXISTS tracker.blogs ( publish_date date, title text, description int, PRIMARY KEY (publish_date)); To more evenly distribute this table across partitions, you could include an additional partition key column shard that stores random numbers. For example: CREATE TABLE IF NOT EXISTS tracker.blogs ( publish_date date, shard int, title text, description int, PRIMARY KEY ((publish_date, shard))); When inserting data you might choose a random number between 1 and 200 for the shard column. This yields compound partition key values like (2020-07-09, 1), (2020-07-09, 2), and so on, through (2020-07-09, 200). Because you are randomizing the partition key, the writes to the table on each day are spread evenly across multiple partitions. This results in better parallelism and higher overall throughput. However, to read all the rows for a given day, you would have to query the rows for all the shards and then merge the results. For example, you would first
AmazonKeyspaces-178
AmazonKeyspaces.pdf
178
int, PRIMARY KEY ((publish_date, shard))); When inserting data you might choose a random number between 1 and 200 for the shard column. This yields compound partition key values like (2020-07-09, 1), (2020-07-09, 2), and so on, through (2020-07-09, 200). Because you are randomizing the partition key, the writes to the table on each day are spread evenly across multiple partitions. This results in better parallelism and higher overall throughput. However, to read all the rows for a given day, you would have to query the rows for all the shards and then merge the results. For example, you would first issue a SELECT statement for the partition key value (2020-07-09, 1). Then issue another SELECT statement for (2020-07-09, 2), and so on, through (2020-07-09, 200). Finally, your application would have to merge the results from all those SELECT statements. Sharding using compound partition keys and calculated values A randomizing strategy can greatly improve write throughput. But it's difficult to read a specific row because you don't know which value was written to the shard column when the row was written. To make it easier to read individual rows, you can use a different strategy. Instead of using a random number to distribute the rows among partitions, use a number that you can calculate based upon something that you want to query on. Consider the previous example, in which a table uses today's date in the partition key. Now suppose that each row has an accessible title column, and that you most often need to find rows by title in addition to date. Before your application writes the row to the table, it could calculate a hash Partition key design 578 Amazon Keyspaces (for Apache Cassandra) Developer Guide value based on the title and use it to populate the shard column. The calculation might generate a number between 1 and 200 that is fairly evenly distributed, similar to what the random strategy produces. 
A simple calculation would likely suffice, such as the product of the UTF-8 code point values for the characters in the title, modulo 200, + 1. The compound partition key value would then be the combination of the date and calculation result. With this strategy, the writes are spread evenly across the partition key values, and thus across the physical partitions. You can easily perform a SELECT statement for a particular row and date because you can calculate the partition key value for a specific title value. To read all the rows for a given day, you still must SELECT each of the (2020-07-09, N) keys (where N is 1–200), and your application then has to merge all the results. The benefit is that you avoid having a single "hot" partition key value taking all of the workload. Optimizing costs of Amazon Keyspaces tables This section covers best practices on how to optimize costs for your existing Amazon Keyspaces tables. You should look at the following strategies to see which cost optimization strategy best suits your needs and approach them iteratively. Each strategy provides an overview of what might be impacting your costs, how to look for opportunities to optimize costs, and prescriptive guidance on how to implement these best practices to help you save. Topics • Evaluate your costs at the table level • Evaluate your table's capacity mode • Evaluate your table's Application Auto Scaling settings • Identify your unused resources to optimize costs in Amazon Keyspaces • Evaluate your table usage patterns to optimize performance and cost • Evaluate your provisioned capacity for right-sized provisioning Evaluate your costs at the table level The Cost Explorer tool found within the AWS Management Console allows you to see costs broken down by type, for example read, write, storage, and backup charges. You can also see these costs summarized by period such as month or day. 
Cost optimization 579 Amazon Keyspaces (for Apache Cassandra) Developer Guide One common challenge with Cost Explorer is that you can't review the costs of only one particular table easily, because Cost Explorer doesn't let you filter or group by costs of a specific table. You can view the metric Billable table size (Bytes) of each table in the Amazon Keyspaces console on the table's Monitor tab. If you need more cost related information per table, this section shows you how to use tagging to perform individual table cost analysis in Cost Explorer. Topics • How to view the costs of a single Amazon Keyspaces table • Cost Explorer's default view • How to use and apply table tags in Cost Explorer How to view the costs of a single Amazon Keyspaces table You can see basic information about an Amazon Keyspaces table in the console, including the primary key schema, the billable table size, and capacity related metrics. You can use the
AmazonKeyspaces-179
AmazonKeyspaces.pdf
179
the table's Monitor tab. If you need more cost related information per table, this section shows you how to use tagging to perform individual table cost analysis in Cost Explorer. Topics • How to view the costs of a single Amazon Keyspaces table • Cost Explorer's default view • How to use and apply table tags in Cost Explorer How to view the costs of a single Amazon Keyspaces table You can see basic information about an Amazon Keyspaces table in the console, including the primary key schema, the billable table size, and capacity related metrics. You can use the size of the table to calculate the monthly storage cost for the table. For example, $0.25 per GB in the US East (N. Virginia) AWS Region. If the table is using provisioned capacity mode, the current read capacity unit (RCU) and write capacity unit (WCU) settings are returned as well. You can use this information to calculate the current read and write costs for the table. Note that these costs could change, especially if you have configured the table with Amazon Keyspaces automatic scaling. Cost Explorer's default view The default view in Cost Explorer provides charts showing the cost of consumed resources, for example throughput and storage. You can choose to group these costs by period, such as totals by month or by day. The costs of storage, reads, writes, and other categories can be broken out and compared as well. Evaluate your costs at the table level 580 Amazon Keyspaces (for Apache Cassandra) Developer Guide How to use and apply table tags in Cost Explorer By default, Cost Explorer does not provide a summary of the costs for any one specific table, because it combines the costs of multiple tables into a total. However, you can use AWS resource tagging to identify each table by a metadata tag. Tags are key-value pairs that you can use for a variety of purposes, for example to identify all resources belonging to a project or department. For more information, see the section called “Working with tags”. 
For this example, we use a table with the name MyTable. 1. Set a tag with the key of table_name and the value of MyTable. 2. Activate the tag within Cost Explorer and then filter on the tag value to gain more visibility into each table's costs. Note It may take one or two days for the tag to start appearing in Cost Explorer You can set metadata tags yourself in the console, or programmatically with CQL, the AWS CLI, or the AWS SDK. Consider requiring a table_name tag to be set as part of your organization’s new table creation process. For more information, see the section called “Create cost allocation reports”. Evaluate your table's capacity mode This section provides an overview of how to select the appropriate capacity mode for your Amazon Keyspaces table. Each mode is tuned to meet the needs of a different workload in terms of Evaluate your table's capacity mode 581 Amazon Keyspaces (for Apache Cassandra) Developer Guide responsiveness to change in throughput, as well as how that usage is billed. You must balance these factors when making your decision. Topics • What table capacity modes are available • When to select on-demand capacity mode • When to select provisioned capacity mode • Additional factors to consider when choosing a table capacity mode What table capacity modes are available When you create an Amazon Keyspaces table, you must select either on-demand or provisioned capacity mode. For more information, see the section called “Configure read/write capacity modes”. On-demand capacity mode The on-demand capacity mode is designed to eliminate the need to plan or provision the capacity of your Amazon Keyspaces table. In this mode, your table instantly accommodates requests without the need to scale any resources up or down (up to twice the previous peak throughput of the table). On-demand tables are billed by counting the number of actual requests against the table, so you only pay for what you use rather than what has been provisioned. 
Provisioned capacity mode The provisioned capacity mode is a more traditional model where you can define how much capacity the table has available for requests either directly or with the assistance of Application Auto Scaling. Because a specific capacity is provisioned for the table at any given time, billing is based on the capacity provisioned rather than the number of requests. Going over the allocated capacity can also cause the table to reject requests and reduce the experience of your application's users. Provisioned capacity mode requires a balance between over-provisioning and under-provisioning the table to achieve both a low occurrence of insufficient throughput capacity errors and optimized costs. Evaluate your table's capacity mode 582 Amazon Keyspaces (for Apache Cassandra) Developer Guide When to
AmazonKeyspaces-180
AmazonKeyspaces.pdf
180
either directly or with the assistance of Application Auto Scaling. Because a specific capacity is provisioned for the table at any given time, billing is based off of the capacity provisioned rather than the number of requests. Going over the allocated capacity can also cause the table to reject requests and reduce the experience of your application's users. Provisioned capacity mode requires a balance between not over-provisioning or under provisioning the table to achieve both, low occurrence of insufficient throughput capacity errors, and optimized costs. Evaluate your table's capacity mode 582 Amazon Keyspaces (for Apache Cassandra) Developer Guide When to select on-demand capacity mode When optimizing for cost, on-demand mode is your best choice when you have an unpredictable workload similar to the one shown in the following graph. These factors contribute to this type of workload: • Unpredictable request timing (resulting in traffic spikes) • Variable volume of requests (resulting from batch workloads) • Drops to zero or below 18% of the peak for a given hour (resulting from development or test environments) For workloads with the above characteristics, using Application Auto Scaling to maintain enough capacity for the table to respond to spikes in traffic may lead to undesirable outcomes. Either the table could be over-provisioned and costing more than necessary, or the table could be under provisioned and requests are leading to unnecessary low capacity throughput errors. In cases like this, on-demand tables are the better choice. Because on-demand tables are billed by request, there is nothing further you need to do at the table level to optimize for cost. You should regularly evaluate your on-demand tables to verify the workload still has the above characteristics. If the workload has stabilized, consider changing to provisioned mode to maintain cost optimization. 
When to select provisioned capacity mode An ideal workload for provisioned capacity mode is one with a more predictable usage pattern as shown in the graph below. Evaluate your table's capacity mode 583 Amazon Keyspaces (for Apache Cassandra) Developer Guide The following factors contribute to a predictable workload: • Predictable/cyclical traffic for a given hour or day • Limited short-term bursts of traffic Since the traffic volumes within a given time or day are more stable, you can set the provisioned capacity relatively close to the actual consumed capacity of the table. Cost optimizing a provisioned capacity table is ultimately an exercise in getting the provisioned capacity (blue line) as close to the consumed capacity (orange line) as possible without increasing ThrottledRequests events for the table. The space between the two lines is both wasted capacity and insurance against a bad user experience due to insufficient throughput capacity errors. Amazon Keyspaces provides Application Auto Scaling for provisioned capacity tables, which automatically balances this on your behalf. You can track your consumed capacity throughout the day and configure the provisioned capacity of the table based on a handful of variables. Minimum capacity units You can set the minimum capacity of a table to limit the occurrence of insufficient throughput capacity errors, but it doesn't reduce the cost of the table. If your table has periods of low usage followed by a sudden burst of high usage, setting the minimum can prevent Application Auto Scaling from setting the table capacity too low. Maximum capacity units You can set the maximum capacity of a table to limit a table scaling higher than intended. Consider applying a maximum for development or test tables, where large-scale load testing is not desired. 
You can set a maximum for any table, but be sure to regularly evaluate this setting against the Evaluate your table's capacity mode 584 Amazon Keyspaces (for Apache Cassandra) Developer Guide table baseline when using it in production, to prevent accidental insufficient throughput capacity errors. Target utilization Setting the target utilization of the table is the primary means of cost optimization for a provisioned capacity table. Setting a lower percent value here increases how much the table is over-provisioned, increasing cost, but reducing the risk of insufficient throughput capacity errors. Setting a higher percentage value decreases by how much the table is over-provisioned, but increases the risk of insufficient throughput capacity errors. Additional factors to consider when choosing a table capacity mode When deciding between the two capacity modes, there are some additional factors worth considering. When deciding between the two table modes, consider how much this additional discount affects the cost of the table. In many cases, even a relatively unpredictable workload can be more cost effective to run on an over-provisioned provisioned capacity table with reserved capacity. Improving predictability of your workload In some situations, a workload may seemingly have both, a predictable and an unpredictable pattern. While this can be easily supported with an on-demand table, costs will likely be lower if the unpredictable patterns
AmazonKeyspaces-181
AmazonKeyspaces.pdf
181
a table capacity mode When deciding between the two capacity modes, there are some additional factors worth considering. When deciding between the two table modes, consider how much this additional discount affects the cost of the table. In many cases, even a relatively unpredictable workload can be more cost effective to run on an over-provisioned provisioned capacity table with reserved capacity. Improving predictability of your workload In some situations, a workload may seemingly have both, a predictable and an unpredictable pattern. While this can be easily supported with an on-demand table, costs will likely be lower if the unpredictable patterns in the workload can be improved. One of the most common causes of these patterns are batch imports. This type of traffic can often exceed the baseline capacity of the table to such a degree that insufficient throughput capacity errors would occur if it were to run. To keep a workload like this running on a provisioned capacity table, consider the following options: • If the batch occurs at scheduled times, you can schedule an increase to your application auto- scaling capacity before it runs. • If the batch occurs randomly, consider trying to extend the time it takes to run rather than executing as fast as possible. • Add a ramp up period to the import, where the velocity of the import starts small but is slowly increased over a few minutes until Application Auto Scaling has had the opportunity to start adjusting table capacity. Evaluate your table's capacity mode 585 Amazon Keyspaces (for Apache Cassandra) Developer Guide Evaluate your table's Application Auto Scaling settings This section provides an overview of how to evaluate the Application Auto Scaling settings of your Amazon Keyspaces tables. Amazon Keyspaces Application Auto Scaling is a feature that manages table throughput based on your application traffic and your target utilization metric. 
This ensures your tables have the capacity required for your application patterns. The Application Auto Scaling service monitors your current table utilization and compares it to the target utilization value: TargetValue. It notifies you if it is time to increase or decrease the allocated capacity. Topics • Understanding your Application Auto Scaling settings • How to identify tables with low target utilization (<=50%) • How to address workloads with seasonal variance • How to address spiky workloads with unknown patterns • How to address workloads with linked applications Understanding your Application Auto Scaling settings Defining the correct value for the target utilization, initial step, and final values is an activity that requires involvement from your operations team. This allows you to properly define the values based on historical application usage, which is used to trigger the Application Auto Scaling policies. The utilization target is the percentage of your total capacity that needs to be met during a period of time before the Application Auto Scaling rules apply. When you set a high utilization target (a target around 90%), it means your traffic needs to be higher than 90% for a period of time before the Application Auto Scaling is activated. You should not use a high utilization target unless your application is very constant and doesn’t receive spikes in traffic. When you set a very low utilization (a target less than 50%), it means your application would need to reach 50% of the provisioned capacity before it triggers an Application Auto Scaling policy. Unless your application traffic grows at a very aggressive rate, this usually translates into unused capacity and wasted resources. 
Evaluate your table's Application Auto Scaling settings 586 Amazon Keyspaces (for Apache Cassandra) Developer Guide How to identify tables with low target utilization (<=50%) You can use either the AWS CLI or AWS Management Console to monitor and identify the TargetValues for your Application Auto Scaling policies in your Amazon Keyspaces resources. Note When you're using multi-Region tables in provisioned capacity mode with Amazon Keyspaces auto scaling, make sure to use the Amazon Keyspaces API operations to configure auto scaling. The underlying Application Auto Scaling API operations that Amazon Keyspaces calls on your behalf don't have multi-Region capabilities. For more information, see the section called “View provisioned capacity and auto scaling settings for a multi-Region table”. AWS CLI 1. Return the entire list of resources by running the following command: aws application-autoscaling describe-scaling-policies --service-namespace cassandra This command will return the entire list of Application Auto Scaling policies that are issued to any Amazon Keyspaces resource. If you only want to retrieve the resources from a particular table, you can add the –resource-id parameter. For example: aws application-autoscaling describe-scaling-policies --service-namespace cassandra --resource-id "keyspace/keyspace-name/table/table-name” 2. Return only the auto scaling policies for a particular table by running the following command aws application-autoscaling describe-scaling-policies --service-namespace cassandra --resource-id "keyspace/keyspace-name/table/table-name” The values for the Application Auto Scaling policies are highlighted below. You need to ensure
AmazonKeyspaces-182
AmazonKeyspaces.pdf
182
the entire list of resources by running the following command: aws application-autoscaling describe-scaling-policies --service-namespace cassandra This command will return the entire list of Application Auto Scaling policies that are issued to any Amazon Keyspaces resource. If you only want to retrieve the resources from a particular table, you can add the –resource-id parameter. For example: aws application-autoscaling describe-scaling-policies --service-namespace cassandra --resource-id "keyspace/keyspace-name/table/table-name” 2. Return only the auto scaling policies for a particular table by running the following command aws application-autoscaling describe-scaling-policies --service-namespace cassandra --resource-id "keyspace/keyspace-name/table/table-name” The values for the Application Auto Scaling policies are highlighted below. You need to ensure that the target value is greater than 50% to avoid over-provisioning. You should obtain a result similar to the following: Evaluate your table's Application Auto Scaling settings 587 Amazon Keyspaces (for Apache Cassandra) Developer Guide { "ScalingPolicies": [ { "PolicyARN": "arn:aws:autoscaling:<region>:<account- id>:scalingPolicy:<uuid>:resource/keyspaces/table/table-name-scaling-policy", "PolicyName": $<full-gsi-name>”, "ServiceNamespace": "cassandra", "ResourceId": "keyspace/keyspace-name/table/table-name", "ScalableDimension": "cassandra:index:WriteCapacityUnits", "PolicyType": "TargetTrackingScaling", "TargetTrackingScalingPolicyConfiguration": { "TargetValue": 70.0, "PredefinedMetricSpecification": { "PredefinedMetricType": "KeyspacesWriteCapacityUtilization" } }, "Alarms": [ ... 
], "CreationTime": "2022-03-04T16:23:48.641000+10:00" }, { "PolicyARN": "arn:aws:autoscaling:<region>:<account- id>:scalingPolicy:<uuid>:resource/keyspaces/table/table-name/index/<index- name>:policyName/$<full-gsi-name>-scaling-policy", "PolicyName":$<full-table-name>”, "ServiceNamespace": "cassandra", "ResourceId": "keyspace/keyspace-name/table/table-name", "ScalableDimension": "cassandra:index:ReadCapacityUnits", "PolicyType": "TargetTrackingScaling", "TargetTrackingScalingPolicyConfiguration": { "TargetValue": 70.0, "PredefinedMetricSpecification": { "PredefinedMetricType": "CassandraReadCapacityUtilization" } }, "Alarms": [ ... ], "CreationTime": "2022-03-04T16:23:47.820000+10:00" } ] } Evaluate your table's Application Auto Scaling settings 588 Amazon Keyspaces (for Apache Cassandra) Developer Guide AWS Management Console 1. Log into the AWS Management Console and navigate to the CloudWatch service page at Getting Started with the AWS Management Console. Select the appropriate AWS Region if necessary. 2. On the left navigation bar, select Tables. On the Tables page, select the table's Name. 3. On the Table Details page on the Capacity tab, review your table's Application Auto Scaling settings. If your target utilization values are less than or equal to 50%, you should explore your table utilization metrics to see if they are under-provisioned or over-provisioned. How to address workloads with seasonal variance Consider the following scenario: your application is operating under a minimum average value most of the time, but the utilization target is low so your application can react quickly to events that happen at certain hours in the day and you have enough capacity and avoid getting throttled. This scenario is common when you have an application that is very busy during normal office hours (9 AM to 5 PM) but then it works at a base level during after hours. 
Since some users start to connect before 9 am, the application uses this low threshold to ramp up quickly to get to the required capacity during peak hours. This scenario could look like this: • Between 5 PM and 9 AM the ConsumedWriteCapacityUnits units stay between 90 and 100 • Users start to connect to the application before 9 AM and the capacity units increases considerably (the maximum value you’ve seen is 1500 WCU) • On average, your application usage varies between 800 to 1200 during working hours If the previous scenario applies to your application, consider using scheduled application auto scaling, where your table could still have an Application Auto Scaling rule configured, but with a less aggressive target utilization that only provisions the extra capacity at the specific intervals you require. Evaluate your table's Application Auto Scaling settings 589 Amazon Keyspaces (for Apache Cassandra) Developer Guide You can use the AWS CLI to execute the following steps to create a scheduled auto scaling rule that executes based on the time of day and the day of the week. 1. Register your Amazon Keyspaces table as a scalable target with Application Auto Scaling. A scalable target is a resource that Application Auto Scaling can scale out or in. aws application-autoscaling register-scalable-target \ --service-namespace cassandra \ --scalable-dimension cassandra:table:WriteCapacityUnits \ --resource-id keyspace/keyspace-name/table/table-name \ --min-capacity 90 \ --max-capacity 1500 2. Set up scheduled actions according to your requirements. You need two rules to cover the scenario: one to scale up and another to scale down. The first rule to scale up the scheduled action is shown in the following example. 
aws application-autoscaling put-scheduled-action \ --service-namespace cassandra \ --scalable-dimension cassandra:table:WriteCapacityUnits \ --resource-id keyspace/keyspace-name/table/table-name \ --scheduled-action-name my-8-5-scheduled-action \ --scalable-target-action MinCapacity=800,MaxCapacity=1500 \ --schedule "cron(45 8 ? * MON-FRI *)" \ --timezone "Australia/Brisbane" The second rule to scale down the scheduled action is shown in this example. aws application-autoscaling put-scheduled-action \ --service-namespace cassandra \ --scalable-dimension cassandra:table:WriteCapacityUnits \ --resource-id keyspace/keyspace-name/table/table-name \ --scheduled-action-name my-5-8-scheduled-down-action \ --scalable-target-action MinCapacity=90,MaxCapacity=1500 \ --schedule "cron(15 17 ? * MON-FRI *)" \ --timezone "Australia/Brisbane" 3. Run the following command to validate both rules have been activated: Evaluate your table's Application Auto Scaling settings 590 Amazon Keyspaces (for Apache Cassandra) Developer Guide aws application-autoscaling describe-scheduled-actions --service-namespace cassandra You should get a result like this: { "ScheduledActions": [ { "ScheduledActionName": "my-5-8-scheduled-down-action", "ScheduledActionARN": "arn:aws:autoscaling:<region>:<account>:scheduledAction:<uuid>:resource/keyspaces/ table/table-name:scheduledActionName/my-5-8-scheduled-down-action", "ServiceNamespace": "cassandra", "Schedule": "cron(15 17 ? * MON-FRI *)", "Timezone": "Australia/Brisbane", "ResourceId": "keyspace/keyspace-name/table/table-name", "ScalableDimension": "cassandra:table:WriteCapacityUnits",
AmazonKeyspaces-183
AmazonKeyspaces.pdf
183
the scheduled action is shown in this example. aws application-autoscaling put-scheduled-action \ --service-namespace cassandra \ --scalable-dimension cassandra:table:WriteCapacityUnits \ --resource-id keyspace/keyspace-name/table/table-name \ --scheduled-action-name my-5-8-scheduled-down-action \ --scalable-target-action MinCapacity=90,MaxCapacity=1500 \ --schedule "cron(15 17 ? * MON-FRI *)" \ --timezone "Australia/Brisbane" 3. Run the following command to validate both rules have been activated: Evaluate your table's Application Auto Scaling settings 590 Amazon Keyspaces (for Apache Cassandra) Developer Guide aws application-autoscaling describe-scheduled-actions --service-namespace cassandra You should get a result like this: { "ScheduledActions": [ { "ScheduledActionName": "my-5-8-scheduled-down-action", "ScheduledActionARN": "arn:aws:autoscaling:<region>:<account>:scheduledAction:<uuid>:resource/keyspaces/ table/table-name:scheduledActionName/my-5-8-scheduled-down-action", "ServiceNamespace": "cassandra", "Schedule": "cron(15 17 ? * MON-FRI *)", "Timezone": "Australia/Brisbane", "ResourceId": "keyspace/keyspace-name/table/table-name", "ScalableDimension": "cassandra:table:WriteCapacityUnits", "ScalableTargetAction": { "MinCapacity": 90, "MaxCapacity": 1500 }, "CreationTime": "2022-03-15T17:30:25.100000+10:00" }, { "ScheduledActionName": "my-8-5-scheduled-action", "ScheduledActionARN": "arn:aws:autoscaling:<region>:<account>:scheduledAction:<uuid>:resource/keyspaces/ table/table-name:scheduledActionName/my-8-5-scheduled-action", "ServiceNamespace": "cassandra", "Schedule": "cron(45 8 ? 
* MON-FRI *)", "Timezone": "Australia/Brisbane", "ResourceId": "keyspace/keyspace-name/table/table-name", "ScalableDimension": "cassandra:table:WriteCapacityUnits", "ScalableTargetAction": { "MinCapacity": 800, "MaxCapacity": 1500 }, "CreationTime": "2022-03-15T17:28:57.816000+10:00" } ] } Evaluate your table's Application Auto Scaling settings 591 Amazon Keyspaces (for Apache Cassandra) Developer Guide The following picture shows a sample workload that always keeps the 70% target utilization. Notice how the auto scaling rules are still applying and the throughput is not getting reduced. Zooming in, we can see there was a spike in the application that triggered the 70% auto scaling threshold, forcing the autoscaling to kick in and provide the extra capacity required for the table. The scheduled auto scaling action will affect maximum and minimum values, and it's your responsibility to set them up. Evaluate your table's Application Auto Scaling settings 592 Amazon Keyspaces (for Apache Cassandra) Developer Guide How to address spiky workloads with unknown patterns In this scenario, the application uses a very low utilization target, because you don’t know the application patterns yet, and you want to ensure your workload is not experiencing low capacity throughput errors. Consider using on-demand capacity mode instead. On-demand tables are perfect for spiky workloads where you don’t know the traffic patterns. With on-demand capacity mode, you pay per request for the data reads and writes your application performs on your tables. You do not need to specify how much read and write throughput you expect your application to perform, as Amazon Keyspaces instantly accommodates your workloads as they ramp up or down. How to address workloads with linked applications In this scenario, the application depends on other systems, like batch processing scenarios where you can have big spikes in traffic according to events in the application logic. 
Consider developing custom application auto-scaling logic that reacts to those events where you can increase table capacity and TargetValues depending on your specific needs. You could benefit from Amazon EventBridge and use a combination of AWS services like Lambda and Step Functions to react to your specific application needs. Identify your unused resources to optimize costs in Amazon Keyspaces This section provides an overview of how to evaluate your unused resources regularly. As your application requirements evolve, you should ensure no resources are unused and contributing to unnecessary Amazon Keyspaces costs. The procedures described below use Amazon CloudWatch metrics to identify unused resources and take action to reduce costs. You can monitor Amazon Keyspaces using CloudWatch, which collects and processes raw data from Amazon Keyspaces into readable, near real-time metrics. These statistics are retained for a period of time, so that you can access historical information to better understand your utilization. By default, Amazon Keyspaces metric data is sent to CloudWatch automatically. For more information, see What is Amazon CloudWatch? and Metrics retention in the Amazon CloudWatch User Guide. Topics • How to identify unused resources • Identifying unused table resources • Cleaning up unused table resources Identify your unused resources 593 Amazon Keyspaces (for Apache Cassandra) Developer Guide • Cleaning up unused point-in-time recovery (PITR) backups How to identify unused resources To identify unused tables you can take a look at the following CloudWatch metrics over a period of 30 days to understand if there are any active reads or writes on a specific table: ConsumedReadCapacityUnits The number of read capacity units consumed over the specified time period, so you can track how much consumed capacity you have used. You can retrieve the total consumed read capacity for a table. 
ConsumedWriteCapacityUnits The number of write capacity units consumed over the specified time period, so you can track how much consumed capacity you have used. You can retrieve the total consumed write capacity for a table. Identifying unused table resources Amazon CloudWatch is a monitoring and observability service which provides the Amazon Keyspaces table metrics you can use to identify unused resources. CloudWatch metrics can be viewed through the AWS Management Console as well as through the AWS Command Line Interface. AWS Command Line Interface To view your tables metrics through the AWS Command Line Interface, you can use the following commands. 1. First, evaluate your table's
AmazonKeyspaces-184
AmazonKeyspaces.pdf
184
units consumed over the specified time period, so you can track how much consumed capacity you have used. You can retrieve the total consumed write capacity for a table. Identifying unused table resources Amazon CloudWatch is a monitoring and observability service which provides the Amazon Keyspaces table metrics you can use to identify unused resources. CloudWatch metrics can be viewed through the AWS Management Console as well as through the AWS Command Line Interface. AWS Command Line Interface To view your tables metrics through the AWS Command Line Interface, you can use the following commands. 1. First, evaluate your table's reads: Note If the table name is not unique within your account, you must also specify the name of the keyspace. aws cloudwatch get-metric-statistics --metric-name Identify your unused resources 594 Amazon Keyspaces (for Apache Cassandra) Developer Guide ConsumedReadCapacityUnits --start-time <start-time> --end-time <end- time> --period <period> --namespace AWS/Cassandra --statistics Sum -- dimensions Name=TableName,Value=<table-name> To avoid falsely identifying a table as unused, evaluate metrics over a longer period. Choose an appropriate start-time and end-time range, such as 30 days, and an appropriate period, such as 86400. In the returned data, any Sum above 0 indicates that the table you are evaluating received read traffic during that period. The following result shows a table receiving read traffic in the evaluated period: { "Timestamp": "2022-08-25T19:40:00Z", "Sum": 36023355.0, "Unit": "Count" }, { "Timestamp": "2022-08-12T19:40:00Z", "Sum": 38025777.5, "Unit": "Count" }, The following result shows a table not receiving read traffic in the evaluated period: { "Timestamp": "2022-08-01T19:50:00Z", "Sum": 0.0, "Unit": "Count" }, { "Timestamp": "2022-08-20T19:50:00Z", "Sum": 0.0, "Unit": "Count" }, 2. 
Next, evaluate your table’s writes: aws cloudwatch get-metric-statistics --metric-name ConsumedWriteCapacityUnits --start-time <start-time> --end-time <end- time> --period <period> --namespace AWS/Cassandra --statistics Sum -- Identify your unused resources 595 Amazon Keyspaces (for Apache Cassandra) Developer Guide dimensions Name=TableName,Value=<table-name> To avoid falsely identifying a table as unused, you will want to evaluate metrics over a longer period. Choose an appropriate start-time and end-time range, such as 30 days, and an appropriate period, such as 86400. In the returned data, any Sum above 0 indicates that the table you are evaluating received read traffic during that period. The following result shows a table receiving write traffic in the evaluated period: { "Timestamp": "2022-08-19T20:15:00Z", "Sum": 41014457.0, "Unit": "Count" }, { "Timestamp": "2022-08-18T20:15:00Z", "Sum": 40048531.0, "Unit": "Count" }, The following result shows a table not receiving write traffic in the evaluated period: { "Timestamp": "2022-07-31T20:15:00Z", "Sum": 0.0, "Unit": "Count" }, { "Timestamp": "2022-08-19T20:15:00Z", "Sum": 0.0, "Unit": "Count" }, AWS Management Console The following steps allow you to evaluate your resource utilization through the AWS Management Console. Identify your unused resources 596 Amazon Keyspaces (for Apache Cassandra) Developer Guide 1. Log into the AWS Management Console and navigate to the CloudWatch service page at https://console.aws.amazon.com/cloudwatch/. Select the appropriate AWS Region in the top right of the console, if necessary. 2. On the left navigation bar, locate the Metrics section and choose All metrics. 3. The action above opens a dashboard with two panels. In the top panel, you can see currently graphed metrics. On the bottom you can select the metrics available to graph. Choose Amazon Keyspaces in the bottom panel. 4. 
In the Amazon Keyspaces metrics selection panel, choose the Table Metrics category to show the metrics for your tables in the current region. 5. Identify your table name by scrolling down the menu, then choose the metrics ConsumedReadCapacityUnits and ConsumedWriteCapacityUnits for your table. 6. Choose the Graphed metrics (2) tab and adjust the Statistic column to Sum. 7. To avoid falsely identifying a table as unused, evaluate the table metrics over a longer period. At the top of the graph panel, choose an appropriate time frame, such as 1 month, to evaluate your table. Choose Custom, choose 1 Months in the drop-down menu, and choose Apply. 8. Evaluate the graphed metrics for your table to determine if it is being used. Metrics that have gone above 0 indicate that a table has been used during the evaluated time period. A flat graph at 0 for both read and write indicates that a table is unused. Cleaning up unused table resources If you have identified unused table resources, you can reduce their ongoing costs in the following ways. Note If you have identified an unused table but would still like to keep it available in case it needs to be accessed in the future, consider switching it to on-demand mode. Otherwise, you can consider deleting the table. Capacity modes Amazon Keyspaces charges for reading, writing, and storing data in your Amazon Keyspaces tables. Identify your unused resources 597 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces has two capacity modes, which come with specific billing options for processing reads and writes on your tables: on-demand and provisioned. The read/write capacity
AmazonKeyspaces-185
AmazonKeyspaces.pdf
185
their ongoing costs in the following ways. Note If you have identified an unused table but would still like to keep it available in case it needs to be accessed in the future, consider switching it to on-demand mode. Otherwise, you can consider deleting the table. Capacity modes Amazon Keyspaces charges for reading, writing, and storing data in your Amazon Keyspaces tables. Identify your unused resources 597 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces has two capacity modes, which come with specific billing options for processing reads and writes on your tables: on-demand and provisioned. The read/write capacity mode controls how you are charged for read and write throughput and how you manage capacity. For on-demand mode tables, you don't need to specify how much read and write throughput you expect your application to perform. Amazon Keyspaces charges you for the reads and writes that your application performs on your tables in terms of read request units and write request units. If there is no activity on your table, you do not pay for throughput but you still incur a storage charge. Deleting tables If you’ve discovered an unused table and would like to delete it, consider making a backup or exporting the data first. Backups taken through AWS Backup can leverage cold storage tiering, further reducing cost. Refer to the Managing backup plans documentation for information on how to use a lifecycle to move your backup to cold storage. After your table has been backed up, you may choose to delete it either through the AWS Management Console or through the AWS Command Line Interface. Cleaning up unused point-in-time recovery (PITR) backups Amazon Keyspaces offers Point-in-time recovery, which provides continuous backups for 35 days to help you protect against accidental writes or deletes. PITR backups have costs associated with them. 
Refer to the documentation at the section called “Backup and restore with point-in-time recovery” to determine if your tables have backups enabled that may no longer be needed. Evaluate your table usage patterns to optimize performance and cost This section provides an overview of how to evaluate if you are efficiently using your Amazon Keyspaces tables. There are certain usage patterns which are not optimal for Amazon Keyspaces, and they allow room for optimization from both a performance and a cost perspective. Topics • Perform fewer strongly-consistent read operations Evaluate your table usage patterns 598 Amazon Keyspaces (for Apache Cassandra) • Enable Time to Live (TTL) Developer Guide Perform fewer strongly-consistent read operations Amazon Keyspaces allows you to configure read consistency on a per-request basis. Read requests are eventually consistent by default. Eventually consistent reads are charged at 0.5 RCU for up to 4 KB of data. Most parts of distributed workloads are flexible and can tolerate eventual consistency. However, there can be access patterns requiring strongly consistent reads. Strongly consistent reads are charged at 1 RCU for up to 4 KB of data, essentially doubling your read costs. Amazon Keyspaces provides you with the flexibility to use both consistency models on the same table. You can evaluate your workload and application code to confirm if strongly consistent reads are used only where required. Enable Time to Live (TTL) Time to Live (TTL) helps you simplify your application logic and optimize the price of storage by expiring data from tables automatically. Data that you no longer need is automatically deleted from your table based on the Time to Live value that you set. Evaluate your provisioned capacity for right-sized provisioning This section provides an overview of how to evaluate if you have right-sized provisioning on your Amazon Keyspaces tables. 
As your workload evolves, you should modify your operational procedures appropriately, especially when your Amazon Keyspaces table is configured in provisioned mode and you have the risk to over-provision or under-provision your tables. The procedures described in this section require statistical information that should be captured from the Amazon Keyspaces tables that are supporting your production application. To understand your application behavior, you should define a period of time that is significant enough to capture the data seasonality of your application. For example, if your application shows weekly patterns, using a three week period should give you enough room for analysing application throughput needs. If you don’t know where to start, use at least one month’s worth of data usage for the calculations below. Evaluate your provisioned capacity for right-sized provisioning 599 Amazon Keyspaces (for Apache Cassandra) Developer Guide While evaluating capacity, for Amazon Keyspaces tables you can configure Read Capacity Units (RCUs) and Write Capacity Units (WCU) independently. Topics • How to retrieve consumption metrics from your Amazon Keyspaces tables • How to identify under-provisioned Amazon Keyspaces tables • How to identify over-provisioned Amazon Keyspaces tables How to retrieve consumption metrics from your Amazon Keyspaces tables
AmazonKeyspaces-186
AmazonKeyspaces.pdf
186
enough room for analysing application throughput needs. If you don’t know where to start, use at least one month’s worth of data usage for the calculations below. Evaluate your provisioned capacity for right-sized provisioning 599 Amazon Keyspaces (for Apache Cassandra) Developer Guide While evaluating capacity, for Amazon Keyspaces tables you can configure Read Capacity Units (RCUs) and Write Capacity Units (WCU) independently. Topics • How to retrieve consumption metrics from your Amazon Keyspaces tables • How to identify under-provisioned Amazon Keyspaces tables • How to identify over-provisioned Amazon Keyspaces tables How to retrieve consumption metrics from your Amazon Keyspaces tables To evaluate the table capacity, monitor the following CloudWatch metrics and select the appropriate dimension to retrieve table information: Read Capacity Units Write Capacity Units ConsumedReadCapacityUnits ConsumedWriteCapacityUnits ProvisionedReadCapacityUnits ProvisionedWriteCapacityUnits ReadThrottleEvents WriteThrottleEvents You can do this either through the AWS CLI or the AWS Management Console. AWS CLI Before you retrieve the table consumption metrics, you need to start by capturing some historical data points using the CloudWatch API. Start by creating two files: write-calc.json and read-calc.json. These files represent the calculations for the table. You need to update some of the fields, as indicated in the table below, to match your environment. Note If the table name is not unique within your account, you must also specify the name of the keyspace. 
Evaluate your provisioned capacity for right-sized provisioning 600 Amazon Keyspaces (for Apache Cassandra) Developer Guide Field Name Definition Example <table-name> <period> <start-time> <end-time> The name of the table that you are analysing SampleTable The period of time that you are using to evaluate the For a 1-hour period you should specify: 3600 utilization target, based in seconds The beginning of your evaluation interval, specified in ISO8601 format The end of your evaluatio n interval, specified in ISO8601 format 2022-02-21T23:00:00 2022-02-22T06:00:00 The write calculations file retrieves the number of WCU provisioned and consumed in the time period for the date range specified. It also generates a utilization percentage that can be used for analysis. The full content of the write-calc.json file should look like in the following example. { "MetricDataQueries": [ { "Id": "provisionedWCU", "MetricStat": { "Metric": { "Namespace": "AWS/Cassandra", "MetricName": "ProvisionedWriteCapacityUnits", "Dimensions": [ { "Name": "TableName", "Value": "<table-name>" } ] }, "Period": <period>, Evaluate your provisioned capacity for right-sized provisioning 601 Amazon Keyspaces (for Apache Cassandra) Developer Guide "Stat": "Average" }, "Label": "Provisioned", "ReturnData": false }, { "Id": "consumedWCU", "MetricStat": { "Metric": { "Namespace": "AWS/Cassandra", "MetricName": "ConsumedWriteCapacityUnits", "Dimensions": [ { "Name": "TableName", "Value": "<table-name>"" } ] }, "Period": <period>, "Stat": "Sum" }, "Label": "", "ReturnData": false }, { "Id": "m1", "Expression": "consumedWCU/PERIOD(consumedWCU)", "Label": "Consumed WCUs", "ReturnData": false }, { "Id": "utilizationPercentage", "Expression": "100*(m1/provisionedWCU)", "Label": "Utilization Percentage", "ReturnData": true } ], "StartTime": "<start-time>", "EndTime": "<end-time>", "ScanBy": "TimestampDescending", "MaxDatapoints": 24 } Evaluate your provisioned capacity for right-sized provisioning 602 Amazon 
Keyspaces (for Apache Cassandra) Developer Guide The read calculations file uses a similar metrics. This file retrieves how many RCUs were provisioned and consumed during the time period for the date range specified. The contents of the read-calc.json file should look like in this example. { "MetricDataQueries": [ { "Id": "provisionedRCU", "MetricStat": { "Metric": { "Namespace": "AWS/Cassandra", "MetricName": "ProvisionedReadCapacityUnits", "Dimensions": [ { "Name": "TableName", "Value": "<table-name>" } ] }, "Period": <period>, "Stat": "Average" }, "Label": "Provisioned", "ReturnData": false }, { "Id": "consumedRCU", "MetricStat": { "Metric": { "Namespace": "AWS/Cassandra", "MetricName": "ConsumedReadCapacityUnits", "Dimensions": [ { "Name": "TableName", "Value": "<table-name>" } ] }, "Period": <period>, "Stat": "Sum" }, "Label": "", "ReturnData": false Evaluate your provisioned capacity for right-sized provisioning 603 Amazon Keyspaces (for Apache Cassandra) Developer Guide }, { "Id": "m1", "Expression": "consumedRCU/PERIOD(consumedRCU)", "Label": "Consumed RCUs", "ReturnData": false }, { "Id": "utilizationPercentage", "Expression": "100*(m1/provisionedRCU)", "Label": "Utilization Percentage", "ReturnData": true } ], "StartTime": "<start-time>", "EndTime": "<end-time>", "ScanBy": "TimestampDescending", "MaxDatapoints": 24 } Once you've created the files, you can start retrieving utilization data. 1. To retrieve the write utilization data, issue the following command: aws cloudwatch get-metric-data --cli-input-json file://write-calc.json 2. To retrieve the read utilization data, issue the following command: aws cloudwatch get-metric-data --cli-input-json file://read-calc.json The result for both queries is a series of data points in JSON format that can be used for analysis. Your results depend on the number of data points you specified, the period, and your own specific workload data. It could look like in the following example. 
{ "MetricDataResults": [ { "Id": "utilizationPercentage", "Label": "Utilization Percentage", "Timestamps": [ "2022-02-22T05:00:00+00:00", Evaluate your provisioned capacity for right-sized provisioning 604 Amazon Keyspaces (for Apache Cassandra) Developer Guide "2022-02-22T04:00:00+00:00", "2022-02-22T03:00:00+00:00", "2022-02-22T02:00:00+00:00", "2022-02-22T01:00:00+00:00", "2022-02-22T00:00:00+00:00", "2022-02-21T23:00:00+00:00" ], "Values": [ 91.55364583333333, 55.066631944444445, 2.6114930555555556, 24.9496875, 40.94725694444445, 25.61819444444444, 0.0 ], "StatusCode": "Complete" } ], "Messages": [] } Note If you specify a short period and a long time range, you might need to modify the MaxDatapoints value, which is by default set to 24 in the script. This represents
AmazonKeyspaces-187
AmazonKeyspaces.pdf
187
specified, the period, and your own specific workload data. It could look like in the following example. { "MetricDataResults": [ { "Id": "utilizationPercentage", "Label": "Utilization Percentage", "Timestamps": [ "2022-02-22T05:00:00+00:00", Evaluate your provisioned capacity for right-sized provisioning 604 Amazon Keyspaces (for Apache Cassandra) Developer Guide "2022-02-22T04:00:00+00:00", "2022-02-22T03:00:00+00:00", "2022-02-22T02:00:00+00:00", "2022-02-22T01:00:00+00:00", "2022-02-22T00:00:00+00:00", "2022-02-21T23:00:00+00:00" ], "Values": [ 91.55364583333333, 55.066631944444445, 2.6114930555555556, 24.9496875, 40.94725694444445, 25.61819444444444, 0.0 ], "StatusCode": "Complete" } ], "Messages": [] } Note If you specify a short period and a long time range, you might need to modify the MaxDatapoints value, which is by default set to 24 in the script. This represents one data point per hour and 24 per day. AWS Management Console 1. Log into the AWS Management Console and navigate to the CloudWatch service page at Getting Started with the AWS Management Console. Select the appropriate AWS Region if necessary. 2. 3. Locate the Metrics section on the left navigation bar and choose All metrics. This opens a dashboard with two panels. The top panel shows you the graphic, and the bottom panel has the metrics that you want to graph. Choose the Amazon Keyspaces panel. 4. Choose the Table Metrics category from the sub panels. This shows you the tables in your current AWS Region. Evaluate your provisioned capacity for right-sized provisioning 605 Amazon Keyspaces (for Apache Cassandra) Developer Guide 5. Identify your table name by scrolling down the menu and selecting the write operation metrics: ConsumedWriteCapacityUnits and ProvisionedWriteCapacityUnits Note This example talks about write operation metrics, but you can also use these steps to graph the read operation metrics. 6. Select the Graphed metrics (2) tab to modify the formulas. 
By default CloudWatch chooses the statistical function Average for the graphs. 7. While having both graphed metrics selected (the checkbox on the left) select the menu Add math, followed by Common, and then select the Percentage function. Repeat the procedure twice. First time selecting the Percentage function. Second time selecting the Percentage function. 8. At this point you should have four metrics in the bottom menu. Let’s work on the ConsumedWriteCapacityUnits calculation. To be consistent, you need to match the names with the ones you used in the AWS CLI section. Click on the m1 ID and change this value to consumedWCU. 9. Change the statistic from Average to Sum. This action automatically creates another metric called ANOMALY_DETECTION_BAND. For the scope of this procedure, you can ignore this by removing the checkbox on the newly generated ad1 metric. 10. Repeat step 8 to rename the m2 ID to provisionedWCU. Leave the statistic set to Average. 11. Choose the Expression1 label and update the value to m1 and the label to Consumed WCUs. Note Make sure you have only selected m1 (checkbox on the left) and provisionedWCU to properly visualize the data. Update the formula by clicking in Details and changing the formula to consumedWCU/PERIOD(consumedWCU). This step might also generate another ANOMALY_DETECTION_BAND metric, but for the scope of this procedure you can ignore it. Evaluate your provisioned capacity for right-sized provisioning 606 Amazon Keyspaces (for Apache Cassandra) Developer Guide 12. You should now have two graphics: one that indicates your provisioned WCUs on the table and another that indicates the consumed WCUs. 13. Update the percentage formula by selecting the Expression2 graphic (e2). Rename the labels and IDs to utilizationPercentage. Rename the formula to match 100*(m1/ provisionedWCU). 14. Remove the checkbox from all the metrics except utilizationPercentage to visualize your utilization patterns. 
The default interval is set to 1 minute, but feel free to modify it as needed. The results you get depend on the actual data from your workload. Intervals with more than 100% utilization are prone to low throughput capacity error events. Amazon Keyspaces offers burst capacity, but as soon as the burst capacity is exhausted, anything above 100% experiences low throughput capacity error events. How to identify under-provisioned Amazon Keyspaces tables For most workloads, a table is considered under-provisioned when it constantly consumes more than 80% of its provisioned capacity. Burst capacity is an Amazon Keyspaces feature that allows customers to temporarily consume more RCUs/WCUs than originally provisioned (more than the per-second provisioned throughput that was defined for the table). The burst capacity was created to absorb sudden increases in traffic due to special events or usage spikes. This burst capacity is limited; for more information, see the section called “Use burst capacity”. As soon as the unused RCUs and WCUs are depleted, you can experience low capacity throughput error events if you try to consume more capacity than provisioned. When your application traffic is getting close to the 80% utilization rate, your risk of experiencing low capacity throughput error events is significantly higher. The 80% utilization rate rule varies depending on the seasonality of your data and your traffic growth. Consider the following scenarios: • If
AmazonKeyspaces-188
AmazonKeyspaces.pdf
188
to absorb sudden increases in traffic due to special events or usage spikes. This burst capacity is limited; for more information, see the section called “Use burst capacity”. As soon as the unused RCUs and WCUs are depleted, you can experience low capacity throughput error events if you try to consume more capacity than provisioned. When your application traffic is getting close to the 80% utilization rate, your risk of experiencing low capacity throughput error events is significantly higher. The 80% utilization rate rule varies depending on the seasonality of your data and your traffic growth. Consider the following scenarios: • If your traffic has been stable at ~90% utilization rate for the last 12 months, your table has just the right capacity • If your application traffic is growing at a rate of 8% monthly in less than 3 months, you will arrive at 100% • If your application traffic is growing at a rate of 5% in a little more than 4 months, you will still arrive at 100% Evaluate your provisioned capacity for right-sized provisioning 607 Amazon Keyspaces (for Apache Cassandra) Developer Guide The results from the queries above provide a picture of your utilization rate. Use them as a guide to further evaluate other metrics that can help you choose to increase your table capacity as required (for example: a monthly or weekly growth rate). Work with your operations team to define what is a good percentage for your workload and your tables. There are special scenarios where the data is skewed when you analyse it on a daily or weekly basis. For example, with seasonal applications that have spikes in usage during working hours (but then drop to almost zero outside of working hours), you could benefit from scheduling application auto-scaling, where you specify the hours of the day (and the days of the week) to increase the provisioned capacity, as well as when to reduce it. 
Instead of aiming for higher capacity so you can cover the busy hours, you can also benefit from Amazon Keyspaces table auto-scaling configurations if your seasonality is less pronounced. How to identify over-provisioned Amazon Keyspaces tables The query results obtained from the scripts above provide the data points required to perform some initial analysis. If your data set presents values lower than 20% utilization for several intervals, your table might be over-provisioned. To further define if you need to reduce the number of WCUs and RCUS, you should revisit the other readings in the intervals. When your table contains several low usage intervals, you can benefit from using Application Auto Scaling policies, either by scheduling Application Auto Scaling or by just configuring the default Application Auto Scaling policies for the table that are based on utilization. If you have a workload with a low utilization to high throttle ratio (Max(ThrottleEvents)/ Min(ThrottleEvents) in the interval), this could happen when you have a very spiky workload where traffic increases significantly on specific days (or times of day), but is otherwise consistently low. In these scenarios, it might be beneficial to use scheduled Application Auto Scaling. Evaluate your provisioned capacity for right-sized provisioning 608 Amazon Keyspaces (for Apache Cassandra) Developer Guide Troubleshooting Amazon Keyspaces (for Apache Cassandra) This guide covers troubleshooting steps for various scenarios when working with Amazon Keyspaces (for Apache Cassandra). It includes information on resolving general errors, connection issues, capacity management problems, and Data Definition Language (DDL) errors. • General errors • Troubleshooting top-level exceptions like NoNodeAvailableException, NoHostAvailableException, and AllNodesFailedException. • Isolating underlying errors from Java driver exceptions. • Implementing retry policies and configuring connections correctly. 
• Connection issues • Resolving errors when connecting to Amazon Keyspaces endpoints using cqlsh or Apache Cassandra client drivers. • Troubleshooting VPC endpoint connections, Cassandra-stress connections, and IAM configuration errors. • Handling connection losses during data imports. • Capacity management errors • Recognizing and resolving insufficient capacity errors related to tables, partitions, and connections. • Monitoring relevant Amazon Keyspaces metrics in Amazon CloudWatch Logs. • Optimizing connections and throughput for improved performance. • Data Definition Language (DDL) errors • Troubleshooting errors when creating, accessing, or restoring keyspaces and tables. • Handling failures related to custom Time to Live (TTL) settings, column limits, and range deletes. • Considerations for heavy delete workloads. For troubleshooting guidance specific to IAM access, see the section called “Troubleshooting”. For more information about security best practices, see the section called “Security best practices”. 609 Amazon Keyspaces (for Apache Cassandra) Developer Guide Topics • Troubleshooting general errors in Amazon Keyspaces • Troubleshooting connection errors in Amazon Keyspaces • Troubleshooting capacity management errors in Amazon Keyspaces • Troubleshooting data definition language errors in Amazon Keyspaces Troubleshooting general errors in Amazon Keyspaces Getting general errors? Here are some common issues and how to resolve them. General errors You're getting one of the following
AmazonKeyspaces-189
AmazonKeyspaces.pdf
189
deletes. • Considerations for heavy delete workloads. For troubleshooting guidance specific to IAM access, see the section called “Troubleshooting”. For more information about security best practices, see the section called “Security best practices”. 609 Amazon Keyspaces (for Apache Cassandra) Developer Guide Topics • Troubleshooting general errors in Amazon Keyspaces • Troubleshooting connection errors in Amazon Keyspaces • Troubleshooting capacity management errors in Amazon Keyspaces • Troubleshooting data definition language errors in Amazon Keyspaces Troubleshooting general errors in Amazon Keyspaces Getting general errors? Here are some common issues and how to resolve them. General errors You're getting one of the following top-level exceptions that can occur due to many different reasons. • NoNodeAvailableException • NoHostAvailableException • AllNodesFailedException These exceptions are generated by the client driver and can occur either when you're establishing the control connection or when you're performing read/write/prepare/execute/batch requests. When the error occurs while you're establishing the control connection, it's a sign that all the contact points specified in your application are unreachable. When the error occurs while performing read/write/prepare/execute queries, it indicates that all of the retries for that request have been exhausted. Each retry is attempted on a different node when you're using the default retry policy. How to isolate the underlying error from top-level Java driver exceptions These general errors can be caused either by connection issues or when performing read/write/ prepare/execute operations. Transient failures have to be expected in distributed systems, and should be handled by retrying the request. The Java driver doesn't automatically retry when connection errors are encountered, so it's recommended to implement the retry policy when establishing the driver connection in your application. 
For a detailed overview of connection best practices, see the section called “Connections”. General errors 610 Amazon Keyspaces (for Apache Cassandra) Developer Guide By default, the Java driver sets idempotence to false for all requests, which means the Java driver doesn't automatically retry failed read/write/prepare requests. To set idempotence to true and tell the driver to retry failed requests, you can do so in a few different ways. Here's one example of how you can set idempotence programmatically for a single request in your Java application. Statement s = new SimpleStatement("SELECT * FROM my_table WHERE id = 1"); s.setIdempotent(true); Or you can set the default idempotence for your entire Java application programmatically as shown in the following example. // Make all statements idempotent by default: cluster.getConfiguration().getQueryOptions().setDefaultIdempotence(true); //Set the default idempotency to true in your Cassandra configuration basic.request.default-idempotence = true Another recommendation is to create a retry policy at the application level. In this case, the application needs to catch the NoNodeAvailableException and retry the request. We recommend 10 retries with exponential backoff starting at 10ms and working up to 100ms with a total time of 1 second for all retries. Another option is to apply the Amazon Keyspaces exponential retry policy when establishing the Java driver connection available on GitHub. Confirm that you have established connections to more than one node when using the default retry policy. You can do so using the following query in Amazon Keyspaces. SELECT * FROM system.peers; If the response for this query is empty, this indicates that you're working with a single node for Amazon Keyspaces. If you're using the default retry policy, there will be no retries because the default retry always occurs on a different node. 
To learn more about establishing connections over VPC endpoints, see the section called “VPC endpoint connections”. For a step-by-step tutorial that shows how to establish a connection to Amazon Keyspaces using the DataStax 4.x Cassandra driver, see the section called “Authentication plugin for Java 4.x”. General errors 611 Amazon Keyspaces (for Apache Cassandra) Developer Guide Troubleshooting connection errors in Amazon Keyspaces Having trouble connecting? Here are some common issues and how to resolve them. Errors connecting to an Amazon Keyspaces endpoint Failed connections and connection errors can result in different error messages. The following section covers the most common scenarios. Topics • I can't connect to Amazon Keyspaces with cqlsh • I can't connect to Amazon Keyspaces using a Cassandra client driver I can't connect to Amazon Keyspaces with cqlsh You're trying to connect to an Amazon Keyspaces endpoint using cqlsh and the connection fails with a Connection error. If you try to connect to an Amazon Keyspaces table and cqlsh hasn't been configured properly, the connection fails. The following section provides examples of the most common configuration issues that result in connection errors when you're trying to establish a connection using cqlsh. Note If you're trying to connect to Amazon Keyspaces from a VPC, additional permissions are required. To successfully configure a connection using VPC endpoints, follow the steps in the section called “Connecting with VPC endpoints”. You're trying to connect to Amazon Keyspaces using cqlsh, but you get a connection timed out error.
AmazonKeyspaces-190
AmazonKeyspaces.pdf
190
a Connection error. If you try to connect to an Amazon Keyspaces table and cqlsh hasn't been configured properly, the connection fails. The following section provides examples of the most common configuration issues that result in connection errors when you're trying to establish a connection using cqlsh. Note If you're trying to connect to Amazon Keyspaces from a VPC, additional permissions are required. To successfully configure a connection using VPC endpoints, follow the steps in the section called “Connecting with VPC endpoints”. You're trying to connect to Amazon Keyspaces using cqlsh, but you get a connection timed out error. This might be the case if you didn't supply the correct port, which results in the following error. # cqlsh cassandra.us-east-1.amazonaws.com 9140 -u "USERNAME" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.199': error(None, "Tried connecting to [('3.234.248.199', 9140)]. Last error: timed out")}) To resolve this issue, verify that you're using port 9142 for the connection. Connection errors 612 Amazon Keyspaces (for Apache Cassandra) Developer Guide You're trying to connect to Amazon Keyspaces using cqlsh, but you get a Name or service not known error. This might be the case if you used an endpoint that is misspelled or doesn't exist. In the following example, the name of the endpoint is misspelled. 
# cqlsh cassandra.us-east-1.amazon.com 9142 -u "USERNAME" -p "PASSWORD" --ssl Traceback (most recent call last): File "/usr/bin/cqlsh.py", line 2458, in <module> main(*read_options(sys.argv[1:], os.environ)) File "/usr/bin/cqlsh.py", line 2436, in main encoding=options.encoding) File "/usr/bin/cqlsh.py", line 484, in __init__ load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]), File "/usr/share/cassandra/lib/cassandra-driver-internal-only-3.11.0-bb96859b.zip/ cassandra-driver-3.11.0-bb96859b/cassandra/policies.py", line 417, in __init__ socket.gaierror: [Errno -2] Name or service not known To resolve this issue when you're using public endpoints to connect, select an available endpoint from the section called “Service endpoints”, and verify that the name of the endpoint doesn't have any errors. If you're using VPC endpoints to connect, verify that the VPC endpoint information is correct in your cqlsh configuration. You're trying to connect to Amazon Keyspaces using cqlsh, but you receive an OperationTimedOut error. Amazon Keyspaces requires that SSL is enabled for connections to ensure strong security. The SSL parameter might be missing if you receive the following error. # cqlsh cassandra.us-east-1.amazonaws.com -u "USERNAME" -p "PASSWORD" Connection error: ('Unable to connect to any servers', {'3.234.248.192': OperationTimedOut('errors=Timed out creating connection (5 seconds), last_host=None',)}) # To resolve this issue, add the following flag to the cqlsh connection command. --ssl You're trying to connect to Amazon Keyspaces using cqlsh, and you receive a SSL transport factory requires a valid certfile to be specified error. Errors connecting to an Amazon Keyspaces endpoint 613 Amazon Keyspaces (for Apache Cassandra) Developer Guide In this case, the path to the SSL/TLS certificate is missing, which results in the following error. 
# cat .cassandra/cqlshrc [connection] port = 9142 factory = cqlshlib.ssl.ssl_transport_factory # # cqlsh cassandra.us-east-1.amazonaws.com -u "USERNAME" -p "PASSWORD" --ssl Validation is enabled; SSL transport factory requires a valid certfile to be specified. Please provide path to the certfile in [ssl] section as 'certfile' option in / root/.cassandra/cqlshrc (or use [certfiles] section) or set SSL_CERTFILE environment variable. # To resolve this issue, add the path to the certfile on your computer. certfile = path_to_file/sf-class2-root.crt You're trying to connect to Amazon Keyspaces using cqlsh, but you receive a No such file or directory error. This might be the case if the path to the certificate file on your computer is wrong, which results in the following error. # cat .cassandra/cqlshrc [connection] port = 9142 factory = cqlshlib.ssl.ssl_transport_factory [ssl] validate = true certfile = /root/wrong_path/sf-class2-root.crt # # cqlsh cassandra.us-east-1.amazonaws.com -u "USERNAME" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.192': IOError(2, 'No such file or directory')}) # Errors connecting to an Amazon Keyspaces endpoint 614 Amazon Keyspaces (for Apache Cassandra) Developer Guide To resolve this issue, verify that the path to the certfile on your computer is correct. You're trying to connect to Amazon Keyspaces using cqlsh, but you receive a [X509] PEM lib error. This might be the case if the SSL/TLS certificate file sf-class2-root.crt is not valid, which results in the following error. # cqlsh cassandra.us-east-1.amazonaws.com -u "USERNAME" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.241': error(185090057, u"Tried connecting to [('3.234.248.241', 9142)]. Last error: [X509] PEM lib (_ssl.c:3063)")}) # To resolve this issue, download the Starfield digital certificate using the following command. Save sf-class2-root.crt locally or in your home directory. 
curl https://certs.secureserver.net/repository/sf-class2-root.crt -O You're trying to connect to Amazon Keyspaces using cqlsh, but you receive an unknown SSL error. This might be the case if the SSL/TLS certificate file sf-class2-root.crt is empty, which results in the following error. # cqlsh cassandra.us-east-1.amazonaws.com -u "USERNAME" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.220': error(0, u"Tried connecting to [('3.234.248.220', 9142)]. Last error: unknown error (_ssl.c:3063)")}) # To resolve this issue, download
AmazonKeyspaces-191
AmazonKeyspaces.pdf
191
Last error: [X509] PEM lib (_ssl.c:3063)")}) # To resolve this issue, download the Starfield digital certificate using the following command. Save sf-class2-root.crt locally or in your home directory. curl https://certs.secureserver.net/repository/sf-class2-root.crt -O You're trying to connect to Amazon Keyspaces using cqlsh, but you receive an unknown SSL error. This might be the case if the SSL/TLS certificate file sf-class2-root.crt is empty, which results in the following error. # cqlsh cassandra.us-east-1.amazonaws.com -u "USERNAME" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.220': error(0, u"Tried connecting to [('3.234.248.220', 9142)]. Last error: unknown error (_ssl.c:3063)")}) # To resolve this issue, download the Starfield digital certificate using the following command. Save sf-class2-root.crt locally or in your home directory. curl https://certs.secureserver.net/repository/sf-class2-root.crt -O You're trying to connect to Amazon Keyspaces using cqlsh, but you receive a SSL: CERTIFICATE_VERIFY_FAILED error. This might be the case if the SSL/TLS certificate file could not be verified, which results in the following error. Errors connecting to an Amazon Keyspaces endpoint 615 Amazon Keyspaces (for Apache Cassandra) Developer Guide Connection error: ('Unable to connect to any servers', {'3.234.248.223': error(1, u"Tried connecting to [('3.234.248.223', 9142)]. Last error: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:727)")}) To resolve this issue, download the certificate file again using the following command. Save sf- class2-root.crt locally or in your home directory. curl https://certs.secureserver.net/repository/sf-class2-root.crt -O You're trying to connect to Amazon Keyspaces using cqlsh, but you're receiving a Last error: timed out error. 
This might be the case if you didn't configure an outbound rule for Amazon Keyspaces in your Amazon EC2 security group, which results in the following error. # cqlsh cassandra.us-east-1.amazonaws.com 9142 -u "USERNAME" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.206': error(None, "Tried connecting to [('3.234.248.206', 9142)]. Last error: timed out")}) # To confirm that this issue is caused by the configuration of the Amazon EC2 instance and not cqlsh, you can try to connect to your keyspace using the AWS CLI, for example with the following command. aws keyspaces list-tables --keyspace-name 'my_keyspace' If this command also times out, the Amazon EC2 instance is not correctly configured. To confirm that you have sufficient permissions to access Amazon Keyspaces, you can use the AWS CloudShell to connect with cqlsh. If that connection gets established, you need to configure the Amazon EC2 instance. To resolve this issue, confirm that your Amazon EC2 instance has an outbound rule that allows traffic to Amazon Keyspaces. If that is not the case, you need to create a new security group for the EC2 instance, and add a rule that allows outbound traffic to Amazon Keyspaces resources. To update the outbound rule to allow traffic to Amazon Keyspaces, choose CQLSH/CASSANDRA from the Type drop-down menu. After creating the new security group with the outbound traffic rule, you need to add it to the instance. Select the instance and then choose Actions, then Security, and then Change security Errors connecting to an Amazon Keyspaces endpoint 616 Amazon Keyspaces (for Apache Cassandra) Developer Guide groups. Add the new security group with the outbound rule, but make sure that the default group also remains available. For more information about how to view and edit EC2 outbound rules, see Add rules to a security group in the Amazon EC2 User Guide. 
You're trying to connect to Amazon Keyspaces using cqlsh, but you receive an Unauthorized error. This might be the case if you're missing Amazon Keyspaces permissions in the IAM user policy, which results in the following error. # cqlsh cassandra.us-east-1.amazonaws.com 9142 -u "testuser-at-12345678910" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.241': AuthenticationFailed('Failed to authenticate to 3.234.248.241: Error from server: code=2100 [Unauthorized] message="User arn:aws:iam::12345678910:user/testuser has no permissions."',)}) # To resolve this issue, ensure that the IAM user testuser-at-12345678910 has permissions to access Amazon Keyspaces. For examples of IAM policies that grant access to Amazon Keyspaces, see the section called “Identity-based policy examples”. For troubleshooting guidance that's specific to IAM access, see the section called “Troubleshooting”. You're trying to connect to Amazon Keyspaces using cqlsh, but you receive a Bad credentials error. This might be the case if the user name or password is wrong, which results in the following error. # cqlsh cassandra.us-east-1.amazonaws.com 9142 -u "USERNAME" -p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.248': AuthenticationFailed('Failed to authenticate to 3.234.248.248: Error from server: code=0100 [Bad credentials] message="Provided username USERNAME and/or password are incorrect"',)}) # To resolve this issue, verify that the USERNAME and PASSWORD in your code match the user name and password you obtained when you generated service-specific credentials. Errors connecting to an Amazon Keyspaces endpoint 617 Amazon Keyspaces (for Apache Cassandra) Developer Guide Important If you continue to see errors when trying to connect with cqlsh, rerun the command with the --debug option and include the detailed output when contacting Support.
AmazonKeyspaces-192
AmazonKeyspaces.pdf
192
-p "PASSWORD" --ssl Connection error: ('Unable to connect to any servers', {'3.234.248.248': AuthenticationFailed('Failed to authenticate to 3.234.248.248: Error from server: code=0100 [Bad credentials] message="Provided username USERNAME and/or password are incorrect"',)}) # To resolve this issue, verify that the USERNAME and PASSWORD in your code match the user name and password you obtained when you generated service-specific credentials. Errors connecting to an Amazon Keyspaces endpoint 617 Amazon Keyspaces (for Apache Cassandra) Developer Guide Important If you continue to see errors when trying to connect with cqlsh, rerun the command with the --debug option and include the detailed output when contacting Support. I can't connect to Amazon Keyspaces using a Cassandra client driver The following sections show the most common errors when connecting with a Cassandra client driver. You're trying to connect to an Amazon Keyspaces table using the DataStax Java driver, but you receive a NodeUnavailableException error. If the connection on which the request is attempted is broken, it results in the following error. [com.datastax.oss.driver.api.core.NodeUnavailableException: No connection was available to Node(endPoint=vpce-22ff22f2f22222fff-aa1bb234.cassandra.us- west-2.vpce.amazonaws.com/11.1.1111.222:9142, hostId=1a23456b- c77d-8888-9d99-146cb22d6ef6, hashCode=123ca4567)] To resolve this issue, find the heartbeat value and lower it to 30 seconds if it's higher. advanced.heartbeat.interval = 30 seconds Then look for the associated time out and ensure the value is set to at least 5 seconds. advanced.connection.init-query-timeout = 5 seconds You're trying to connect to an Amazon Keyspaces table using a driver and the SigV4 plugin, but you receive an AttributeError error. If the credentials are not correctly configured, it results in the following error. 
cassandra.cluster.NoHostAvailable: (‘Unable to connect to any servers’, {‘44.234.22.154:9142’: AttributeError(“‘NoneType’ object has no attribute ‘access_key’“)}) To resolve this issue, verify that you're passing the credentials associated with your IAM user or role when using the SigV4 plugin. The SigV4 plugin requires the following credentials. Errors connecting to an Amazon Keyspaces endpoint 618 Amazon Keyspaces (for Apache Cassandra) Developer Guide • AWS_ACCESS_KEY_ID – Specifies an AWS access key associated with an IAM user or role. • AWS_SECRET_ACCESS_KEY – Specifies the secret key associated with the access key. This is essentially the "password" for the access key. To learn more about access keys and the SigV4 plugin, see the section called “Create IAM credentials for AWS authentication”. You're trying to connect to an Amazon Keyspaces table using a driver, but you receive a PartialCredentialsError error. If the AWS_SECRET_ACCESS_KEY is missing, it can result in the following error. cassandra.cluster.NoHostAvailable: (‘Unable to connect to any servers’, {‘44.234.22.153:9142’: PartialCredentialsError(‘Partial credentials found in config-file, missing: aws_secret_access_key’)}) To resolve this issue, verify that you're passing both the AWS_ACCESS_KEY_ID and the AWS_SECRET_ACCESS_KEY when using the SigV4 plugin. To learn more about access keys and the SigV4 plugin, see the section called “Create IAM credentials for AWS authentication”. You're trying to connect to an Amazon Keyspaces table using a driver, but you receive an Invalid signature error. This might be the case if any of the components required for the signature are wrong or not correctly defined for the session. • AWS_ACCESS_KEY_ID • AWS_SECRET_ACCESS_KEY • AWS_DEFAULT_REGION The following error is an example of invalid access keys. 
cassandra.cluster.NoHostAvailable: (‘Unable to connect to any servers’, {‘11.234.11.234:9142’: AuthenticationFailed(‘Failed to authenticate to 11.234.11.234:9142: Error from server: code=0100 [Bad credentials] message=“Authentication failure: Invalid signature”’)}) Errors connecting to an Amazon Keyspaces endpoint 619 Amazon Keyspaces (for Apache Cassandra) Developer Guide To resolve this issue, verify that the access keys and the AWS Region have been correctly configured for the SigV4 plugin to access Amazon Keyspaces. To learn more about access keys and the SigV4 plugin, see the section called “Create IAM credentials for AWS authentication”. My VPC endpoint connection doesn't work properly You're trying to connect to Amazon Keyspaces using VPC endpoints, but you're receiving token map errors or you are experiencing low throughput. This might be the case if the VPC endpoint connection isn't correctly configured. To resolve these issues, verify the following configuration details. To follow a step-by-step tutorial to learn how to configure a connection over interface VPC endpoints for Amazon Keyspaces see the section called “Connecting with VPC endpoints”. 1. Confirm that the IAM entity used to connect to Amazon Keyspaces has read/write access to the user table and read access to the system tables as shown in the following example. { "Version":"2012-10-17", "Statement":[ { "Effect":"Allow", "Action":[ "cassandra:Select", "cassandra:Modify" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } 2. Confirm that the IAM entity used to connect to Amazon Keyspaces has the required read permissions to access the VPC endpoint information on your Amazon EC2 instance as shown in the following example. 
{ "Version":"2012-10-17", Errors connecting to an Amazon Keyspaces endpoint 620 Amazon Keyspaces (for Apache Cassandra) Developer Guide "Statement":[ { "Sid":"ListVPCEndpoints", "Effect":"Allow", "Action":[ "ec2:DescribeNetworkInterfaces", "ec2:DescribeVpcEndpoints" ], "Resource":"*" } ] } Note The managed policies AmazonKeyspacesReadOnlyAccess_v2 and AmazonKeyspacesFullAccess include the required permissions to let Amazon Keyspaces access the Amazon EC2 instance
AmazonKeyspaces-193
AmazonKeyspaces.pdf
193
{ "Version":"2012-10-17", "Statement":[ { "Effect":"Allow", "Action":[ "cassandra:Select", "cassandra:Modify" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } 2. Confirm that the IAM entity used to connect to Amazon Keyspaces has the required read permissions to access the VPC endpoint information on your Amazon EC2 instance as shown in the following example. { "Version":"2012-10-17", Errors connecting to an Amazon Keyspaces endpoint 620 Amazon Keyspaces (for Apache Cassandra) Developer Guide "Statement":[ { "Sid":"ListVPCEndpoints", "Effect":"Allow", "Action":[ "ec2:DescribeNetworkInterfaces", "ec2:DescribeVpcEndpoints" ], "Resource":"*" } ] } Note The managed policies AmazonKeyspacesReadOnlyAccess_v2 and AmazonKeyspacesFullAccess include the required permissions to let Amazon Keyspaces access the Amazon EC2 instance to read information about available interface VPC endpoints. For more information about VPC endpoints, see the section called “Using interface VPC endpoints for Amazon Keyspaces” 3. Confirm that the SSL configuration of the Java driver sets hostname validation to false as shown in this example. hostname-validation = false For more information about driver configuration, see the section called “Step 2: Configure the driver”. 4. To confirm that the VPC endpoint has been configured correctly, you can run the following statement from within your VPC. Note You can't use your local developer environment or the Amazon Keyspaces CQL editor to confirm this configuration, because they use the public endpoint. Errors connecting to an Amazon Keyspaces endpoint 621 Amazon Keyspaces (for Apache Cassandra) Developer Guide SELECT peer FROM system.peers; The output should look similar to this example and return between 2 to 6 nodes with private IP addresses, depending on your VPC setup and AWS Region. 
peer --------------- 192.0.2.0.15 192.0.2.0.24 192.0.2.0.13 192.0.2.0.7 192.0.2.0.8 (5 rows) I can't connect using cassandra-stress You're trying to connect to Amazon Keyspaces using the cassandra-stress command, but you're receiving an SSL context error. This happens if you try to connect to Amazon Keyspaces, but you don't have the trustStore set up correctly. Amazon Keyspaces requires the use of Transport Layer Security (TLS) to help secure connections with clients. In this case, you see the following error. Error creating the initializing the SSL Context To resolve this issue, follow the instructions to set up a trustStore as shown in the section called “Before you begin”. Once the trustStore is set up, you should be able to connect with the following command. ./cassandra-stress user profile=./profile.yaml n=100 "ops(insert=1,select=1)" cl=LOCAL_QUORUM -node "cassandra.eu-north-1.amazonaws.com" -port native=9142 -transport ssl-alg="PKIX" truststore="./cassandra_truststore.jks" truststore- password="trustStore_pw" -mode native cql3 user="user_name" password="password" Errors connecting to an Amazon Keyspaces endpoint 622 Amazon Keyspaces (for Apache Cassandra) Developer Guide I can't connect using IAM identities You're trying to connect to an Amazon Keyspaces table using an IAM identity, but you're receiving an Unauthorized error. This happens if you try to connect to an Amazon Keyspaces table using an IAM identity (for example, an IAM user) without implementing the policy and giving the user the required permissions first. In this case, you see the following error. Connection error: ('Unable to connect to any servers', {'3.234.248.202': AuthenticationFailed('Failed to authenticate to 3.234.248.202: Error from server: code=2100 [Unauthorized] message="User arn:aws:iam::1234567890123:user/testuser has no permissions."',)}) To resolve this issue, verify the permissions of the IAM user. 
To connect with a standard driver, a user must have at least SELECT access to the system tables, because most drivers read the system keyspaces/tables when they establish the connection. For example IAM policies that grant access to Amazon Keyspaces system and user tables, see the section called “Accessing Amazon Keyspaces tables”. To review the troubleshooting section specific to IAM, see the section called “Troubleshooting”. I'm trying to import data with cqlsh and the connection to my Amazon Keyspaces table is lost You're trying to upload data to Amazon Keyspaces with cqlsh, but you're receiving connection errors. The connection to Amazon Keyspaces fails after the cqlsh client receives three consecutive errors of any type from the server. The cqlsh client fails with the following message. Failed to import 1 rows: NoHostAvailable - , will retry later, attempt 3 of 100 To resolve this error, you need to make sure that the data to be imported matches the table schema in Amazon Keyspaces. Review the import file for parsing errors. You can try using a single row of data by using an INSERT statement to isolate the error. The client automatically attempts to reestablish the connection. Errors connecting to an Amazon Keyspaces endpoint 623 Amazon Keyspaces (for Apache Cassandra) Developer Guide Troubleshooting capacity management errors in Amazon Keyspaces Having trouble with serverless capacity? Here are some common issues and how to resolve them. Serverless capacity errors This section outlines how to recognize errors related to serverless capacity management and how to resolve them. For example, you might observe insufficient capacity events when your application exceeds your provisioned throughput capacity. Because Apache Cassandra is cluster-based software that is designed to run on a fleet of nodes, it doesn’t have exception messages related to
AmazonKeyspaces-194
AmazonKeyspaces.pdf
194
to reestablish the connection. Errors connecting to an Amazon Keyspaces endpoint 623 Amazon Keyspaces (for Apache Cassandra) Developer Guide Troubleshooting capacity management errors in Amazon Keyspaces Having trouble with serverless capacity? Here are some common issues and how to resolve them. Serverless capacity errors This section outlines how to recognize errors related to serverless capacity management and how to resolve them. For example, you might observe insufficient capacity events when your application exceeds your provisioned throughput capacity. Because Apache Cassandra is cluster-based software that is designed to run on a fleet of nodes, it doesn’t have exception messages related to serverless features such as throughput capacity. Most drivers only understand the error codes that are available in Apache Cassandra, so Amazon Keyspaces uses that same set of error codes to maintain compatibility. To map Cassandra errors to the underlying capacity events, you can use Amazon CloudWatch to monitor the relevant Amazon Keyspaces metrics. Insufficient-capacity events that result in client- side errors can be categorized into these three groups based on the resource that is causing the event: • Table – If you choose Provisioned capacity mode for a table, and your application exceeds your provisioned throughput, you might observe insufficient-capacity errors. For more information, see the section called “Configure read/write capacity modes”. • Partition – You might experience insufficient-capacity events if traffic against a given partition exceeds 3,000 RCUs or 1,000 WCUs. We recommend distributing traffic uniformly across partitions as a best practice. For more information, see the section called “Data modeling”. • Connection – You might experience insufficient throughput if you exceed the quota for the maximum number of operations per second, per connection. 
To increase throughput, you can increase the number of default connections when configuring the connection with the driver. To learn how to configure connections for Amazon Keyspaces, see the section called “How to configure connections”. For more information about optimizing connections over VPC endpoints, see the section called “VPC endpoint connections”. To determine which resource is causing the insufficient-capacity event that is returning the client- side error, you can check the dashboard in the Amazon Keyspaces console. By default, the console Capacity management errors 624 Amazon Keyspaces (for Apache Cassandra) Developer Guide provides an aggregated view of the most common capacity and traffic related CloudWatch metrics in the Capacity and related metrics section on the Capacity tab for the table. To create your own dashboard using Amazon CloudWatch, check the following Amazon Keyspaces metrics. • PerConnectionRequestRateExceeded – Requests to Amazon Keyspaces that exceed the quota for the per-connection request rate. Each client connection to Amazon Keyspaces can support up to 3000 CQL requests per second. You can perform more than 3000 requests per second by creating multiple connections. • ReadThrottleEvents – Requests to Amazon Keyspaces that exceed the read capacity for a table. • StoragePartitionThroughputCapacityExceeded – Requests to an Amazon Keyspaces storage partition that exceed the throughput capacity of the partition. Amazon Keyspaces storage partitions can support up to 1000 WCU/WRU per second and 3000 RCU/RRU per second. To mitigate these exceptions, we recommend that you review your data model to distribute read/write traffic across more partitions. • WriteThrottleEvents – Requests to Amazon Keyspaces that exceed the write capacity for a table. To learn more about CloudWatch, see the section called “Monitoring with CloudWatch”. 
For a list of all available CloudWatch metrics for Amazon Keyspaces, see the section called “Metrics and dimensions”. Note To get started with a custom dashboard that shows all commonly observed metrics for Amazon Keyspaces, you can use a prebuilt CloudWatch template available on GitHub in the AWS samples repository. Topics • I'm receiving NoHostAvailable insufficient capacity errors from my client driver • I'm receiving write timeout errors during data import • I can't see the actual storage size of a keyspace or table Serverless capacity errors 625 Amazon Keyspaces (for Apache Cassandra) Developer Guide I'm receiving NoHostAvailable insufficient capacity errors from my client driver You're seeing Read_Timeout or Write_Timeout exceptions for a table. Repeatedly trying to write to or read from an Amazon Keyspaces table with insufficient capacity can result in client-side errors that are specific to the driver. Use CloudWatch to monitor your provisioned and actual throughput metrics, and insufficient capacity events for the table. For example, a read request that doesn’t have enough throughput capacity fails with a Read_Timeout exception and is posted to the ReadThrottleEvents metric. A write request that doesn’t have enough throughput capacity fails with a Write_Timeout exception and is posted to the WriteThrottleEvents metric. For more information about these metrics, see the section called “Metrics and dimensions”. To resolve these issues, consider one of the following options. • Increase the provisioned throughput for the table, which is the maximum amount of throughput capacity an application can consume. For more information, see the section called
AmazonKeyspaces-195
AmazonKeyspaces.pdf
195
insufficient capacity events for the table. For example, a read request that doesn’t have enough throughput capacity fails with a Read_Timeout exception and is posted to the ReadThrottleEvents metric. A write request that doesn’t have enough throughput capacity fails with a Write_Timeout exception and is posted to the WriteThrottleEvents metric. For more information about these metrics, see the section called “Metrics and dimensions”. To resolve these issues, consider one of the following options. • Increase the provisioned throughput for the table, which is the maximum amount of throughput capacity an application can consume. For more information, see the section called “Read capacity units and write capacity units”. • Let the service manage throughput capacity on your behalf with automatic scaling. For more information, see the section called “Manage throughput capacity with auto scaling”. • Choose On-demand capacity mode for the table. For more information, see the section called “Configure on-demand capacity mode”. If you need to increase the default capacity quota for your account, see Quotas. You're seeing errors related to exceeded partition capacity. When you're seeing the error StoragePartitionThroughputCapacityExceeded, the partition capacity is temporarily exceeded. This might be automatically handled by adaptive capacity or on-demand capacity. We recommend reviewing your data model to distribute read/write traffic across more partitions to mitigate these errors. Amazon Keyspaces storage partitions can support up to 1000 WCU/WRU per second and 3000 RCU/RRU per second. To learn more about how to improve your data model to distribute read/write traffic across more partitions, see the section called “Data modeling”. Write_Timeout exceptions can also be caused by an elevated rate of concurrent write operations that include static and nonstatic data in the same logical partition. 
If traffic is expected to run multiple concurrent write operations that include static and nonstatic data within the same logical Serverless capacity errors 626 Amazon Keyspaces (for Apache Cassandra) Developer Guide partition, we recommend writing static and nonstatic data separately. Writing the data separately also helps to optimize the throughput costs. You're seeing errors related to exceeded connection request rate. You're seeing PerConnectionRequestRateExceeded due to one of the following causes. • You might not have enough connections configured per session. • You might be getting fewer connections than available peers, because you don't have the VPC endpoint permissions configured correctly. For more information about VPC endpoint policies, see the section called “Using interface VPC endpoints for Amazon Keyspaces”. • If you're using a 4.x driver, check to see if you have hostname validation enabled. The driver enables TLS hostname verification by default. This configuration leads to Amazon Keyspaces appearing as a single-node cluster to the driver. We recommend that you turn hostname verification off. We recommend that you follow these best practices to ensure that your connections and throughput are optimized: • Configure CQL query throughput tuning. Amazon Keyspaces supports up to 3,000 CQL queries per TCP connection per second, but there is no limit on the number of connections a driver can establish. Most open-source Cassandra drivers establish a connection pool to Cassandra and load balance queries over that pool of connections. Amazon Keyspaces exposes 9 peer IP addresses to drivers. The default behavior of most drivers is to establish a single connection to each peer IP address. Therefore, the maximum CQL query throughput of a driver using the default settings will be 27,000 CQL queries per second. To increase this number, we recommend that you increase the number of connections per IP address that your driver is maintaining in its connection pool. 
For example, setting the maximum connections per IP address to 2 will double the maximum throughput of your driver to 54,000 CQL queries per second. • Optimize your single-node connections. By default, most open-source Cassandra drivers establish one or more connections to every IP address advertised in the system.peers table when establishing a session. However, certain Serverless capacity errors 627 Amazon Keyspaces (for Apache Cassandra) Developer Guide configurations can lead to a driver connecting to a single Amazon Keyspaces IP address. This can happen if the driver is attempting SSL hostname validation of the peer nodes (for example, DataStax Java drivers), or when it's connecting through a VPC endpoint. To get the same availability and performance as a driver with connections to multiple IP addresses, we recommend that you do the following: • Increase the number of connections per IP to 9 or higher depending on the desired client throughput. • Create a custom retry policy that ensures that retries are run against the same node. For more information, see the section called “How to configure retry policies”. • If you use VPC endpoints, grant the IAM entity that is used to connect to Amazon Keyspaces access permissions to query your VPC for the endpoint and network interface information. This improves load balancing and increases read/write
AmazonKeyspaces-196
AmazonKeyspaces.pdf
196
as a driver with connections to multiple IP addresses, we recommend that you do the following: • Increase the number of connections per IP to 9 or higher depending on the desired client throughput. • Create a custom retry policy that ensures that retries are run against the same node. For more information, see the section called “How to configure retry policies”. • If you use VPC endpoints, grant the IAM entity that is used to connect to Amazon Keyspaces access permissions to query your VPC for the endpoint and network interface information. This improves load balancing and increases read/write throughput. For more information, see the section called “Using interface VPC endpoints for Amazon Keyspaces”. I'm receiving write timeout errors during data import You're receiving a timeout error when uploading data using the cqlsh COPY command. Failed to import 1 rows: WriteTimeout - Error from server: code=1100 [Coordinator node timed out waiting for replica nodes' responses] message="Operation timed out - received only 0 responses." info={'received_responses': 0, 'required_responses': 2, 'write_type': 'SIMPLE', 'consistency': 'LOCAL_QUORUM'}, will retry later, attempt 1 of 100 Amazon Keyspaces uses the ReadTimeout and WriteTimeout exceptions to indicate when a write request fails due to insufficient throughput capacity. To help diagnose insufficient capacity exceptions, Amazon Keyspaces publishes the following metrics in Amazon CloudWatch. • WriteThrottleEvents • ReadThrottleEvents • StoragePartitionThroughputCapacityExceeded To resolve insufficient-capacity errors during a data load, lower the write rate per worker or the total ingest rate, and then retry to upload the rows. For more information, see the section called “Step 4: Configure cqlsh COPY FROM settings”. For a more robust data upload option, consider Serverless capacity errors 628 Amazon Keyspaces (for Apache Cassandra) Developer Guide using DSBulk, which is available from the GitHub repository. 
For step-by-step instructions, see the section called “Loading data using DSBulk”. I can't see the actual storage size of a keyspace or table You can't see the actual storage size of the keyspace or table. To learn more about the storage size of your table, see the section called “Evaluate your costs at the table level”. You can also estimate storage size by starting to calculate the row size in a table. Detailed instructions for calculating the row size are available at the section called “Estimate row size”. Troubleshooting data definition language errors in Amazon Keyspaces Having trouble creating resources? Here are some common issues and how to resolve them. Data definition language errors Amazon Keyspaces performs data definition language (DDL) operations asynchronously—for example, creating and deleting keyspaces and tables. If an application is trying to use the resource before it's ready, the operation fails. You can monitor the creation status of new keyspaces and tables in the AWS Management Console, which indicates when a keyspace or table is pending or active. You can also monitor the creation status of a new keyspace or table programmatically by querying the system schema table. A keyspace or table becomes visible in the system schema when it's ready for use. Note To optimize the creation of keyspaces using AWS CloudFormation, you can use this utility to convert CQL scripts into CloudFormation templates. The tool is available from the GitHub repository. 
Topics • I created a new keyspace, but I can't view or access it • I created a new table, but I can't view or access it Data definition language errors 629 Amazon Keyspaces (for Apache Cassandra) Developer Guide • I'm trying to restore a table using Amazon Keyspaces point-in-time recovery (PITR), but the restore fails • I'm trying to use INSERT/UPDATE to edit custom Time to Live (TTL) settings, but the operation fails • I'm trying to upload data to my Amazon Keyspaces table and I get an error about exceeding the number of columns • I'm trying to delete data in my Amazon Keyspaces table and the deletion fails for the range I created a new keyspace, but I can't view or access it You're receiving errors from your application that is trying to access a new keyspace. If you try to access a newly created Amazon Keyspaces keyspace that is still being created asynchronously, you will get an error. The following error is an example. InvalidRequest: Error from server: code=2200 [Invalid query] message="unconfigured keyspace mykeyspace" The recommended design pattern to check when a new keyspace is ready for use is to poll the Amazon Keyspaces system schema tables (system_schema_mcs.*). For more information, see the section called “Check keyspace creation status”. I created a new table, but I can't view or access it You're receiving errors from your application that is trying to access a new table. If you try to access a newly created Amazon Keyspaces table that is still being created asynchronously, you will get an error. For example, trying to query a table that isn't available yet fails with an unconfigured table error. InvalidRequest: Error from server: code=2200 [Invalid
AmazonKeyspaces-197
AmazonKeyspaces.pdf
197
when a new keyspace is ready for use is to poll the Amazon Keyspaces system schema tables (system_schema_mcs.*). For more information, see the section called “Check keyspace creation status”. I created a new table, but I can't view or access it You're receiving errors from your application that is trying to access a new table. If you try to access a newly created Amazon Keyspaces table that is still being created asynchronously, you will get an error. For example, trying to query a table that isn't available yet fails with an unconfigured table error. InvalidRequest: Error from server: code=2200 [Invalid query] message="unconfigured table mykeyspace.mytable" Trying to view the table with sync_table() fails with a KeyError. KeyError: 'mytable' Data definition language errors 630 Amazon Keyspaces (for Apache Cassandra) Developer Guide The recommended design pattern to check when a new table is ready for use is to poll the Amazon Keyspaces system schema tables (system_schema_mcs.*). This is the example output for a table that is being created. user-at-123@cqlsh:system_schema_mcs> select table_name,status from system_schema_mcs.tables where keyspace_name='example_keyspace' and table_name='example_table'; table_name | status ------------+---------- example_table | CREATING (1 rows) This is the example output for a table that is active. user-at-123@cqlsh:system_schema_mcs> select table_name,status from system_schema_mcs.tables where keyspace_name='example_keyspace' and table_name='example_table'; table_name | status ------------+---------- example_table | ACTIVE (1 rows) For more information, see the section called “Check table creation status”. 
I'm trying to restore a table using Amazon Keyspaces point-in-time recovery (PITR), but the restore fails If you're trying to restore an Amazon Keyspaces table with point-in-time recovery (PITR), and you see the restore process begin but not complete successfully, you might not have configured all of the required permissions that are needed by the restore process for this particular table. Data definition language errors 631 Amazon Keyspaces (for Apache Cassandra) Developer Guide In addition to user permissions, Amazon Keyspaces might require permissions to perform actions during the restore process on your principal's behalf. This is the case if the table is encrypted with a customer managed key, or if you're using IAM policies that restrict incoming traffic. For example, if you're using condition keys in your IAM policy to restrict source traffic to specific endpoints or IP ranges, the restore operation fails. To allow Amazon Keyspaces to perform the table restore operation on your principal's behalf, you must add an aws:ViaAWSService global condition key in the IAM policy. For more information about permissions to restore tables, see the section called “Configure IAM permissions for restore”. I'm trying to use INSERT/UPDATE to edit custom Time to Live (TTL) settings, but the operation fails If you're trying to insert or update a custom TTL value, the operation might fail with the following error. TTL is not yet supported. To specify custom TTL values for rows or columns by using INSERT or UPDATE operations, you must first enable TTL for the table. You can enable TTL for a table using the ttl custom property. For more information about enabling custom TTL settings for tables, see the section called “Update table custom TTL”. I'm trying to upload data to my Amazon Keyspaces table and I get an error about exceeding the number of columns You're uploading data and have exceeded the number of columns that can be updated simultaneously. 
This error occurs when your table schema exceeds the maximum size of 350 KB. For more information, see Quotas. I'm trying to delete data in my Amazon Keyspaces table and the deletion fails for the range You're trying to delete data by partition key and receive a range delete error. Data definition language errors 632 Amazon Keyspaces (for Apache Cassandra) Developer Guide This error occurs when you're trying to delete more than 1,000 rows in one delete operation. Range delete requests are limited by the amount of items that can be deleted in a single range. For more information, see the section called “Range delete”. To delete more than 1,000 rows within a single partition, consider the following options. • Delete by partition – If the majority of partitions are under 1,000 rows, you can attempt to delete data by partition. If the partitions contain more than 1,000 rows, attempt to delete by the clustering column instead. • Delete by clustering column – If your model contains multiple clustering columns, you can use the column hierarchy to delete multiple rows. Clustering columns are a nested structure, and you can delete many rows by operating against the top-level column. • Delete by individual row – You can iterate through the rows and delete each row by its full primary key (partition columns and clustering columns). • As a best practice, consider splitting your rows across partitions – In Amazon Keyspaces, we recommend that you distribute your throughput across table partitions. This distributes data and access evenly across physical resources, which provides the
AmazonKeyspaces-198
AmazonKeyspaces.pdf
198
column – If your model contains multiple clustering columns, you can use the column hierarchy to delete multiple rows. Clustering columns are a nested structure, and you can delete many rows by operating against the top-level column. • Delete by individual row – You can iterate through the rows and delete each row by its full primary key (partition columns and clustering columns). • As a best practice, consider splitting your rows across partitions – In Amazon Keyspaces, we recommend that you distribute your throughput across table partitions. This distributes data and access evenly across physical resources, which provides the best throughput. For more information, see the section called “Data modeling”. Consider also the following recommendations when you're planning delete operations for heavy workloads. • With Amazon Keyspaces, partitions can contain a virtually unbounded number of rows. This allows you to scale partitions “wider” than the traditional Cassandra guidance of 100 MB. It’s not uncommon for time series or ledgers to grow over a gigabyte of data over time. • With Amazon Keyspaces, there are no compaction strategies or tombstones to consider when you have to perform delete operations for heavy workloads. You can delete as much data as you want without impacting read performance. Data definition language errors 633 Amazon Keyspaces (for Apache Cassandra) Developer Guide Monitoring Amazon Keyspaces (for Apache Cassandra) Monitoring is an important part of maintaining the reliability, availability, and performance of Amazon Keyspaces and your other AWS solutions. AWS provides the following monitoring tools to watch Amazon Keyspaces, report when something is wrong, and take automatic actions when appropriate: • Amazon Keyspaces offers a preconfigured dashboard in the AWS Management Console showing the latency and errors aggregated across all tables in the account. 
• Amazon CloudWatch monitors your AWS resources and the applications you run on AWS in real time. You can collect and track metrics with customized dashboards. For example, you can create a baseline for normal Amazon Keyspaces performance in your environment by measuring performance at various times and under different load conditions. As you monitor Amazon Keyspaces, store historical monitoring data so that you can compare it with current performance data, identify normal performance patterns and performance anomalies, and devise methods to address issues. To establish a baseline, you should, at a minimum, monitor for system errors. For more information, see the Amazon CloudWatch User Guide. • Amazon CloudWatch alarms monitor a single metric over a time period that you specify, and perform one or more actions based on the value of the metric relative to a given threshold over a number of time periods. For example if you use Amazon Keyspaces in provisioned mode with application auto scaling, the action is a notification sent by the Amazon Simple Notification Service (Amazon SNS) to evaluate an Application Auto Scaling policy. CloudWatch alarms do not invoke actions simply because they are in a particular state. The state must have changed and been maintained for a specified number of periods. For more information, see Monitoring Amazon Keyspaces with Amazon CloudWatch. • Amazon CloudWatch Logs enables you to monitor, store, and access your log files from Amazon Keyspaces tables, CloudTrail, and other sources. CloudWatch Logs can monitor information in the log files and notify you when certain thresholds are met. You can also archive your log data in highly durable storage. For more information, see the Amazon CloudWatch Logs User Guide. • AWS CloudTrail captures API calls and related events made by or on behalf of your AWS account and delivers the log files to an Amazon S3 bucket that you specify. 
You can identify which users and accounts called AWS, the source IP address from which the calls were made, and when the calls occurred. For more information, see the AWS CloudTrail User Guide. 634 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon EventBridge is a serverless event bus service that makes it easy to connect your applications with data from a variety of sources. EventBridge delivers a stream of real-time data from your own applications, Software-as-a-Service (SaaS) applications, and AWS services and routes that data to targets such as Lambda. This enables you to monitor events that happen in services, and build event-driven architectures. For more information, see the Amazon EventBridge User Guide. Topics • Monitoring Amazon Keyspaces with Amazon CloudWatch • Logging Amazon Keyspaces API calls with AWS CloudTrail Monitoring Amazon Keyspaces with Amazon CloudWatch You can monitor Amazon Keyspaces using Amazon CloudWatch, which collects raw data and processes it into readable, near real-time metrics. These statistics are kept for 15 months, so that you can access historical information and gain a better perspective on how your web application or service is performing. You can also set alarms that watch for certain thresholds, and send notifications or take actions when those thresholds
AmazonKeyspaces-199
AmazonKeyspaces.pdf
199
architectures. For more information, see the Amazon EventBridge User Guide. Topics • Monitoring Amazon Keyspaces with Amazon CloudWatch • Logging Amazon Keyspaces API calls with AWS CloudTrail Monitoring Amazon Keyspaces with Amazon CloudWatch You can monitor Amazon Keyspaces using Amazon CloudWatch, which collects raw data and processes it into readable, near real-time metrics. These statistics are kept for 15 months, so that you can access historical information and gain a better perspective on how your web application or service is performing. You can also set alarms that watch for certain thresholds, and send notifications or take actions when those thresholds are met. For more information, see the Amazon CloudWatch User Guide. Note To get started quickly with a preconfigured CloudWatch dashboard showing common metrics for Amazon Keyspaces, you can use an AWS CloudFormation template available from https://github.com/aws-samples/amazon-keyspaces-cloudwatch-cloudformation-templates. Topics • How do I use Amazon Keyspaces metrics? • Amazon Keyspaces metrics and dimensions • Creating CloudWatch alarms to monitor Amazon Keyspaces Monitoring with CloudWatch 635 Amazon Keyspaces (for Apache Cassandra) Developer Guide How do I use Amazon Keyspaces metrics? The metrics reported by Amazon Keyspaces provide information that you can analyze in different ways. The following list shows some common uses for the metrics. These are suggestions to get you started, not a comprehensive list. For more information about metrics and retention, see Metrics. How can I? Relevant metrics How can I determine if any system errors occurred? You can monitor SystemErrors to determine whether any requests resulted in a server error code. Typically, this metric should be equal to zero. If it isn't, you might want to investigate. How can I compare average provisioned read to consumed read capacity? To monitor average provisioned read capacity and consumed read capacity: 1. 
Set the Period for ConsumedReadCapacityUnits and ProvisionedReadCapacityUnits to the interval you want to monitor. 2. Change the Statistic for ConsumedReadCapacityUnits from Average to Sum. 3. Create a new empty Math expression. 4. In the Details section of the new math expression, enter the Id of ConsumedReadCapacityUnits and divide the metric by the CloudWatch PERIOD function of the metric (metric_id/PERIOD(metric_id)). 5. Unselect ConsumedReadCapacityUnits. You can now compare your average consumed read capacity to your provisioned capacity. For more information on basic arithmetic functions and how to create a time series see Using metric math. Using metrics 636 Amazon Keyspaces (for Apache Cassandra) Developer Guide How can I? Relevant metrics How can I compare average provisioned write to consumed write capacity? To monitor average provisioned write capacity and consumed write capacity: 1. Set the Period for ConsumedWriteCapacityUnits and ProvisionedWriteCapacityUnits to the interval you want to monitor. 2. Change the Statistic for ConsumedWriteCapacityUnits from Average to Sum. 3. Create a new empty Math expression. 4. In the Details section of the new math expression, enter the Id of ConsumedWriteCapacityUnits and divide the metric by the CloudWatch PERIOD function of the metric (metric_id/PERIOD(metric_id)). 5. Unselect ConsumedWriteCapacityUnits. You can now compare your average consumed write capacity to your provisioned capacity. For more information on basic arithmetic functions and how to create a time series see Using metric math. Amazon Keyspaces metrics and dimensions When you interact with Amazon Keyspaces, it sends the following metrics and dimensions to Amazon CloudWatch. All metrics are aggregated and reported every minute. You can use the following procedures to view the metrics for Amazon Keyspaces. 
To view metrics using the CloudWatch console Metrics are grouped first by the service namespace, and then by the various dimension combinations within each namespace. 1. Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/. 2. If necessary, change the Region. On the navigation bar, choose the Region where your AWS resources reside. For more information, see AWS service endpoints. Metrics and dimensions 637 Amazon Keyspaces (for Apache Cassandra) Developer Guide 3. In the navigation pane, choose Metrics. 4. Under the All metrics tab, choose AWS/Cassandra. To view metrics using the AWS CLI • At a command prompt, use the following command. aws cloudwatch list-metrics --namespace "AWS/Cassandra" Amazon Keyspaces metrics and dimensions The metrics and dimensions that Amazon Keyspaces sends to Amazon CloudWatch are listed here. Amazon Keyspaces metrics Amazon CloudWatch aggregates Amazon Keyspaces metrics at one-minute intervals. Not all statistics, such as Average or Sum, are applicable for every metric. However, all of these values are available through the Amazon Keyspaces console, or by using the CloudWatch console, AWS CLI, or AWS SDKs for all metrics. In the following table, each metric has a list of valid statistics that are applicable to that metric. Metric Description AccountMaxTableLevelReads The maximum number of read capacity units that can be used by a table of an account. For on-demand tables this limit caps the maximum read request units a table can use. Units: Count Valid Statistics: • Maximum – The maximum number of read
AmazonKeyspaces-200
AmazonKeyspaces.pdf
200
as Average or Sum, are applicable for every metric. However, all of these values are available through the Amazon Keyspaces console, or by using the CloudWatch console, AWS CLI, or AWS SDKs for all metrics. In the following table, each metric has a list of valid statistics that are applicable to that metric. Metric Description AccountMaxTableLevelReads The maximum number of read capacity units that can be used by a table of an account. For on-demand tables this limit caps the maximum read request units a table can use. Units: Count Valid Statistics: • Maximum – The maximum number of read capacity units that can be used by a table of the account. AccountMaxTableLev elWrites The maximum number of write capacity units that can be used by a table of an account. For on-demand tables Metrics and dimensions 638 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description this limit caps the maximum write request units a table can use. Units: Count Valid Statistics: • Maximum – The maximum number of write capacity units that can be used by a table of the account. AccountProvisioned ReadCapacityUtilization The percentage of provisioned read capacity units utilized by an account. Units: Percent Valid Statistics: • Maximum – The maximum percentage of provisioned read capacity units utilized by the account. • Minimum – The minimum percentage of provisioned read capacity units utilized by the account. • Average – The average percentage of provisioned read capacity units utilized by the account. The metric is published for five-minute intervals. Therefore, if you rapidly adjust the provisioned read capacity units, this statistic might not reflect the true average. Metrics and dimensions 639 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description AccountProvisioned WriteCapacityUtilization The percentage of provisioned write capacity units utilized by an account. 
BillableTableSizeInBytes Units: Percent Valid Statistics: • Maximum – The maximum percentage of provisioned write capacity units utilized by the account. • Minimum – The minimum percentage of provisioned write capacity units utilized by the account. • Average – The average percentage of provisioned write capacity units utilized by the account. The metric is published for five-minute intervals. Therefore, if you rapidly adjust the provisioned write capacity units, this statistic might not reflect the true average. The billable size of the table in bytes. It is the sum of the encoded size of all rows in the table. This metric helps you track your table storage costs over time. Units: Bytes Dimensions: Keyspace, TableName Valid Statistics: • Maximum – The maximum storage size of the table. • Minimum – The minimum storage size of the table. • Average – The average storage size of the table. This metric is calculated over 4 - 6 hour intervals. Metrics and dimensions 640 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ConditionalCheckFa The number of failed lightweight transaction (LWT) iledRequests write requests. The INSERT, UPDATE, and DELETE operations let you provide a logical condition that must evaluate to true before the operation can proceed. If this condition evaluates to false, ConditionalCheckFa iledRequests checks that evaluate to false consume write capacity is incremented by one. Condition units based on the size of the row. For more information, see the section called “Estimate capacity consumption of LWT”. Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Minimum • Maximum • Average • SampleCount • Sum Metrics and dimensions 641 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ConsumedReadCapacityUnits The number of read capacity units consumed over the specified time period. For more information, see Read/ Write capacity mode. 
Note To understand your average throughput utilizati on per second, use the Sum statistic to calculate the consumed throughput for the one minute period. Then divide the sum by the number of seconds in a minute (60) to calculate the average ConsumedReadCapacityUnits per second (recognizing that this average does not highlight any large but brief spikes in read activity that occurred during that minute). For more informati on on comparing average consumed read capacity to provisioned read capacity, see the section called “Using metrics” Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Minimum – The minimum number of read capacity units consumed by any individual request to the table. • Maximum – The maximum number of read capacity units consumed by any individual request to the table. • Average – The average per-request read capacity consumed. Metrics and dimensions 642 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description Note The Average value is influenced by periods of inactivity where the sample value will be zero. • Sum – The total read capacity units consumed. This is the most useful statistic for the ConsumedR eadCapacityUnits metric. • SampleCount – The number of requests to Amazon Keyspaces, even if no read capacity was consumed. Note The SampleCount value is influenced by periods
AmazonKeyspaces-201
AmazonKeyspaces.pdf
201
– The maximum number of read capacity units consumed by any individual request to the table. • Average – The average per-request read capacity consumed. Metrics and dimensions 642 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description Note The Average value is influenced by periods of inactivity where the sample value will be zero. • Sum – The total read capacity units consumed. This is the most useful statistic for the ConsumedReadCapacityUnits metric. • SampleCount – The number of requests to Amazon Keyspaces, even if no read capacity was consumed. Note The SampleCount value is influenced by periods of inactivity where the sample value will be zero. Metrics and dimensions 643 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ConsumedWriteCapacityUnits The number of write capacity units consumed over the specified time period. You can retrieve the total consumed write capacity for a table. For more information, see Read/Write capacity mode. Note To understand your average throughput utilization per second, use the Sum statistic to calculate the consumed throughput for the one minute period. Then divide the sum by the number of seconds in a minute (60) to calculate the average ConsumedWriteCapacityUnits per second (recognizing that this average does not highlight any large but brief spikes in write activity that occurred during that minute). For more information on comparing average consumed write capacity to provisioned write capacity, see the section called “Using metrics” Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Minimum – The minimum number of write capacity units consumed by any individual request to the table. • Maximum – The maximum number of write capacity units consumed by any individual request to the table. • Average – The average per-request write capacity consumed. 
Metrics and dimensions 644 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description Note The Average value is influenced by periods of inactivity where the sample value will be zero. • Sum – The total write capacity units consumed. This is the most useful statistic for the ConsumedW riteCapacityUnits metric. • SampleCount – The number of requests to Amazon Keyspaces, even if no write capacity was consumed. Note The SampleCount value is influenced by periods of inactivity where the sample value will be zero. Metrics and dimensions 645 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description MaxProvisionedTabl eReadCapacityUtilization The percentage of provisioned read capacity units utilized by the highest provisioned read table of an account. Units: Percent Valid Statistics: • Maximum – : The maximum percentage of provisioned read capacity units utilized by the highest provisioned read table of the account. • Minimum – The minimum percentage of provisioned read capacity units utilized by the highest provisioned read table of the account. • Average – The average percentage of provisioned read capacity units utilized by the highest provisioned read table of the account. The metric is published for five-minute intervals. Therefore, if you rapidly adjust the provisioned read capacity units, this statistic might not reflect the true average. Metrics and dimensions 646 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description MaxProvisionedTabl eWriteCapacityUtil ization The percentage of provisioned write capacity utilized by the highest provisioned write table of an account. Units: Percent Valid Statistics: • Maximum – The maximum percentage of provisioned write capacity units utilized by the highest provisioned write table of the account. • Minimum – The minimum percentage of provisioned write capacity units utilized by the highest provisioned write table of the account. 
• Average – The average percentage of provisioned write capacity units utilized by the highest provisioned write table of the account. The metric is published for five-minute intervals. Therefore, if you rapidly adjust the provisioned write capacity units, this statistic might not reflect the true average. Metrics and dimensions 647 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description PerConnectionRequestRateExceeded Requests to Amazon Keyspaces that exceed the per-connection request rate quota. Each client connection to Amazon Keyspaces can support up to 3000 CQL requests per second. Clients can create multiple connections to increase throughput. When you're using multi-Region replication, each replicated write also contributes to this quota. As a best practice, we recommend increasing the number of connections to your tables to avoid PerConnectionRequestRateExceeded errors. There is no limit to the number of connections you can have in Amazon Keyspaces. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • SampleCount • Sum Metrics and dimensions 648 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ProvisionedReadCapacityUnits The number of provisioned read capacity units for a table. The TableName dimension returns the ProvisionedReadCapacityUnits for the table. Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Minimum – The lowest setting for provisioned read capacity. If you use ALTER TABLE to increase read capacity, this metric shows the lowest
AmazonKeyspaces-202
AmazonKeyspaces.pdf
202
your tables to avoid PerConnectionRequestRateExceeded errors. There is no limit to the number of connections you can have in Amazon Keyspaces. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • SampleCount • Sum Metrics and dimensions 648 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ProvisionedReadCapacityUnits The number of provisioned read capacity units for a table. The TableName dimension returns the ProvisionedReadCapacityUnits for the table. Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Minimum – The lowest setting for provisioned read capacity. If you use ALTER TABLE to increase read capacity, this metric shows the lowest value of ProvisionedReadCapacityUnits during this time period. • Maximum – The highest setting for provisioned read capacity. If you use ALTER TABLE to decrease read capacity, this metric shows the highest value of ProvisionedReadCapacityUnits during this time period. • Average – The average provisioned read capacity. The ProvisionedReadCapacityUnits metric is published at five-minute intervals. Therefore, if you rapidly adjust the provisioned read capacity units, this statistic might not reflect the true average. Metrics and dimensions 649 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ProvisionedWriteCapacityUnits The number of provisioned write capacity units for a table. The TableName dimension returns the ProvisionedWriteCapacityUnits for the table. Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Minimum – The lowest setting for provisioned write capacity. If you use ALTER TABLE to increase write capacity, this metric shows the lowest value of ProvisionedWriteCapacityUnits during this time period. • Maximum – The highest setting for provisioned write capacity. If you use ALTER TABLE to decrease write capacity, this metric shows the highest value of ProvisionedWriteCapacityUnits during this time period. 
• Average – The average provisioned write capacity. The ProvisionedWriteCapacityUnits metric is published at five-minute intervals. Therefore, if you rapidly adjust the provisioned write capacity units, this statistic might not reflect the true average. Metrics and dimensions 650 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ReadThrottleEvents Requests to Amazon Keyspaces that exceed the provisioned read capacity for a table, or account level quotas, request per connection quotas, or partition level quotas. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • SampleCount • Sum ReplicationLatency This metric only applies to multi-Region keyspaces and measures the time it took to replicate updates, inserts, or deletes from one replica table to another replica table in a multi-Region keyspace. Units: Millisecond Dimensions: TableName, ReceivingRegion Valid Statistics: • Average • Maximum • Minimum Metrics and dimensions 651 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description ReturnedItemCountBySelect The number of rows returned by multi-row SELECT queries during the specified time period. Multi-row SELECT queries are queries which do not contain the fully qualified primary key, such as full table scans and range queries. The number of rows returned is not necessarily the same as the number of rows that were evaluated. For example, suppose that you requested a SELECT * with ALLOW FILTERING on a table that had 100 rows, but specified a WHERE clause that narrowed the results so that only 15 rows were returned. In this case, the response from SELECT would contain a ScanCount of 100 and a Count of 15 returned rows. 
Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • Minimum • Maximum • Average • SampleCount • Sum Metrics and dimensions 652 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description StoragePartitionTh roughputCapacityExceeded Requests to an Amazon Keyspaces storage partition that exceed the throughput capacity of the partition. Amazon Keyspaces storage partitions can support up to 1000 WCU/WRU per second and 3000 RCU/RRU per second. We recommend reviewing your data model to distribute read/write traffic across more partitions to mitigate these exceptions. Note Logical Amazon Keyspaces partitions can span multiple storage partitions and are virtually unbounded in size. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • SampleCount • Sum SuccessfulRequestCount The number of successful requests processed over the specified time period. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • SampleCount Metrics and dimensions 653 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description SuccessfulRequestLatency The successful requests to Amazon Keyspaces during the specified time period. SuccessfulRequestLatency can provide two different kinds of information: • The elapsed time for successful requests (Minimum, Maximum, Sum, or Average). • The number of successful requests (SampleCount ). SuccessfulRequestLatency reflects activity only within Amazon Keyspaces and does not take into account network latency or client-side activity. Units: Milliseconds Dimensions: Keyspace, TableName, Operation Valid Statistics: • Minimum • Maximum • Average • SampleCount SystemErrors The requests to Amazon Keyspaces that generate a ServerError during the specified time period. A ServerError usually indicates an internal service error. 
Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • Sum • SampleCount Metrics and dimensions 654 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description
AmazonKeyspaces-203
AmazonKeyspaces.pdf
203
elapsed time for successful requests (Minimum, Maximum, Sum, or Average). • The number of successful requests (SampleCount). SuccessfulRequestLatency reflects activity only within Amazon Keyspaces and does not take into account network latency or client-side activity. Units: Milliseconds Dimensions: Keyspace, TableName, Operation Valid Statistics: • Minimum • Maximum • Average • SampleCount SystemErrors The requests to Amazon Keyspaces that generate a ServerError during the specified time period. A ServerError usually indicates an internal service error. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • Sum • SampleCount Metrics and dimensions 654 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric Description SystemReconciliationDeletes The units consumed to delete tombstoned data when client-side timestamps are enabled. Each SystemReconciliationDelete provides enough capacity to delete or update up to 1KB of data per row. For example, to update a row that stores 2.5 KB of data and to delete one or more columns within the row at the same time requires 3 SystemReconciliationDeletes. Or, to delete an entire row that contains 3.5 KB of data requires 4 SystemReconciliationDeletes. Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Sum – The total number of SystemReconciliationDeletes consumed in a time period. TTLDeletes The units consumed to delete or update data in a row by using Time to Live (TTL). Each TTLDelete provides enough capacity to delete or update up to 1KB of data per row. For example, to update a row that stores 2.5 KB of data and to delete one or more columns within the row at the same time requires 3 TTL deletes. Or, to delete an entire row that contains 3.5 KB of data requires 4 TTL deletes. Units: Count Dimensions: Keyspace, TableName Valid Statistics: • Sum – The total number of TTLDeletes consumed in a time period. 
Metrics and dimensions 655 Amazon Keyspaces (for Apache Cassandra) Developer Guide Metric UserErrors Description Requests to Amazon Keyspaces that generate an InvalidRequest error during the specified time period. An InvalidRequest usually indicates a client- side error, such as an invalid combination of parameter s, an attempt to update a nonexistent table, or an incorrect request signature. UserErrors represents the aggregate of invalid requests for the current AWS Region and the current AWS account. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • Sum • SampleCount WriteThrottleEvents Requests to Amazon Keyspaces that exceed the provisioned write capacity for a table, or account level quotas, request per connection quotas, or partition level quotas. Units: Count Dimensions: Keyspace, TableName, Operation Valid Statistics: • SampleCount • Sum Metrics and dimensions 656 Amazon Keyspaces (for Apache Cassandra) Developer Guide Dimensions for Amazon Keyspaces metrics The metrics for Amazon Keyspaces are qualified by the values for the account, table name, or operation. You can use the CloudWatch console to retrieve Amazon Keyspaces data along any of the dimensions in the following table. Dimension Keyspace Description This dimension limits the data to a specific keyspace. This value can be any keyspace in the current Region and the current AWS account. Operation This dimension limits the data to one of the Amazon Keyspaces CQL operations, such as INSERT or SELECT operations. TableName This dimension limits the data to a specific table. This value can be any table name in the current Region and the current AWS account. If the table name is not unique within the account, you must also specify Keyspace. Creating CloudWatch alarms to monitor Amazon Keyspaces You can create an Amazon CloudWatch alarm for Amazon Keyspaces that sends an Amazon Simple Notification Service (Amazon SNS) message when the alarm changes state. 
An alarm watches a single metric over a time period that you specify. It performs one or more actions based on the value of the metric relative to a given threshold over a number of time periods. The action is a notification sent to an Amazon SNS topic or an Application Auto Scaling policy. When you use Amazon Keyspaces in provisioned mode with Application Auto Scaling, the service creates two pairs of CloudWatch alarms on your behalf. Each pair represents your upper and lower boundaries for provisioned and consumed throughput settings. These CloudWatch alarms are triggered when the table's actual utilization deviates from your target utilization for a sustained period of time. To learn more about CloudWatch alarms created by Application Auto Scaling, see the section called “How Amazon Keyspaces automatic scaling works”. Alarms invoke actions for sustained state changes only. CloudWatch alarms do not invoke actions simply because they are in a particular state. The state must have changed and been maintained for a specified number of periods. Creating alarms 657 Amazon Keyspaces (for Apache Cassandra) Developer Guide For more information about creating CloudWatch alarms, see Using Amazon CloudWatch alarms in the Amazon CloudWatch User Guide. Logging Amazon Keyspaces API calls with AWS
AmazonKeyspaces-204
AmazonKeyspaces.pdf
204
target utilization for a sustained period of time. To learn more about CloudWatch alarms created by Application Auto Scaling, see the section called “How Amazon Keyspaces automatic scaling works”. Alarms invoke actions for sustained state changes only. CloudWatch alarms do not invoke actions simply because they are in a particular state. The state must have changed and been maintained for a specified number of periods. Creating alarms 657 Amazon Keyspaces (for Apache Cassandra) Developer Guide For more information about creating CloudWatch alarms, see Using Amazon CloudWatch alarms in the Amazon CloudWatch User Guide. Logging Amazon Keyspaces API calls with AWS CloudTrail Amazon Keyspaces is integrated with AWS CloudTrail, a service that provides a record of actions taken by a user, role, or an AWS service in Amazon Keyspaces. CloudTrail captures Data Definition Language (DDL) API calls and Data Manipulation Language (DML) API calls for Amazon Keyspaces as events. The calls that are captured include calls from the Amazon Keyspaces console and programmatic calls to the Amazon Keyspaces API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon Simple Storage Service (Amazon S3) bucket, including events for Amazon Keyspaces. If you don't configure a trail, you can still view the most recent supported events on the CloudTrail console in Event history. Using the information collected by CloudTrail, you can determine the request that was made to Amazon Keyspaces, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about CloudTrail, see the AWS CloudTrail User Guide. 
Topics • Configuring Amazon Keyspaces log file entries in CloudTrail • Amazon Keyspaces Data Definition Language (DDL) information in CloudTrail • Amazon Keyspaces Data Manipulation Language (DML) information in CloudTrail • Understanding Amazon Keyspaces log file entries Configuring Amazon Keyspaces log file entries in CloudTrail Each Amazon Keyspaces API action logged in CloudTrail includes request parameters that are expressed in CQL query language. For more information, see the CQL language reference. You can view, search, and download recent events in your AWS account. For more information, see Viewing events with CloudTrail event history. For an ongoing record of events in your AWS account, including events for Amazon Keyspaces, create a trail. A trail enables CloudTrail to deliver log files to an Amazon S3 bucket. By default, when you create a trail in the console, the trail applies to all AWS Regions. The trail logs events Logging with CloudTrail 658 Amazon Keyspaces (for Apache Cassandra) Developer Guide from all Regions in the AWS partition and delivers the log files to the Amazon S3 bucket that you specify. Additionally, you can configure other AWS services to further analyze and act upon the event data collected in CloudTrail logs. For more information, see the following topics in the AWS CloudTrail User Guide: • Overview for creating a trail • CloudTrail supported services and integrations • Configuring Amazon SNS notifications for CloudTrail • Receiving CloudTrail log files from multiple Regions • Receiving CloudTrail log files from multiple accounts Every event or log entry contains information about who generated the request. The identity information helps you determine the following: • Whether the request was made with root or AWS Identity and Access Management (IAM) user credentials. • Whether the request was made with temporary security credentials for a role or federated user. • Whether the request was made by another AWS service. 
For more information, see the CloudTrail userIdentity element. Amazon Keyspaces Data Definition Language (DDL) information in CloudTrail CloudTrail is enabled on your AWS account when you create the account. When a DDL activity occurs in Amazon Keyspaces, that activity is automatically recorded as a CloudTrail event along with other AWS service events in Event history. The following table shows the DDL statements that are logged for Amazon Keyspaces. CloudTrail eventName Statement CQL action AWS SDK action CreateKeyspace AlterKeyspace DDL DDL CREATE KEYSPACE CreateKeyspace ALTER KEYSPACE UpdateKeyspace DDL information in CloudTrail 659 Amazon Keyspaces (for Apache Cassandra) Developer Guide CloudTrail eventName DropKeyspace CreateTable DropTable AlterTable CreateUdt DropUdt Statement CQL action AWS SDK action DDL DDL DDL DDL DDL DDL DROP KEYSPACE DeleteKeyspace CREATE TABLE CreateTable DROP TABLE DeleteTable ALTER TABLE UpdateTable , TagResource , UntagResource CREATE TYPE CreateType DROP TYPE DeleteType Amazon Keyspaces Data Manipulation Language (DML) information in CloudTrail To enable logging of Amazon Keyspaces DML statements with CloudTrail, you have to first enable logging of data plane API activity in CloudTrail. You can start logging Amazon Keyspaces DML events in new or existing trails by choosing to log activity for the data event type Cassandra table using the CloudTrail console, or by setting the resources.type value to AWS::Cassandra::Table using the AWS CLI, or CloudTrail API operations. For more information, see Logging data events. For more information
AmazonKeyspaces-205
AmazonKeyspaces.pdf
205
ALTER TABLE UpdateTable , TagResource , UntagResource CREATE TYPE CreateType DROP TYPE DeleteType Amazon Keyspaces Data Manipulation Language (DML) information in CloudTrail To enable logging of Amazon Keyspaces DML statements with CloudTrail, you have to first enable logging of data plane API activity in CloudTrail. You can start logging Amazon Keyspaces DML events in new or existing trails by choosing to log activity for the data event type Cassandra table using the CloudTrail console, or by setting the resources.type value to AWS::Cassandra::Table using the AWS CLI, or CloudTrail API operations. For more information, see Logging data events. For more information and an example that shows how to create alarms for data events, see the following post on the AWS Database blog Using DML auditing for Amazon Keyspaces (for Apache Cassandra). The following table shows the data events that are logged by CloudTrail for Cassandra table. CloudTrail eventName Statement CQL action AWS SDK actions Select DML SELECT GetKeyspace , GetTable, GetType, DML information in CloudTrail 660 Amazon Keyspaces (for Apache Cassandra) Developer Guide CloudTrail eventName Statement CQL action AWS SDK actions Insert Update Delete DML DML DML INSERT UPDATE DELETE ListKeysp aces , ListTable s , ListTypes , ListTagsF orResource no AWS SDK actions available no AWS SDK actions available no AWS SDK actions available Understanding Amazon Keyspaces log file entries CloudTrail log files contain one or more log entries. An event represents a single request from any source and includes information about the requested action, the date and time of the action, request parameters, and so on. CloudTrail log files aren't an ordered stack trace of the public API calls, so they don't appear in any specific order. 
The following example shows a CloudTrail log entry that demonstrates the CreateKeyspace, DropKeyspace, CreateTable, and DropTable actions: { "Records": [ { "eventVersion": "1.05", "userIdentity": { "type": "AssumedRole", "principalId": "AKIAIOSFODNN7EXAMPLE:alice", "arn": "arn:aws:sts::111122223333:assumed-role/users/alice", "accountId": "111122223333", "sessionContext": { "sessionIssuer": { "type": "Role", Understanding log file entries 661 Amazon Keyspaces (for Apache Cassandra) Developer Guide "principalId": "AKIAIOSFODNN7EXAMPLE", "arn": "arn:aws:iam::111122223333:role/Admin", "accountId": "111122223333", "userName": "Admin" }, "webIdFederationData": {}, "attributes": { "mfaAuthenticated": "false", "creationDate": "2020-01-15T18:47:56Z" } } }, "eventTime": "2020-01-15T18:53:04Z", "eventSource": "cassandra.amazonaws.com", "eventName": "CreateKeyspace", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "rawQuery": "\n\tCREATE KEYSPACE \"mykeyspace\"\n\tWITH\n\t\tREPLICATION = {'class': 'SingleRegionStrategy'}\n\t\t", "keyspaceName": "mykeyspace" }, "responseElements": null, "requestID": "bfa3e75d-bf4d-4fc0-be5e-89d15850eb41", "eventID": "d25beae8-f611-4229-877a-921557a07bb9", "readOnly": false, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Keyspace", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "recipientAccountId": "111122223333", "managementEvent": true, "eventCategory": "Management", "tlsDetails": { "tlsVersion": "TLSv1.2", "cipherSuite": "ECDHE-RSA-AES128-GCM-SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" }, Understanding log file entries 662 Amazon Keyspaces (for Apache Cassandra) Developer Guide { "eventVersion": "1.05", "userIdentity": { "type": "AssumedRole", "principalId": "AKIAIOSFODNN7EXAMPLE:alice", "arn": 
"arn:aws:sts::111122223333:assumed-role/users/alice", "accountId": "111122223333", "sessionContext": { "sessionIssuer": { "type": "Role", "principalId": "AKIAIOSFODNN7EXAMPLE", "arn": "arn:aws:iam::111122223333:role/Admin", "accountId": "111122223333", "userName": "Admin" }, "webIdFederationData": {}, "attributes": { "mfaAuthenticated": "false", "creationDate": "2020-01-15T18:47:56Z" } } }, "eventTime": "2020-01-15T19:28:39Z", "eventSource": "cassandra.amazonaws.com", "eventName": "DropKeyspace", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "rawQuery": "DROP KEYSPACE \"mykeyspace\"", "keyspaceName": "mykeyspace" }, "responseElements": null, "requestID": "66f3d86a-56ae-4c29-b46f-abcd489ed86b", "eventID": "e5aebeac-e1dd-41e3-a515-84fe6aaabd7b", "readOnly": false, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Keyspace", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/" } ], "eventType": "AwsApiCall", Understanding log file entries 663 Amazon Keyspaces (for Apache Cassandra) Developer Guide "apiVersion": "3.4.4", "recipientAccountId": "111122223333", "managementEvent": true, "eventCategory": "Management", "tlsDetails": { "tlsVersion": "TLSv1.2", "cipherSuite": "ECDHE-RSA-AES128-GCM-SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" }, { "eventVersion": "1.05", "userIdentity": { "type": "AssumedRole", "principalId": "AKIAIOSFODNN7EXAMPLE:alice", "arn": "arn:aws:sts::111122223333:assumed-role/users/alice", "accountId": "111122223333", "sessionContext": { "sessionIssuer": { "type": "Role", "principalId": "AKIAIOSFODNN7EXAMPLE", "arn": "arn:aws:iam::111122223333:role/Admin", "accountId": "111122223333", "userName": "Admin" }, "webIdFederationData": {}, "attributes": { "mfaAuthenticated": "false", "creationDate": "2020-01-15T18:47:56Z" } } }, "eventTime": "2020-01-15T18:55:24Z", "eventSource": "cassandra.amazonaws.com", 
"eventName": "CreateTable", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "rawQuery": "\n\tCREATE TABLE \"mykeyspace\".\"mytable\"(\n\t\t\"ID\" int, \n\t\t\"username\" text,\n\t\t\"email\" text,\n\t\t\"post_type\" text,\n\t\tPRIMARY KEY((\"ID\", \"username\", \"email\")))", "keyspaceName": "mykeyspace", "tableName": "mytable" }, Understanding log file entries 664 Amazon Keyspaces (for Apache Cassandra) Developer Guide "responseElements": null, "requestID": "5f845963-70ea-4988-8a7a-2e66d061aacb", "eventID": "fe0dbd2b-7b34-4675-a30c-740f9d8d73f9", "readOnly": false, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Table", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "recipientAccountId": "111122223333", "managementEvent": true, "eventCategory": "Management", "tlsDetails": { "tlsVersion": "TLSv1.2", "cipherSuite": "ECDHE-RSA-AES128-GCM-SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" }, { "eventVersion": "1.05", "userIdentity": { "type": "AssumedRole", "principalId": "AKIAIOSFODNN7EXAMPLE:alice", "arn": "arn:aws:sts::111122223333:assumed-role/users/alice", "accountId": "111122223333", "sessionContext": { "sessionIssuer": { "type": "Role", "principalId": "AKIAIOSFODNN7EXAMPLE", "arn": "arn:aws:iam::111122223333:role/Admin", "accountId": "111122223333", "userName": "Admin" }, "webIdFederationData": {}, "attributes": { "mfaAuthenticated": "false", "creationDate": "2020-01-15T18:47:56Z" } } }, Understanding log file entries 665 Amazon Keyspaces (for Apache Cassandra) Developer Guide "eventTime": "2020-01-15T19:27:59Z", "eventSource": "cassandra.amazonaws.com", "eventName": "DropTable", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "rawQuery": "DROP TABLE 
\"mykeyspace\".\"mytable\"", "keyspaceName": "mykeyspace", "tableName": "mytable" }, "responseElements": null, "requestID": "025501b0-3582-437e-9d18-8939e9ef262f", "eventID": "1a5cbedc-4e38-4889-8475-3eab98de0ffd", "readOnly": false, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Table", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "recipientAccountId": "111122223333", "managementEvent": true, "eventCategory": "Management", "tlsDetails": { "tlsVersion": "TLSv1.2", "cipherSuite": "ECDHE-RSA-AES128-GCM-SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" } ] } The following log file shows an example of a SELECT statement. { "eventVersion": "1.09", "userIdentity": { "type": "IAMUser", "principalId": "AKIAIOSFODNN7EXAMPLE", Understanding log file entries 666 Amazon Keyspaces
AmazonKeyspaces-206
AmazonKeyspaces.pdf
206
665 Amazon Keyspaces (for Apache Cassandra) Developer Guide "eventTime": "2020-01-15T19:27:59Z", "eventSource": "cassandra.amazonaws.com", "eventName": "DropTable", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "rawQuery": "DROP TABLE \"mykeyspace\".\"mytable\"", "keyspaceName": "mykeyspace", "tableName": "mytable" }, "responseElements": null, "requestID": "025501b0-3582-437e-9d18-8939e9ef262f", "eventID": "1a5cbedc-4e38-4889-8475-3eab98de0ffd", "readOnly": false, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Table", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "recipientAccountId": "111122223333", "managementEvent": true, "eventCategory": "Management", "tlsDetails": { "tlsVersion": "TLSv1.2", "cipherSuite": "ECDHE-RSA-AES128-GCM-SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" } ] } The following log file shows an example of a SELECT statement. 
{ "eventVersion": "1.09", "userIdentity": { "type": "IAMUser", "principalId": "AKIAIOSFODNN7EXAMPLE", Understanding log file entries 666 Amazon Keyspaces (for Apache Cassandra) Developer Guide "arn": "arn:aws:iam::111122223333:user/alice", "accountId": "111122223333", "userName": "alice" }, "eventTime": "2023-11-17T10:38:04Z", "eventSource": "cassandra.amazonaws.com", "eventName": "Select", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "keyspaceName": "my_keyspace", "tableName": "my_table", "conditions": [ "pk = **(Redacted)", "ck < 3**(Redacted)0", "region = 't**(Redacted)t'" ], "select": [ "pk", "ck", "region" ], "allowFiltering": true }, "responseElements": null, "requestID": "6d83bbf0-a3d0-4d49-b1d9-e31779a28628", "eventID": "e00552d3-34e9-4092-931a-912c4e08ba17", "readOnly": true, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Table", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/my_keyspace/ table/my_table" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "managementEvent": false, "recipientAccountId": "111122223333", "eventCategory": "Data", "tlsDetails": { "tlsVersion": "TLSv1.3", Understanding log file entries 667 Amazon Keyspaces (for Apache Cassandra) Developer Guide "cipherSuite": "TLS_AES_128_GCM_SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" } } The following log file shows an example of an INSERT statement. 
{ "eventVersion": "1.09", "userIdentity": { "type": "IAMUser", "principalId": "AKIAIOSFODNN7EXAMPLE", "arn": "arn:aws:iam::111122223333:user/alice", "accountId": "111122223333", "userName": "alice" }, "eventTime": "2023-12-01T22:11:43Z", "eventSource": "cassandra.amazonaws.com", "eventName": "Insert", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "keyspaceName": "my_keyspace", "tableName": "my_table", "primaryKeys": { "pk": "**(Redacted)", "ck": "1**(Redacted)8" }, "columnNames": [ "pk", "ck", "region" ], "updateParameters": { "TTL": "2**(Redacted)0" } } }, "responseElements": null, "requestID": "edf8af47-2f87-4432-864d-a960ac35e471", "eventID": "81b56a1c-9bdd-4c92-bb8e-92776b5a3bf1", "readOnly": false, Understanding log file entries 668 Amazon Keyspaces (for Apache Cassandra) "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Table", Developer Guide "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/my_keyspace/table/ my_table" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "managementEvent": false, "recipientAccountId": "111122223333", "eventCategory": "Data", "tlsDetails": { "tlsVersion": "TLSv1.3", "cipherSuite": "TLS_AES_128_GCM_SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" } } The following log file shows an example of an UPDATE statement. 
{ "eventVersion": "1.09", "userIdentity": { "type": "IAMUser", "principalId": "AKIAIOSFODNN7EXAMPLE", "arn": "arn:aws:iam::111122223333:user/alice", "accountId": "111122223333", "userName": "alice" }, "eventTime": "2023-12-01T22:11:43Z", "eventSource": "cassandra.amazonaws.com", "eventName": "Update", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "keyspaceName": "my_keyspace", "tableName": "my_table", "primaryKeys": { "pk": "'t**(Redacted)t'", "ck": "'s**(Redacted)g'" Understanding log file entries 669 Amazon Keyspaces (for Apache Cassandra) Developer Guide }, "assignmentColumnNames": [ "nonkey" ], "conditions": [ "nonkey < 1**(Redacted)7" ] }, "responseElements": null, "requestID": "edf8af47-2f87-4432-864d-a960ac35e471", "eventID": "81b56a1c-9bdd-4c92-bb8e-92776b5a3bf1", "readOnly": false, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Table", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/my_keyspace/table/ my_table" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "managementEvent": false, "recipientAccountId": "111122223333", "eventCategory": "Data", "tlsDetails": { "tlsVersion": "TLSv1.3", "cipherSuite": "TLS_AES_128_GCM_SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" } } The following log file shows an example of a DELETE statement. 
{ "eventVersion": "1.09", "userIdentity": { "type": "IAMUser", "principalId": "AKIAIOSFODNN7EXAMPLE", "arn": "arn:aws:iam::111122223333:user/alice", "accountId": "111122223333", "userName": "alice", }, Understanding log file entries 670 Amazon Keyspaces (for Apache Cassandra) Developer Guide "eventTime": "2023-10-23T13:59:05Z", "eventSource": "cassandra.amazonaws.com", "eventName": "Delete", "awsRegion": "us-east-1", "sourceIPAddress": "10.24.34.01", "userAgent": "Cassandra Client/ProtocolV4", "requestParameters": { "keyspaceName": "my_keyspace", "tableName": "my_table", "primaryKeys": { "pk": "**(Redacted)", "ck": "**(Redacted)" }, "conditions": [], "deleteColumnNames": [ "m", "s" ], "updateParameters": {} }, "responseElements": null, "requestID": "3d45e63b-c0c8-48e2-bc64-31afc5b4f49d", "eventID": "499da055-c642-4762-8775-d91757f06512", "readOnly": false, "resources": [ { "accountId": "111122223333", "type": "AWS::Cassandra::Table", "ARN": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/my_keyspace/table/ my_table" } ], "eventType": "AwsApiCall", "apiVersion": "3.4.4", "managementEvent": false, "recipientAccountId": "111122223333", "eventCategory": "Data", "tlsDetails": { "tlsVersion": "TLSv1.3", "cipherSuite": "TLS_AES_128_GCM_SHA256", "clientProvidedHostHeader": "cassandra.us-east-1.amazonaws.com" } } Understanding log file entries 671 Amazon Keyspaces (for Apache Cassandra) Developer Guide Security in Amazon Keyspaces (for Apache Cassandra) Cloud security at AWS is the highest priority. As an AWS customer, you benefit from a data center and network architecture that is built to meet the requirements of the most security-sensitive organizations. Security is a shared responsibility between AWS and you. The shared responsibility model describes this as security of the cloud and security in the cloud: • Security of the cloud – AWS is responsible for protecting the infrastructure that runs AWS services in the AWS Cloud. 
AWS also provides you with services that you can use securely. The effectiveness of our security is regularly tested and verified by third-party auditors as part of the AWS compliance programs. To learn about the compliance programs that apply to Amazon Keyspaces, see AWS Services in scope by compliance program. • Security in the cloud – Your responsibility is determined by the AWS service that you use. You are also responsible for other factors including the sensitivity of your data, your organization’s requirements, and applicable laws and regulations. This documentation will help you understand how to apply the shared responsibility model when using Amazon Keyspaces. The following topics show you how to configure Amazon Keyspaces to meet your security and compliance objectives. You'll also learn how to use other AWS services that can help you to monitor and secure your Amazon Keyspaces resources. Topics • Data protection in Amazon
AmazonKeyspaces-207
AmazonKeyspaces.pdf
207
program. • Security in the cloud – Your responsibility is determined by the AWS service that you use. You are also responsible for other factors including the sensitivity of your data, your organization’s requirements, and applicable laws and regulations. This documentation will help you understand how to apply the shared responsibility model when using Amazon Keyspaces. The following topics show you how to configure Amazon Keyspaces to meet your security and compliance objectives. You'll also learn how to use other AWS services that can help you to monitor and secure your Amazon Keyspaces resources. Topics • Data protection in Amazon Keyspaces • AWS Identity and Access Management for Amazon Keyspaces • Compliance validation for Amazon Keyspaces (for Apache Cassandra) • Resilience and disaster recovery in Amazon Keyspaces • Infrastructure security in Amazon Keyspaces • Configuration and vulnerability analysis for Amazon Keyspaces • Security best practices for Amazon Keyspaces 672 Amazon Keyspaces (for Apache Cassandra) Developer Guide Data protection in Amazon Keyspaces The AWS shared responsibility model applies to data protection in Amazon Keyspaces (for Apache Cassandra). As described in this model, AWS is responsible for protecting the global infrastructure that runs all of the AWS Cloud. You are responsible for maintaining control over your content that is hosted on this infrastructure. You are also responsible for the security configuration and management tasks for the AWS services that you use. For more information about data privacy, see the Data Privacy FAQ. For information about data protection in Europe, see the AWS Shared Responsibility Model and GDPR blog post on the AWS Security Blog. For data protection purposes, we recommend that you protect AWS account credentials and set up individual users with AWS IAM Identity Center or AWS Identity and Access Management (IAM). 
That way, each user is given only the permissions necessary to fulfill their job duties. We also recommend that you secure your data in the following ways: • Use multi-factor authentication (MFA) with each account. • Use SSL/TLS to communicate with AWS resources. We require TLS 1.2 and recommend TLS 1.3. • Set up API and user activity logging with AWS CloudTrail. For information about using CloudTrail trails to capture AWS activities, see Working with CloudTrail trails in the AWS CloudTrail User Guide. • Use AWS encryption solutions, along with all default security controls within AWS services. • Use advanced managed security services such as Amazon Macie, which assists in discovering and securing sensitive data that is stored in Amazon S3. • If you require FIPS 140-3 validated cryptographic modules when accessing AWS through a command line interface or an API, use a FIPS endpoint. For more information about the available FIPS endpoints, see Federal Information Processing Standard (FIPS) 140-3. We strongly recommend that you never put confidential or sensitive information, such as your customers' email addresses, into tags or free-form text fields such as a Name field. This includes when you work with Amazon Keyspaces or other AWS services using the console, API, AWS CLI, or AWS SDKs. Any data that you enter into tags or free-form text fields used for names may be used for billing or diagnostic logs. If you provide a URL to an external server, we strongly recommend that you do not include credentials information in the URL to validate your request to that server. 
Topics • Encryption at rest in Amazon Keyspaces Data protection 673 Amazon Keyspaces (for Apache Cassandra) Developer Guide • Encryption in transit in Amazon Keyspaces • Internetwork traffic privacy in Amazon Keyspaces Encryption at rest in Amazon Keyspaces Amazon Keyspaces (for Apache Cassandra) encryption at rest provides enhanced security by encrypting all your data at rest using encryption keys stored in AWS Key Management Service (AWS KMS). This functionality helps reduce the operational burden and complexity involved in protecting sensitive data. With encryption at rest, you can build security-sensitive applications that meet strict compliance and regulatory requirements for data protection. Amazon Keyspaces encryption at rest encrypts your data using 256-bit Advanced Encryption Standard (AES-256). This helps secure your data from unauthorized access to the underlying storage. Amazon Keyspaces encrypts and decrypts the table data transparently. Amazon Keyspaces uses envelope encryption and a key hierarchy to protect data encryption keys. It integrates with AWS KMS for storing and managing the root encryption key. For more information about the encryption key hierarchy, see the section called “How it works”. For more information about AWS KMS concepts like envelope encryption, see AWS KMS management service concepts in the AWS Key Management Service Developer Guide. When creating a new table, you can choose one of the following AWS KMS keys (KMS keys): • AWS owned key – This is the default encryption type. The key is owned by Amazon Keyspaces (no additional charge). • Customer managed key – This key is stored in
AmazonKeyspaces-208
AmazonKeyspaces.pdf
208
It integrates with AWS KMS for storing and managing the root encryption key. For more information about the encryption key hierarchy, see the section called “How it works”. For more information about AWS KMS concepts like envelope encryption, see AWS KMS management service concepts in the AWS Key Management Service Developer Guide. When creating a new table, you can choose one of the following AWS KMS keys (KMS keys): • AWS owned key – This is the default encryption type. The key is owned by Amazon Keyspaces (no additional charge). • Customer managed key – This key is stored in your account and is created, owned, and managed by you. You have full control over the customer managed key (AWS KMS charges apply). You can switch between the AWS owned key and the customer managed key at any given time. You can specify a customer managed key when you create a new table or change the KMS key of an existing table by using the console or programmatically using CQL statements. To learn how, see Encryption at rest: How to use customer managed keys to encrypt tables in Amazon Keyspaces. Encryption at rest using the default option of AWS owned keys is offered at no additional charge. However, AWS KMS charges apply for customer managed keys. For more information about pricing, see AWS KMS pricing. Encryption at rest 674 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces encryption at rest is available in all AWS Regions, including the AWS China (Beijing) and AWS China (Ningxia) Regions. For more information, see Encryption at rest: How it works in Amazon Keyspaces. Topics • Encryption at rest: How it works in Amazon Keyspaces • Encryption at rest: How to use customer managed keys to encrypt tables in Amazon Keyspaces Encryption at rest: How it works in Amazon Keyspaces Amazon Keyspaces (for Apache Cassandra) encryption at rest encrypts your data using the 256-bit Advanced Encryption Standard (AES-256). 
This helps secure your data from unauthorized access to the underlying storage. All customer data in Amazon Keyspaces tables is encrypted at rest by default, and server-side encryption is transparent, which means that changes to applications aren't required. Encryption at rest integrates with AWS Key Management Service (AWS KMS) for managing the encryption key that is used to encrypt your tables. When creating a new table or updating an existing table, you can choose one of the following AWS KMS key options: • AWS owned key – This is the default encryption type. The key is owned by Amazon Keyspaces (no additional charge). • Customer managed key – This key is stored in your account and is created, owned, and managed by you. You have full control over the customer managed key (AWS KMS charges apply). AWS KMS key (KMS key) Encryption at rest protects all your Amazon Keyspaces data with a AWS KMS key. By default, Amazon Keyspaces uses an AWS owned key, a multi-tenant encryption key that is created and managed in an Amazon Keyspaces service account. However, you can encrypt your Amazon Keyspaces tables using a customer managed key in your AWS account. You can select a different KMS key for each table in a keyspace. The KMS key you select for a table is also used to encrypt all its metadata and restorable backups. You select the KMS key for a table when you create or update the table. You can change the KMS key for a table at any time, either in the Amazon Keyspaces console or by using the ALTER Encryption at rest 675 Amazon Keyspaces (for Apache Cassandra) Developer Guide TABLE statement. The process of switching KMS keys is seamless, and doesn't require downtime or cause service degradation. Key hierarchy Amazon Keyspaces uses a key hierarchy to encrypt data. In this key hierarchy, the KMS key is the root key. It's used to encrypt and decrypt the Amazon Keyspaces table encryption key. 
The table encryption key is used to encrypt the encryption keys used internally by Amazon Keyspaces to encrypt and decrypt data when performing read and write operations. With the encryption key hierarchy, you can make changes to the KMS key without having to reencrypt data or impacting applications and ongoing data operations. Encryption at rest 676 Amazon Keyspaces (for Apache Cassandra) Developer Guide Table key The Amazon Keyspaces table key is used as a key encryption key. Amazon Keyspaces uses the table key to protect the internal data encryption keys that are used to encrypt the data stored in tables, log files, and restorable backups. Amazon Keyspaces generates a unique data encryption key for each underlying structure in a table. However, multiple table rows might be protected by the same data encryption key. When you first set the KMS key to a customer managed key, AWS KMS
AmazonKeyspaces-209
AmazonKeyspaces.pdf
209
applications and ongoing data operations. Encryption at rest 676 Amazon Keyspaces (for Apache Cassandra) Developer Guide Table key The Amazon Keyspaces table key is used as a key encryption key. Amazon Keyspaces uses the table key to protect the internal data encryption keys that are used to encrypt the data stored in tables, log files, and restorable backups. Amazon Keyspaces generates a unique data encryption key for each underlying structure in a table. However, multiple table rows might be protected by the same data encryption key. When you first set the KMS key to a customer managed key, AWS KMS generates a data key. The AWS KMS data key refers to the table key in Amazon Keyspaces. When you access an encrypted table, Amazon Keyspaces sends a request to AWS KMS to use the KMS key to decrypt the table key. Then, it uses the plaintext table key to decrypt the Amazon Keyspaces data encryption keys, and it uses the plaintext data encryption keys to decrypt table data. Amazon Keyspaces uses and stores the table key and data encryption keys outside of AWS KMS. It protects all keys with Advanced Encryption Standard (AES) encryption and 256-bit encryption keys. Then, it stores the encrypted keys with the encrypted data so that they're available to decrypt the table data on demand. Table key caching To avoid calling AWS KMS for every Amazon Keyspaces operation, Amazon Keyspaces caches the plaintext table keys for each connection in memory. If Amazon Keyspaces gets a request for the cached table key after five minutes of inactivity, it sends a new request to AWS KMS to decrypt the table key. This call captures any changes made to the access policies of the KMS key in AWS KMS or AWS Identity and Access Management (IAM) since the last request to decrypt the table key. Envelope encryption If you change the customer managed key for your table, Amazon Keyspaces generates a new table key. Then, it uses the new table key to reencrypt the data encryption keys. 
It also uses the new table key to encrypt previous table keys that are used to protect restorable backups. This process is called envelope encryption. This ensures that you can access restorable backups even if you rotate the customer managed key. For more information about envelope encryption, see Envelope Encryption in the AWS Key Management Service Developer Guide. Topics Encryption at rest 677 Amazon Keyspaces (for Apache Cassandra) • AWS owned keys • Customer managed keys • Encryption at rest usage notes AWS owned keys Developer Guide AWS owned keys aren't stored in your AWS account. They are part of a collection of KMS keys that AWS owns and manages for use in multiple AWS accounts. AWS services can use AWS owned keys to protect your data. You can't view, manage, or use AWS owned keys, or audit their use. However, you don't need to do any work or change any programs to protect the keys that encrypt your data. You aren't charged a monthly fee or a usage fee for use of AWS owned keys, and they don't count against AWS KMS quotas for your account. Customer managed keys Customer managed keys are keys in your AWS account that you create, own, and manage. You have full control over these KMS keys. Use a customer managed key to get the following features: • You create and manage the customer managed key, including setting and maintaining the key policies, IAM policies, and grants to control access to the customer managed key. You can enable and disable the customer managed key, enable and disable automatic key rotation, and schedule the customer managed key for deletion when it is no longer in use. You can create tags and aliases for the customer managed keys you manage. • You can use a customer managed key with imported key material or a customer managed key in a custom key store that you own and manage. • You can use AWS CloudTrail and Amazon CloudWatch Logs to track the requests that Amazon Keyspaces sends to AWS KMS on your behalf. 
For more information, see the section called “Step 6: Configure monitoring with AWS CloudTrail”. Customer managed keys incur a charge for each API call, and AWS KMS quotas apply to these KMS keys. For more information, see AWS KMS resource or request quotas. When you specify a customer managed key as the root encryption key for a table, restorable backups are encrypted with the same encryption key that is specified for the table at the time the Encryption at rest 678 Amazon Keyspaces (for Apache Cassandra) Developer Guide backup is created. If the KMS key for the table is rotated, key enveloping ensures that the latest KMS key has access to all restorable backups. Amazon Keyspaces must
AmazonKeyspaces-210
AmazonKeyspaces.pdf
210
incur a charge for each API call, and AWS KMS quotas apply to these KMS keys. For more information, see AWS KMS resource or request quotas. When you specify a customer managed key as the root encryption key for a table, restorable backups are encrypted with the same encryption key that is specified for the table at the time the Encryption at rest 678 Amazon Keyspaces (for Apache Cassandra) Developer Guide backup is created. If the KMS key for the table is rotated, key enveloping ensures that the latest KMS key has access to all restorable backups. Amazon Keyspaces must have access to your customer managed key to provide you access to your table data. If the state of the encryption key is set to disabled or it's scheduled for deletion, Amazon Keyspaces is unable to encrypt or decrypt data. As a result, you are not able to perform read and write operations on the table. As soon as the service detects that your encryption key is inaccessible, Amazon Keyspaces sends an email notification to alert you. You must restore access to your encryption key within seven days or Amazon Keyspaces deletes your table automatically. As a precaution, Amazon Keyspaces creates a restorable backup of your table data before deleting the table. Amazon Keyspaces maintains the restorable backup for 35 days. After 35 days, you can no longer restore your table data. You aren't billed for the restorable backup, but standard restore charges apply. You can use this restorable backup to restore your data to a new table. To initiate the restore, the last customer managed key used for the table must be enabled, and Amazon Keyspaces must have access to it. Note When you're creating a table that's encrypted using a customer managed key that's inaccessible or scheduled for deletion before the creation process completes, an error occurs. The create table operation fails, and you're sent an email notification. 
Encryption at rest usage notes Consider the following when you're using encryption at rest in Amazon Keyspaces. • Server-side encryption at rest is enabled on all Amazon Keyspaces tables and can't be disabled. The entire table is encrypted at rest, you can't select specific columns or rows for encryption. • By default, Amazon Keyspaces uses a single-service default key (AWS owned key) for encrypting all of your tables. If this key doesn’t exist, it's created for you. Service default keys can't be disabled. • Encryption at rest only encrypts data while it's static (at rest) on a persistent storage media. If data security is a concern for data in transit or data in use, you must take additional measures: Encryption at rest 679 Amazon Keyspaces (for Apache Cassandra) Developer Guide • Data in transit: All your data in Amazon Keyspaces is encrypted in transit. By default, communications to and from Amazon Keyspaces are protected by using Secure Sockets Layer (SSL)/Transport Layer Security (TLS) encryption. • Data in use: Protect your data before sending it to Amazon Keyspaces by using client-side encryption. • Customer managed keys: Data at rest in your tables is always encrypted using your customer managed keys. However operations that perform atomic updates of multiple rows encrypt data temporarily using AWS owned keys during processing. This includes range delete operations and operations that simultaneously access static and non-static data. • A single customer managed key can have up to 50,000 grants. Every Amazon Keyspaces table associated with a customer managed key consumes 2 grants. One grant is released when the table is deleted. The second grant is used to create an automatic snapshot of the table to protect from data loss in case Amazon Keyspaces lost access to the customer managed key unintentionally. This grant is released 42 days after deletion of the table. 
Encryption at rest: How to use customer managed keys to encrypt tables in Amazon Keyspaces You can use the console or CQL statements to specify the AWS KMS key for new tables and update the encryption keys of existing tables in Amazon Keyspaces. The following topic outlines how to implement customer managed keys for new and existing tables. Topics • Prerequisites: Create a customer managed key using AWS KMS and grant permissions to Amazon Keyspaces • Step 3: Specify a customer managed key for a new table • Step 4: Update the encryption key of an existing table • Step 5: Use the Amazon Keyspaces encryption context in logs • Step 6: Configure monitoring with AWS CloudTrail Encryption at rest 680 Amazon Keyspaces (for Apache Cassandra) Developer Guide Prerequisites: Create a customer managed key using AWS KMS and grant permissions to Amazon Keyspaces Before you can protect an Amazon Keyspaces table with a customer managed key, you must first create the key in AWS Key Management Service (AWS KMS) and then authorize Amazon Keyspaces
AmazonKeyspaces-211
AmazonKeyspaces.pdf
211
Keyspaces • Step 3: Specify a customer managed key for a new table • Step 4: Update the encryption key of an existing table • Step 5: Use the Amazon Keyspaces encryption context in logs • Step 6: Configure monitoring with AWS CloudTrail Encryption at rest 680 Amazon Keyspaces (for Apache Cassandra) Developer Guide Prerequisites: Create a customer managed key using AWS KMS and grant permissions to Amazon Keyspaces Before you can protect an Amazon Keyspaces table with a customer managed key, you must first create the key in AWS Key Management Service (AWS KMS) and then authorize Amazon Keyspaces to use that key. Step 1: Create a customer managed key using AWS KMS To create a customer managed key to be used to protect an Amazon Keyspaces table, you can follow the steps in Creating symmetric encryption KMS keys using the console or the AWS API. Step 2: Authorize the use of your customer managed key Before you can choose a customer managed key to protect an Amazon Keyspaces table, the policies on that customer managed key must give Amazon Keyspaces permission to use it on your behalf. You have full control over the policies and grants on the customer managed key. You can provide these permissions in a key policy, an IAM policy, or a grant. Amazon Keyspaces doesn't need additional authorization to use the default AWS owned key to protect the Amazon Keyspaces tables in your AWS account. The following topics show how to configure the required permissions using IAM policies and grants that allow Amazon Keyspaces tables to use a customer managed key. Topics • Key policy for customer managed keys • Example key policy • Using grants to authorize Amazon Keyspaces Key policy for customer managed keys When you select a customer managed key to protect an Amazon Keyspaces table, Amazon Keyspaces gets permission to use the customer managed key on behalf of the principal who makes the selection. 
That principal, a user or role, must have the permissions on the customer managed key that Amazon Keyspaces requires. At a minimum, Amazon Keyspaces requires the following permissions on a customer managed key: • kms:Encrypt Encryption at rest 681 Amazon Keyspaces (for Apache Cassandra) Developer Guide • kms:Decrypt • kms:ReEncrypt* (for kms:ReEncryptFrom and kms:ReEncryptTo) • kms:GenerateDataKey* (for kms:GenerateDataKey and kms:GenerateDataKeyWithoutPlaintext) • kms:DescribeKey • kms:CreateGrant Example key policy For example, the following example key policy provides only the required permissions. The policy has the following effects: • Allows Amazon Keyspaces to use the customer managed key in cryptographic operations and create grants—but only when it's acting on behalf of principals in the account who have permission to use Amazon Keyspaces. If the principals specified in the policy statement don't have permission to use Amazon Keyspaces, the call fails, even when it comes from the Amazon Keyspaces service. • The kms:ViaService condition key allows the permissions only when the request comes from Amazon Keyspaces on behalf of the principals listed in the policy statement. These principals can't call these operations directly. Note that the kms:ViaService value, cassandra.*.amazonaws.com, has an asterisk (*) in the Region position. Amazon Keyspaces requires the permission to be independent of any particular AWS Region. • Gives the customer managed key administrators (users who can assume the db-team role) read- only access to the customer managed key and permission to revoke grants, including the grants that Amazon Keyspaces requires to protect the table. • Gives Amazon Keyspaces read-only access to the customer managed key. In this case, Amazon Keyspaces can call these operations directly. It doesn't have to act on behalf of an account principal. Before using an example key policy, replace the example principals with actual principals from your AWS account. 
{ "Id": "key-policy-cassandra", "Version":"2012-10-17", "Statement": [ { Encryption at rest 682 Amazon Keyspaces (for Apache Cassandra) Developer Guide "Sid" : "Allow access through Amazon Keyspaces for all principals in the account that are authorized to use Amazon Keyspaces", "Effect": "Allow", "Principal": {"AWS": "arn:aws:iam::111122223333:user/db-lead"}, "Action": [ "kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey", "kms:CreateGrant" ], "Resource": "*", "Condition": { "StringLike": { "kms:ViaService" : "cassandra.*.amazonaws.com" } } }, { "Sid": "Allow administrators to view the customer managed key and revoke grants", "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::111122223333:role/db-team" }, "Action": [ "kms:Describe*", "kms:Get*", "kms:List*", "kms:RevokeGrant" ], "Resource": "*" } ] } Using grants to authorize Amazon Keyspaces In addition to key policies, Amazon Keyspaces uses grants to set permissions on a customer managed key. To view the grants on a customer managed key in your account, use the ListGrants operation. Amazon Keyspaces doesn't need grants, or any additional permissions, to use the AWS owned key to protect your table. Encryption at rest 683 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces uses the grant permissions when it performs background system maintenance and continuous data protection tasks. It
AmazonKeyspaces-212
AmazonKeyspaces.pdf
212
}, "Action": [ "kms:Describe*", "kms:Get*", "kms:List*", "kms:RevokeGrant" ], "Resource": "*" } ] } Using grants to authorize Amazon Keyspaces In addition to key policies, Amazon Keyspaces uses grants to set permissions on a customer managed key. To view the grants on a customer managed key in your account, use the ListGrants operation. Amazon Keyspaces doesn't need grants, or any additional permissions, to use the AWS owned key to protect your table. Encryption at rest 683 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces uses the grant permissions when it performs background system maintenance and continuous data protection tasks. It also uses grants to generate table keys. Each grant is specific to a table. If the account includes multiple tables encrypted under the same customer managed key, there is a grant of each type for each table. The grant is constrained by the Amazon Keyspaces encryption context, which includes the table name and the AWS account ID. The grant includes permission to retire the grant if it's no longer needed. To create the grants, Amazon Keyspaces must have permission to call CreateGrant on behalf of the user who created the encrypted table. The key policy can also allow the account to revoke the grant on the customer managed key. However, if you revoke the grant on an active encrypted table, Amazon Keyspaces will not be able to protect and maintain the table. Step 3: Specify a customer managed key for a new table Follow these steps to specify the customer managed key on a new table using the Amazon Keyspaces console or CQL. Create an encrypted table using a customer managed key (console) 1. Sign in to the AWS Management Console, and open the Amazon Keyspaces console at https:// console.aws.amazon.com/keyspaces/home. 2. In the navigation pane, choose Tables, and then choose Create table. 3. On the Create table page in the Table details section, select a keyspace and provide a name for the new table. 4. 5. 
In the Schema section, create the schema for your table. In the Table settings section, choose Customize settings. 6. Continue to Encryption settings. In this step, you select the encryption settings for the table. In the Encryption at rest section under Choose an AWS KMS key, choose the option Choose a different KMS key (advanced), and in the search field, choose an AWS KMS key or enter an Amazon Resource Name (ARN). Encryption at rest 684 Amazon Keyspaces (for Apache Cassandra) Developer Guide Note If the key you selected is not accessible or is missing the required permissions, see Troubleshooting key access in the AWS Key Management Service Developer Guide. 7. Choose Create to create the encrypted table. Create a new table using a customer managed key for encryption at rest (CQL) To create a new table that uses a customer managed key for encryption at rest, you can use the CREATE TABLE statement as shown in the following example. Make sure to replace the key ARN with an ARN for a valid key with permissions granted to Amazon Keyspaces. CREATE TABLE my_keyspace.my_table(id bigint, name text, place text STATIC, PRIMARY KEY(id, name)) WITH CUSTOM_PROPERTIES = { 'encryption_specification':{ 'encryption_type': 'CUSTOMER_MANAGED_KMS_KEY', 'kms_key_identifier':'arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111' } }; If you receive an Invalid Request Exception, you need to confirm that the customer managed key is valid and Amazon Keyspaces has the required permissions. To confirm that the key has been configured correctly, see Troubleshooting key access in the AWS Key Management Service Developer Guide. Step 4: Update the encryption key of an existing table You can also use the Amazon Keyspaces console or CQL to change the encryption keys of an existing table between an AWS owned key and a customer managed KMS key at any time. Update an existing table with the new customer managed key (console) 1. 
Sign in to the AWS Management Console, and open the Amazon Keyspaces console at https:// console.aws.amazon.com/keyspaces/home. 2. In the navigation pane, choose Tables. 3. Choose the table that you want to update, and then choose the Additional settings tab. Encryption at rest 685 Amazon Keyspaces (for Apache Cassandra) Developer Guide 4. In the Encryption at rest section, choose Manage Encryption to edit the encryption settings for the table. Under Choose an AWS KMS key, choose the option Choose a different KMS key (advanced), and in the search field, choose an AWS KMS key or enter an Amazon Resource Name (ARN). Note If the key you selected is not valid, see Troubleshooting key access in the AWS Key Management Service Developer Guide. Alternatively, you can choose an AWS owned key for a table that is encrypted with a customer managed key. 5. Choose Save changes to save your changes to the table. Update the encryption key used for an existing table To change the encryption key of an existing table, you
AmazonKeyspaces-213
AmazonKeyspaces.pdf
213
an AWS KMS key, choose the option Choose a different KMS key (advanced), and in the search field, choose an AWS KMS key or enter an Amazon Resource Name (ARN). Note If the key you selected is not valid, see Troubleshooting key access in the AWS Key Management Service Developer Guide. Alternatively, you can choose an AWS owned key for a table that is encrypted with a customer managed key. 5. Choose Save changes to save your changes to the table. Update the encryption key used for an existing table To change the encryption key of an existing table, you use the ALTER TABLE statement to specify a customer managed key for encryption at rest. Make sure to replace the key ARN with an ARN for a valid key with permissions granted to Amazon Keyspaces. ALTER TABLE my_keyspace.my_table WITH CUSTOM_PROPERTIES = { 'encryption_specification':{ 'encryption_type': 'CUSTOMER_MANAGED_KMS_KEY', 'kms_key_identifier':'arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111' } }; If you receive an Invalid Request Exception, you need to confirm that the customer managed key is valid and Amazon Keyspaces has the required permissions. To confirm that the key has been configured correctly, see Troubleshooting key access in the AWS Key Management Service Developer Guide. To change the encryption key back to the default encryption at rest option with AWS owned keys, you can use the ALTER TABLE statement as shown in the following example. ALTER TABLE my_keyspace.my_table WITH CUSTOM_PROPERTIES = { Encryption at rest 686 Amazon Keyspaces (for Apache Cassandra) Developer Guide 'encryption_specification':{ 'encryption_type' : 'AWS_OWNED_KMS_KEY' } }; Step 5: Use the Amazon Keyspaces encryption context in logs An encryption context is a set of key–value pairs that contain arbitrary nonsecret data. When you include an encryption context in a request to encrypt data, AWS KMS cryptographically binds the encryption context to the encrypted data. 
To decrypt the data, you must pass in the same encryption context. Amazon Keyspaces uses the same encryption context in all AWS KMS cryptographic operations. If you use a customer managed key to protect your Amazon Keyspaces table, you can use the encryption context to identify the use of the customer managed key in audit records and logs. It also appears in plaintext in logs, such as in logs for AWS CloudTrail and Amazon CloudWatch Logs. In its requests to AWS KMS, Amazon Keyspaces uses an encryption context with three key–value pairs. "encryptionContextSubset": { "aws:cassandra:keyspaceName": "my_keyspace", "aws:cassandra:tableName": "my_table", "aws:cassandra:subscriberId": "111122223333" } • Keyspace – The first key–value pair identifies the keyspace that includes the table that Amazon Keyspaces is encrypting. The key is aws:cassandra:keyspaceName. The value is the name of the keyspace. "aws:cassandra:keyspaceName": "<keyspace-name>" For example: "aws:cassandra:keyspaceName": "my_keyspace" • Table – The second key–value pair identifies the table that Amazon Keyspaces is encrypting. The key is aws:cassandra:tableName. The value is the name of the table. "aws:cassandra:tableName": "<table-name>" Encryption at rest 687 Amazon Keyspaces (for Apache Cassandra) Developer Guide For example: "aws:cassandra:tableName": "my_table" • Account – The third key–value pair identifies the AWS account. The key is aws:cassandra:subscriberId. The value is the account ID. "aws:cassandra:subscriberId": "<account-id>" For example: "aws:cassandra:subscriberId": "111122223333" Step 6: Configure monitoring with AWS CloudTrail If you use a customer managed key to protect your Amazon Keyspaces tables, you can use AWS CloudTrail logs to track the requests that Amazon Keyspaces sends to AWS KMS on your behalf. The GenerateDataKey, DescribeKey, Decrypt, and CreateGrant requests are discussed in this section.
In addition, Amazon Keyspaces uses a RetireGrant operation to remove a grant when you delete a table. GenerateDataKey Amazon Keyspaces creates a unique table key to encrypt data at rest. It sends a GenerateDataKey request to AWS KMS that specifies the KMS key for the table. The event that records the GenerateDataKey operation is similar to the following example event. The user is the Amazon Keyspaces service account. The parameters include the Amazon Resource Name (ARN) of the customer managed key, a key specifier that requires a 256-bit key, and the encryption context that identifies the keyspace, the table, and the AWS account. { "eventVersion": "1.08", "userIdentity": { "type": "AWSService", "invokedBy": "AWS Internal" }, "eventTime": "2021-04-16T04:56:05Z", Encryption at rest 688 Amazon Keyspaces (for Apache Cassandra) Developer Guide "eventSource": "kms.amazonaws.com", "eventName": "GenerateDataKey", "awsRegion": "us-east-1", "sourceIPAddress": "AWS Internal", "userAgent": "AWS Internal", "requestParameters": { "keySpec": "AES_256", "encryptionContext": { "aws:cassandra:keyspaceName": "my_keyspace", "aws:cassandra:tableName": "my_table", "aws:cassandra:subscriberId": "123SAMPLE012" }, "keyId": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" }, "responseElements": null, "requestID": "5e8e9cb5-9194-4334-aacc-9dd7d50fe246", "eventID": "49fccab9-2448-4b97-a89d-7d5c39318d6f", "readOnly": true, "resources": [ { "accountId": "123SAMPLE012", "type": "AWS::KMS::Key", "ARN": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" } ], "eventType": "AwsApiCall", "managementEvent": true, "eventCategory": "Management", "recipientAccountId": "123SAMPLE012", "sharedEventID": "84fbaaf0-9641-4e32-9147-57d2cb08792e" } DescribeKey Amazon Keyspaces uses a DescribeKey operation to determine whether the KMS key you selected exists in the account and Region. 
The event that records the DescribeKey operation is similar to the following example event. The user is the Amazon Keyspaces service account. The parameters include the ARN of the customer managed key and a key
AmazonKeyspaces-214
AmazonKeyspaces.pdf
214
"encryptionContext": { "aws:cassandra:keyspaceName": "my_keyspace", "aws:cassandra:tableName": "my_table", "aws:cassandra:subscriberId": "123SAMPLE012" }, "keyId": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" }, "responseElements": null, "requestID": "5e8e9cb5-9194-4334-aacc-9dd7d50fe246", "eventID": "49fccab9-2448-4b97-a89d-7d5c39318d6f", "readOnly": true, "resources": [ { "accountId": "123SAMPLE012", "type": "AWS::KMS::Key", "ARN": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" } ], "eventType": "AwsApiCall", "managementEvent": true, "eventCategory": "Management", "recipientAccountId": "123SAMPLE012", "sharedEventID": "84fbaaf0-9641-4e32-9147-57d2cb08792e" } DescribeKey Amazon Keyspaces uses a DescribeKey operation to determine whether the KMS key you selected exists in the account and Region. The event that records the DescribeKey operation is similar to the following example event. The user is the Amazon Keyspaces service account. The parameters include the ARN of the customer managed key and a key specifier that requires a 256-bit key. 
Encryption at rest 689 Amazon Keyspaces (for Apache Cassandra) Developer Guide { "eventVersion": "1.08", "userIdentity": { "type": "IAMUser", "principalId": "AIDAZ3FNIIVIZZ6H7CFQG", "arn": "arn:aws:iam::123SAMPLE012:user/admin", "accountId": "123SAMPLE012", "accessKeyId": "AKIAIOSFODNN7EXAMPLE", "userName": "admin", "sessionContext": { "sessionIssuer": {}, "webIdFederationData": {}, "attributes": { "mfaAuthenticated": "false", "creationDate": "2021-04-16T04:55:42Z" } }, "invokedBy": "AWS Internal" }, "eventTime": "2021-04-16T04:55:58Z", "eventSource": "kms.amazonaws.com", "eventName": "DescribeKey", "awsRegion": "us-east-1", "sourceIPAddress": "AWS Internal", "userAgent": "AWS Internal", "requestParameters": { "keyId": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" }, "responseElements": null, "requestID": "c25a8105-050b-4f52-8358-6e872fb03a6c", "eventID": "0d96420e-707e-41b9-9118-56585a669658", "readOnly": true, "resources": [ { "accountId": "123SAMPLE012", "type": "AWS::KMS::Key", "ARN": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" } ], "eventType": "AwsApiCall", "managementEvent": true, Encryption at rest 690 Amazon Keyspaces (for Apache Cassandra) Developer Guide "eventCategory": "Management", "recipientAccountId": "123SAMPLE012" } Decrypt When you access an Amazon Keyspaces table, Amazon Keyspaces needs to decrypt the table key so that it can decrypt the keys below it in the hierarchy. It then decrypts the data in the table. To decrypt the table key, Amazon Keyspaces sends a Decrypt request to AWS KMS that specifies the KMS key for the table. The event that records the Decrypt operation is similar to the following example event. The user is the principal in your AWS account who is accessing the table. The parameters include the encrypted table key (as a ciphertext blob) and the encryption context that identifies the table and the AWS account. 
AWS KMS derives the ID of the customer managed key from the ciphertext. { "eventVersion": "1.08", "userIdentity": { "type": "AWSService", "invokedBy": "AWS Internal" }, "eventTime": "2021-04-16T05:29:44Z", "eventSource": "kms.amazonaws.com", "eventName": "Decrypt", "awsRegion": "us-east-1", "sourceIPAddress": "AWS Internal", "userAgent": "AWS Internal", "requestParameters": { "encryptionContext": { "aws:cassandra:keyspaceName": "my_keyspace", "aws:cassandra:tableName": "my_table", "aws:cassandra:subscriberId": "123SAMPLE012" }, "encryptionAlgorithm": "SYMMETRIC_DEFAULT" }, "responseElements": null, "requestID": "50e80373-83c9-4034-8226-5439e1c9b259", "eventID": "8db9788f-04a5-4ae2-90c9-15c79c411b6b", "readOnly": true, "resources": [ Encryption at rest 691 Amazon Keyspaces (for Apache Cassandra) Developer Guide { "accountId": "123SAMPLE012", "type": "AWS::KMS::Key", "ARN": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" } ], "eventType": "AwsApiCall", "managementEvent": true, "eventCategory": "Management", "recipientAccountId": "123SAMPLE012", "sharedEventID": "7ed99e2d-910a-4708-a4e3-0180d8dbb68e" } CreateGrant When you use a customer managed key to protect your Amazon Keyspaces table, Amazon Keyspaces uses grants to allow the service to perform continuous data protection and maintenance and durability tasks. These grants aren't required on AWS owned keys. The grants that Amazon Keyspaces creates are specific to a table. The principal in the CreateGrant request is the user who created the table. The event that records the CreateGrant operation is similar to the following example event. The parameters include the ARN of the customer managed key for the table, the grantee principal and retiring principal (the Amazon Keyspaces service), and the operations that the grant covers. It also includes a constraint that requires all encryption operations use the specified encryption context. 
{ "eventVersion": "1.08", "userIdentity": { "type": "IAMUser", "principalId": "AIDAZ3FNIIVIZZ6H7CFQG", "arn": "arn:aws:iam::123SAMPLE012:user/admin", "accountId": "123SAMPLE012", "accessKeyId": "AKIAI44QH8DHBEXAMPLE", "userName": "admin", "sessionContext": { "sessionIssuer": {}, "webIdFederationData": {}, Encryption at rest 692 Amazon Keyspaces (for Apache Cassandra) Developer Guide "attributes": { "mfaAuthenticated": "false", "creationDate": "2021-04-16T04:55:42Z" } }, "invokedBy": "AWS Internal" }, "eventTime": "2021-04-16T05:11:10Z", "eventSource": "kms.amazonaws.com", "eventName": "CreateGrant", "awsRegion": "us-east-1", "sourceIPAddress": "AWS Internal", "userAgent": "AWS Internal", "requestParameters": { "keyId": "a7d328af-215e-4661-9a69-88c858909f20", "operations": [ "DescribeKey", "GenerateDataKey", "Decrypt", "Encrypt", "ReEncryptFrom", "ReEncryptTo", "RetireGrant" ], "constraints": { "encryptionContextSubset": { "aws:cassandra:keyspaceName": "my_keyspace", "aws:cassandra:tableName": "my_table", "aws:cassandra:subscriberId": "123SAMPLE012" } }, "retiringPrincipal": "cassandratest.us-east-1.amazonaws.com", "granteePrincipal": "cassandratest.us-east-1.amazonaws.com" }, "responseElements": { "grantId": "18e4235f1b07f289762a31a1886cb5efd225f069280d4f76cd83b9b9b5501013" }, "requestID": "b379a767-1f9b-48c3-b731-fb23e865e7f7", "eventID": "29ee1fd4-28f2-416f-a419-551910d20291", "readOnly": false, "resources": [ { "accountId": "123SAMPLE012", Encryption at rest 693 Amazon Keyspaces (for Apache Cassandra) Developer Guide "type": "AWS::KMS::Key", "ARN": "arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111" } ], "eventType": "AwsApiCall", "managementEvent": true, "eventCategory": "Management", "recipientAccountId": "123SAMPLE012" } Encryption in transit in Amazon Keyspaces Amazon Keyspaces only accepts secure
connections using Transport Layer Security (TLS). Encryption in transit provides an additional layer of data protection by encrypting your data as it travels to and from Amazon Keyspaces. Organizational policies, industry or government regulations, and compliance requirements often require the use of encryption in transit to increase the data security of your applications when they transmit data over the network. To learn how to encrypt cqlsh connections to Amazon Keyspaces using TLS, see the section called “How to manually configure cqlsh connections for TLS”. To learn how to use TLS encryption with client drivers, see the section called “Using a Cassandra client driver”. Internetwork traffic privacy in Amazon Keyspaces This topic describes how Amazon Keyspaces
AmazonKeyspaces-215
AmazonKeyspaces.pdf
215
protection by encrypting your data as it travels to and from Amazon Keyspaces. Organizational policies, industry or government regulations, and compliance requirements often require the use of encryption in transit to increase the data security of your applications when they transmit data over the network. To learn how to encrypt cqlsh connections to Amazon Keyspaces using TLS, see the section called “How to manually configure cqlsh connections for TLS”. To learn how to use TLS encryption with client drivers, see the section called “Using a Cassandra client driver”. Internetwork traffic privacy in Amazon Keyspaces This topic describes how Amazon Keyspaces (for Apache Cassandra) secures connections from on-premises applications to Amazon Keyspaces and between Amazon Keyspaces and other AWS resources within the same AWS Region. Traffic between service and on-premises clients and applications You have two connectivity options between your private network and AWS: • An AWS Site-to-Site VPN connection. For more information, see What is AWS Site-to-Site VPN? in the AWS Site-to-Site VPN User Guide. • An AWS Direct Connect connection. For more information, see What is AWS Direct Connect? in the AWS Direct Connect User Guide. Encryption in transit 694 Amazon Keyspaces (for Apache Cassandra) Developer Guide As a managed service, Amazon Keyspaces (for Apache Cassandra) is protected by AWS global network security. For information about AWS security services and how AWS protects infrastructure, see AWS Cloud Security. To design your AWS environment using the best practices for infrastructure security, see Infrastructure Protection in Security Pillar AWS Well‐Architected Framework. You use AWS published API calls to access Amazon Keyspaces through the network. Clients must support the following: • Transport Layer Security (TLS). We require TLS 1.2 and recommend TLS 1.3. 
• Cipher suites with perfect forward secrecy (PFS) such as DHE (Ephemeral Diffie-Hellman) or ECDHE (Elliptic Curve Ephemeral Diffie-Hellman). Most modern systems such as Java 7 and later support these modes. Additionally, requests must be signed by using an access key ID and a secret access key that is associated with an IAM principal. Or you can use the AWS Security Token Service (AWS STS) to generate temporary security credentials to sign requests. Amazon Keyspaces supports two methods of authenticating client requests. The first method uses service-specific credentials, which are password based credentials generated for a specific IAM user. You can create and manage the password using the IAM console, the AWS CLI, or the AWS API. For more information, see Using IAM with Amazon Keyspaces. The second method uses an authentication plugin for the open-source DataStax Java Driver for Cassandra. This plugin enables IAM users, roles, and federated identities to add authentication information to Amazon Keyspaces (for Apache Cassandra) API requests using the AWS Signature Version 4 process (SigV4). For more information, see the section called “Create IAM credentials for AWS authentication”. Traffic between AWS resources in the same Region Interface VPC endpoints enable private communication between your virtual private cloud (VPC) running in Amazon VPC and Amazon Keyspaces. Interface VPC endpoints are powered by AWS PrivateLink, which is an AWS service that enables private communication between VPCs and AWS services. AWS PrivateLink enables this by using an elastic network interface with private IPs in your VPC so that network traffic does not leave the Amazon network. Interface VPC endpoints don't require an internet gateway, NAT device, VPN connection, or AWS Direct Connect connection. 
For more information, see Amazon Virtual Private Cloud and Interface VPC endpoints (AWS Internetwork traffic privacy 695 Amazon Keyspaces (for Apache Cassandra) Developer Guide PrivateLink). For example policies, see the section called “Using interface VPC endpoints for Amazon Keyspaces”. AWS Identity and Access Management for Amazon Keyspaces AWS Identity and Access Management (IAM) is an AWS service that helps an administrator securely control access to AWS resources. IAM administrators control who can be authenticated (signed in) and authorized (have permissions) to use Amazon Keyspaces resources. IAM is an AWS service that you can use with no additional charge. Topics • Audience • Authenticating with identities • Managing access using policies • How Amazon Keyspaces works with IAM • Amazon Keyspaces identity-based policy examples • AWS managed policies for Amazon Keyspaces • Troubleshooting Amazon Keyspaces identity and access • Using service-linked roles for Amazon Keyspaces Audience How you use AWS Identity and Access Management (IAM) differs, depending on the work that you do in Amazon Keyspaces. Service user – If you use the Amazon Keyspaces service to do your job, then your administrator provides you with the credentials and permissions that you need. As you use more Amazon Keyspaces features to do your work, you might need additional permissions. Understanding how access is managed can help you request the right permissions from your administrator. If you cannot access a feature in Amazon Keyspaces, see Troubleshooting Amazon Keyspaces identity and access. Service administrator – If you're
AmazonKeyspaces-216
AmazonKeyspaces.pdf
216
Audience How you use AWS Identity and Access Management (IAM) differs, depending on the work that you do in Amazon Keyspaces. Service user – If you use the Amazon Keyspaces service to do your job, then your administrator provides you with the credentials and permissions that you need. As you use more Amazon Keyspaces features to do your work, you might need additional permissions. Understanding how access is managed can help you request the right permissions from your administrator. If you cannot access a feature in Amazon Keyspaces, see Troubleshooting Amazon Keyspaces identity and access. Service administrator – If you're in charge of Amazon Keyspaces resources at your company, you probably have full access to Amazon Keyspaces. It's your job to determine which Amazon Keyspaces features and resources your service users should access. You must then submit requests to your IAM administrator to change the permissions of your service users. Review the information on this page AWS Identity and Access Management 696 Amazon Keyspaces (for Apache Cassandra) Developer Guide to understand the basic concepts of IAM. To learn more about how your company can use IAM with Amazon Keyspaces, see How Amazon Keyspaces works with IAM. IAM administrator – If you're an IAM administrator, you might want to learn details about how you can write policies to manage access to Amazon Keyspaces. To view example Amazon Keyspaces identity-based policies that you can use in IAM, see Amazon Keyspaces identity-based policy examples. Authenticating with identities Authentication is how you sign in to AWS using your identity credentials. You must be authenticated (signed in to AWS) as the AWS account root user, as an IAM user, or by assuming an IAM role. You can sign in to AWS as a federated identity by using credentials provided through an identity source. 
AWS IAM Identity Center (IAM Identity Center) users, your company's single sign-on authentication, and your Google or Facebook credentials are examples of federated identities. When you sign in as a federated identity, your administrator previously set up identity federation using IAM roles. When you access AWS by using federation, you are indirectly assuming a role. Depending on the type of user you are, you can sign in to the AWS Management Console or the AWS access portal. For more information about signing in to AWS, see How to sign in to your AWS account in the AWS Sign-In User Guide. If you access AWS programmatically, AWS provides a software development kit (SDK) and a command line interface (CLI) to cryptographically sign your requests by using your credentials. If you don't use AWS tools, you must sign requests yourself. For more information about using the recommended method to sign requests yourself, see AWS Signature Version 4 for API requests in the IAM User Guide. Regardless of the authentication method that you use, you might be required to provide additional security information. For example, AWS recommends that you use multi-factor authentication (MFA) to increase the security of your account. To learn more, see Multi-factor authentication in the AWS IAM Identity Center User Guide and AWS Multi-factor authentication in IAM in the IAM User Guide. AWS account root user When you create an AWS account, you begin with one sign-in identity that has complete access to all AWS services and resources in the account. This identity is called the AWS account root user and Authenticating with identities 697 Amazon Keyspaces (for Apache Cassandra) Developer Guide is accessed by signing in with the email address and password that you used to create the account. We strongly recommend that you don't use the root user for your everyday tasks. Safeguard your root user credentials and use them to perform the tasks that only the root user can perform. 
For the complete list of tasks that require you to sign in as the root user, see Tasks that require root user credentials in the IAM User Guide. IAM users and groups An IAM user is an identity within your AWS account that has specific permissions for a single person or application. Where possible, we recommend relying on temporary credentials instead of creating IAM users who have long-term credentials such as passwords and access keys. However, if you have specific use cases that require long-term credentials with IAM users, we recommend that you rotate access keys. For more information, see Rotate access keys regularly for use cases that require long- term credentials in the IAM User Guide. An IAM group is an identity that specifies a collection of IAM users. You can't sign in as a group. You can use groups to specify permissions for multiple users at a time. Groups make permissions easier to manage for large sets of users. For example, you could have a group named IAMAdmins and give that group permissions to administer IAM resources.
AmazonKeyspaces-217
AmazonKeyspaces.pdf
217
specific use cases that require long-term credentials with IAM users, we recommend that you rotate access keys. For more information, see Rotate access keys regularly for use cases that require long- term credentials in the IAM User Guide. An IAM group is an identity that specifies a collection of IAM users. You can't sign in as a group. You can use groups to specify permissions for multiple users at a time. Groups make permissions easier to manage for large sets of users. For example, you could have a group named IAMAdmins and give that group permissions to administer IAM resources. Users are different from roles. A user is uniquely associated with one person or application, but a role is intended to be assumable by anyone who needs it. Users have permanent long-term credentials, but roles provide temporary credentials. To learn more, see Use cases for IAM users in the IAM User Guide. IAM roles An IAM role is an identity within your AWS account that has specific permissions. It is similar to an IAM user, but is not associated with a specific person. To temporarily assume an IAM role in the AWS Management Console, you can switch from a user to an IAM role (console). You can assume a role by calling an AWS CLI or AWS API operation or by using a custom URL. For more information about methods for using roles, see Methods to assume a role in the IAM User Guide. IAM roles with temporary credentials are useful in the following situations: • Federated user access – To assign permissions to a federated identity, you create a role and define permissions for the role. When a federated identity authenticates, the identity is associated with the role and is granted the permissions that are defined by the role. For information about roles for federation, see Create a role for a third-party identity provider Authenticating with identities 698 Amazon Keyspaces (for Apache Cassandra) Developer Guide (federation) in the IAM User Guide. 
If you use IAM Identity Center, you configure a permission set. To control what your identities can access after they authenticate, IAM Identity Center correlates the permission set to a role in IAM. For information about permissions sets, see Permission sets in the AWS IAM Identity Center User Guide. • Temporary IAM user permissions – An IAM user or role can assume an IAM role to temporarily take on different permissions for a specific task. • Cross-account access – You can use an IAM role to allow someone (a trusted principal) in a different account to access resources in your account. Roles are the primary way to grant cross- account access. However, with some AWS services, you can attach a policy directly to a resource (instead of using a role as a proxy). To learn the difference between roles and resource-based policies for cross-account access, see Cross account resource access in IAM in the IAM User Guide. • Cross-service access – Some AWS services use features in other AWS services. For example, when you make a call in a service, it's common for that service to run applications in Amazon EC2 or store objects in Amazon S3. A service might do this using the calling principal's permissions, using a service role, or using a service-linked role. • Forward access sessions (FAS) – When you use an IAM user or role to perform actions in AWS, you are considered a principal. When you use some services, you might perform an action that then initiates another action in a different service. FAS uses the permissions of the principal calling an AWS service, combined with the requesting AWS service to make requests to downstream services. FAS requests are only made when a service receives a request that requires interactions with other AWS services or resources to complete. In this case, you must have permissions to perform both actions. For policy details when making FAS requests, see Forward access sessions. 
• Service role – A service role is an IAM role that a service assumes to perform actions on your behalf. An IAM administrator can create, modify, and delete a service role from within IAM. For more information, see Create a role to delegate permissions to an AWS service in the IAM User Guide. • Service-linked role – A service-linked role is a type of service role that is linked to an AWS service. The service can assume the role to perform an action on your behalf. Service-linked roles appear in your AWS account and are owned by the service. An IAM administrator can view, but not edit the permissions for service-linked roles. • Applications running on Amazon EC2 – You can use an IAM role to manage temporary credentials for applications that are running on an EC2 instance and making AWS
AmazonKeyspaces-218
AmazonKeyspaces.pdf
218
to delegate permissions to an AWS service in the IAM User Guide. • Service-linked role – A service-linked role is a type of service role that is linked to an AWS service. The service can assume the role to perform an action on your behalf. Service-linked roles appear in your AWS account and are owned by the service. An IAM administrator can view, but not edit the permissions for service-linked roles. • Applications running on Amazon EC2 – You can use an IAM role to manage temporary credentials for applications that are running on an EC2 instance and making AWS CLI or AWS API requests. This is preferable to storing access keys within the EC2 instance. To assign an AWS role to an EC2 instance and make it available to all of its applications, you create an instance profile Authenticating with identities 699 Amazon Keyspaces (for Apache Cassandra) Developer Guide that is attached to the instance. An instance profile contains the role and enables programs that are running on the EC2 instance to get temporary credentials. For more information, see Use an IAM role to grant permissions to applications running on Amazon EC2 instances in the IAM User Guide. Managing access using policies You control access in AWS by creating policies and attaching them to AWS identities or resources. A policy is an object in AWS that, when associated with an identity or resource, defines their permissions. AWS evaluates these policies when a principal (user, root user, or role session) makes a request. Permissions in the policies determine whether the request is allowed or denied. Most policies are stored in AWS as JSON documents. For more information about the structure and contents of JSON policy documents, see Overview of JSON policies in the IAM User Guide. Administrators can use AWS JSON policies to specify who has access to what. That is, which principal can perform actions on what resources, and under what conditions. By default, users and roles have no permissions. 
To grant users permission to perform actions on the resources that they need, an IAM administrator can create IAM policies. The administrator can then add the IAM policies to roles, and users can assume the roles. IAM policies define permissions for an action regardless of the method that you use to perform the operation. For example, suppose that you have a policy that allows the iam:GetRole action. A user with that policy can get role information from the AWS Management Console, the AWS CLI, or the AWS API. Identity-based policies Identity-based policies are JSON permissions policy documents that you can attach to an identity, such as an IAM user, group of users, or role. These policies control what actions users and roles can perform, on which resources, and under what conditions. To learn how to create an identity-based policy, see Define custom IAM permissions with customer managed policies in the IAM User Guide. Identity-based policies can be further categorized as inline policies or managed policies. Inline policies are embedded directly into a single user, group, or role. Managed policies are standalone policies that you can attach to multiple users, groups, and roles in your AWS account. Managed policies include AWS managed policies and customer managed policies. To learn how to choose between a managed policy or an inline policy, see Choose between managed policies and inline policies in the IAM User Guide. Managing access using policies 700 Amazon Keyspaces (for Apache Cassandra) Resource-based policies Developer Guide Resource-based policies are JSON policy documents that you attach to a resource. Examples of resource-based policies are IAM role trust policies and Amazon S3 bucket policies. In services that support resource-based policies, service administrators can use them to control access to a specific resource. 
For the resource where the policy is attached, the policy defines what actions a specified principal can perform on that resource and under what conditions. You must specify a principal in a resource-based policy. Principals can include accounts, users, roles, federated users, or AWS services. Resource-based policies are inline policies that are located in that service. You can't use AWS managed policies from IAM in a resource-based policy. Access control lists (ACLs) Access control lists (ACLs) control which principals (account members, users, or roles) have permissions to access a resource. ACLs are similar to resource-based policies, although they do not use the JSON policy document format. Amazon S3, AWS WAF, and Amazon VPC are examples of services that support ACLs. To learn more about ACLs, see Access control list (ACL) overview in the Amazon Simple Storage Service Developer Guide. Other policy types AWS supports additional, less-common policy types. These policy types can set the maximum permissions granted to you by the more common policy types. • Permissions boundaries – A permissions boundary is an advanced feature in which you
AmazonKeyspaces-219
AmazonKeyspaces.pdf
219
(account members, users, or roles) have permissions to access a resource. ACLs are similar to resource-based policies, although they do not use the JSON policy document format. Amazon S3, AWS WAF, and Amazon VPC are examples of services that support ACLs. To learn more about ACLs, see Access control list (ACL) overview in the Amazon Simple Storage Service Developer Guide. Other policy types AWS supports additional, less-common policy types. These policy types can set the maximum permissions granted to you by the more common policy types. • Permissions boundaries – A permissions boundary is an advanced feature in which you set the maximum permissions that an identity-based policy can grant to an IAM entity (IAM user or role). You can set a permissions boundary for an entity. The resulting permissions are the intersection of an entity's identity-based policies and its permissions boundaries. Resource-based policies that specify the user or role in the Principal field are not limited by the permissions boundary. An explicit deny in any of these policies overrides the allow. For more information about permissions boundaries, see Permissions boundaries for IAM entities in the IAM User Guide. • Service control policies (SCPs) – SCPs are JSON policies that specify the maximum permissions for an organization or organizational unit (OU) in AWS Organizations. AWS Organizations is a service for grouping and centrally managing multiple AWS accounts that your business owns. If Managing access using policies 701 Amazon Keyspaces (for Apache Cassandra) Developer Guide you enable all features in an organization, then you can apply service control policies (SCPs) to any or all of your accounts. The SCP limits permissions for entities in member accounts, including each AWS account root user. For more information about Organizations and SCPs, see Service control policies in the AWS Organizations User Guide. 
• Resource control policies (RCPs) – RCPs are JSON policies that you can use to set the maximum available permissions for resources in your accounts without updating the IAM policies attached to each resource that you own. The RCP limits permissions for resources in member accounts and can impact the effective permissions for identities, including the AWS account root user, regardless of whether they belong to your organization. For more information about Organizations and RCPs, including a list of AWS services that support RCPs, see Resource control policies (RCPs) in the AWS Organizations User Guide. • Session policies – Session policies are advanced policies that you pass as a parameter when you programmatically create a temporary session for a role or federated user. The resulting session's permissions are the intersection of the user or role's identity-based policies and the session policies. Permissions can also come from a resource-based policy. An explicit deny in any of these policies overrides the allow. For more information, see Session policies in the IAM User Guide. Multiple policy types When multiple types of policies apply to a request, the resulting permissions are more complicated to understand. To learn how AWS determines whether to allow a request when multiple policy types are involved, see Policy evaluation logic in the IAM User Guide. How Amazon Keyspaces works with IAM Before you use IAM to manage access to Amazon Keyspaces, you should understand what IAM features are available to use with Amazon Keyspaces. To get a high-level view of how Amazon Keyspaces and other AWS services work with IAM, see AWS services that work with IAM in the IAM User Guide. 
Topics • Amazon Keyspaces identity-based policies • Amazon Keyspaces resource-based policies • Authorization based on Amazon Keyspaces tags • Amazon Keyspaces IAM roles How Amazon Keyspaces works with IAM 702 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces identity-based policies With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon Keyspaces supports specific actions and resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON policy elements reference in the IAM User Guide. To see the Amazon Keyspaces service-specific resources and actions, and condition context keys that can be used for IAM permissions policies, see the Actions, resources, and condition keys for Amazon Keyspaces (for Apache Cassandra) in the Service Authorization Reference. Actions Administrators can use AWS JSON policies to specify who has access to what. That is, which principal can perform actions on what resources, and under what conditions. The Action element of a JSON policy describes the actions that you can use to allow or deny access in a policy. Policy actions usually have the same name as the associated AWS API operation. There are some exceptions, such as permission-only actions that don't have a matching API operation. There are also some operations that require multiple actions in a
AmazonKeyspaces-220
AmazonKeyspaces.pdf
220
for Amazon Keyspaces (for Apache Cassandra) in the Service Authorization Reference. Actions Administrators can use AWS JSON policies to specify who has access to what. That is, which principal can perform actions on what resources, and under what conditions. The Action element of a JSON policy describes the actions that you can use to allow or deny access in a policy. Policy actions usually have the same name as the associated AWS API operation. There are some exceptions, such as permission-only actions that don't have a matching API operation. There are also some operations that require multiple actions in a policy. These additional actions are called dependent actions. Include actions in a policy to grant permissions to perform the associated operation. Policy actions in Amazon Keyspaces use the following prefix before the action: cassandra:. For example, to grant someone permission to create an Amazon Keyspaces keyspace with the Amazon Keyspaces CREATE CQL statement, you include the cassandra:Create action in their policy. Policy statements must include either an Action or NotAction element. Amazon Keyspaces defines its own set of actions that describe tasks that you can perform with this service. To specify multiple actions in a single statement, separate them with commas as follows: "Action": [ "cassandra:CREATE", "cassandra:MODIFY" ] To see a list of Amazon Keyspaces actions, see Actions Defined by Amazon Keyspaces (for Apache Cassandra) in the Service Authorization Reference. How Amazon Keyspaces works with IAM 703 Amazon Keyspaces (for Apache Cassandra) Developer Guide Resources Administrators can use AWS JSON policies to specify who has access to what. That is, which principal can perform actions on what resources, and under what conditions. The Resource JSON policy element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. 
As a best practice, specify a resource using its Amazon Resource Name (ARN). You can do this for actions that support a specific resource type, known as resource-level permissions. For actions that don't support resource-level permissions, such as listing operations, use a wildcard (*) to indicate that the statement applies to all resources. "Resource": "*" In Amazon Keyspaces, keyspaces and tables can be used in the Resource element of IAM permissions. The Amazon Keyspaces keyspace resource has the following ARN: arn:${Partition}:cassandra:${Region}:${Account}:/keyspace/${KeyspaceName}/ The Amazon Keyspaces table resource has the following ARN: arn:${Partition}:cassandra:${Region}:${Account}:/keyspace/${KeyspaceName}/table/${tableName} For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS service namespaces. For example, to specify the mykeyspace keyspace in your statement, use the following ARN: "Resource": "arn:aws:cassandra:us-east-1:123456789012:/keyspace/mykeyspace/" To specify all keyspaces that belong to a specific account, use the wildcard (*): "Resource": "arn:aws:cassandra:us-east-1:123456789012:/keyspace/*" How Amazon Keyspaces works with IAM 704 Amazon Keyspaces (for Apache Cassandra) Developer Guide Some Amazon Keyspaces actions, such as those for creating resources, cannot be performed on a specific resource. In those cases, you must use the wildcard (*). "Resource": "*" To connect to Amazon Keyspaces programmatically with a standard driver, a principal must have SELECT access to the system tables, because most drivers read the system keyspaces/tables on connection. For example, to grant SELECT permissions to an IAM user for mytable in mykeyspace, the principal must have permissions to read both mytable and the system keyspace. To specify multiple resources in a single statement, separate the ARNs with commas. 
"Resource": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" To see a list of Amazon Keyspaces resource types and their ARNs, see Resources Defined by Amazon Keyspaces (for Apache Cassandra) in the Service Authorization Reference. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Keyspaces (for Apache Cassandra). Condition keys Administrators can use AWS JSON policies to specify who has access to what. That is, which principal can perform actions on what resources, and under what conditions. The Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can create conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request. If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted. You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM policy elements: variables and tags in the IAM User Guide. How Amazon Keyspaces works with IAM 705 Amazon Keyspaces (for Apache Cassandra) Developer Guide AWS
AmazonKeyspaces-221
AmazonKeyspaces.pdf
221
using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted. You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM policy elements: variables and tags in the IAM User Guide. How Amazon Keyspaces works with IAM 705 Amazon Keyspaces (for Apache Cassandra) Developer Guide AWS supports global condition keys and service-specific condition keys. To see all AWS global condition keys, see AWS global condition context keys in the IAM User Guide. Amazon Keyspaces defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS global condition context keys in the IAM User Guide. All Amazon Keyspaces actions support the aws:RequestTag/${TagKey}, the aws:ResourceTag/${TagKey}, and the aws:TagKeys condition keys. For more information, see the section called “ Amazon Keyspaces resource access based on tags”. To see a list of Amazon Keyspaces condition keys, see Condition Keys for Amazon Keyspaces (for Apache Cassandra) in the Service Authorization Reference. To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Keyspaces (for Apache Cassandra). Examples To view examples of Amazon Keyspaces identity-based policies, see Amazon Keyspaces identity-based policy examples. Amazon Keyspaces resource-based policies Amazon Keyspaces does not support resource-based policies. To view an example of a detailed resource-based policy page, see https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html. Authorization based on Amazon Keyspaces tags You can manage access to your Amazon Keyspaces resources by using tags. 
To manage resource access based on tags, you provide tag information in the condition element of a policy using the cassandra:ResourceTag/key-name, aws:RequestTag/key-name, or aws:TagKeys condition keys. For more information about tagging Amazon Keyspaces resources, see the section called “Working with tags”. To view example identity-based policies for limiting access to a resource based on the tags on that resource, see Amazon Keyspaces resource access based on tags. Amazon Keyspaces IAM roles An IAM role is an entity within your AWS account that has specific permissions. How Amazon Keyspaces works with IAM 706 Amazon Keyspaces (for Apache Cassandra) Developer Guide Using temporary credentials with Amazon Keyspaces You can use temporary credentials to sign in with federation, to assume an IAM role, or to assume a cross-account role. You obtain temporary security credentials by calling AWS STS API operations such as AssumeRole or GetFederationToken. Amazon Keyspaces supports using temporary credentials with the AWS Signature Version 4 (SigV4) authentication plugin available from the Github repo for the following languages: • Java: https://github.com/aws/aws-sigv4-auth-cassandra-java-driver-plugin. • Node.js: https://github.com/aws/aws-sigv4-auth-cassandra-nodejs-driver-plugin. • Python: https://github.com/aws/aws-sigv4-auth-cassandra-python-driver-plugin. • Go: https://github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin. For examples and tutorials that implement the authentication plugin to access Amazon Keyspaces programmatically, see the section called “Using a Cassandra client driver”. Service-linked roles Service-linked roles allow AWS services to access resources in other services to complete an action on your behalf. Service-linked roles appear in your IAM account and are owned by the service. An IAM administrator can view but not edit the permissions for service-linked roles. 
For details about creating or managing Amazon Keyspaces service-linked roles, see the section called “Using service-linked roles”. Service roles Amazon Keyspaces does not support service roles. Amazon Keyspaces identity-based policy examples By default, IAM users and roles don't have permission to create or modify Amazon Keyspaces resources. They also can't perform tasks using the console, CQLSH, AWS CLI, or AWS API. An IAM administrator must create IAM policies that grant users and roles permission to perform specific API operations on the specified resources they need. The administrator must then attach those policies to the IAM users or groups that require those permissions. To learn how to create an IAM identity-based policy using these example JSON policy documents, see Creating policies on the JSON tab in the IAM User Guide. Identity-based policy examples 707 Amazon Keyspaces (for Apache Cassandra) Developer Guide Topics • Policy best practices • Using the Amazon Keyspaces console • Allow users to view their own permissions • Accessing Amazon Keyspaces tables • Amazon Keyspaces resource access based on tags Policy best practices Identity-based policies determine whether someone can create, access, or delete Amazon Keyspaces resources in your account. These actions can incur costs for your AWS account. When you create or edit identity-based policies, follow these guidelines and recommendations: • Get started with AWS managed policies and move toward least-privilege permissions – To get started granting permissions to your users and workloads, use the AWS managed policies that grant permissions for many common use cases. They are available in your AWS account. We recommend that you reduce
AmazonKeyspaces-222
AmazonKeyspaces.pdf
222
Accessing Amazon Keyspaces tables • Amazon Keyspaces resource access based on tags Policy best practices Identity-based policies determine whether someone can create, access, or delete Amazon Keyspaces resources in your account. These actions can incur costs for your AWS account. When you create or edit identity-based policies, follow these guidelines and recommendations: • Get started with AWS managed policies and move toward least-privilege permissions – To get started granting permissions to your users and workloads, use the AWS managed policies that grant permissions for many common use cases. They are available in your AWS account. We recommend that you reduce permissions further by defining AWS customer managed policies that are specific to your use cases. For more information, see AWS managed policies or AWS managed policies for job functions in the IAM User Guide. • Apply least-privilege permissions – When you set permissions with IAM policies, grant only the permissions required to perform a task. You do this by defining the actions that can be taken on specific resources under specific conditions, also known as least-privilege permissions. For more information about using IAM to apply permissions, see Policies and permissions in IAM in the IAM User Guide. • Use conditions in IAM policies to further restrict access – You can add a condition to your policies to limit access to actions and resources. For example, you can write a policy condition to specify that all requests must be sent using SSL. You can also use conditions to grant access to service actions if they are used through a specific AWS service, such as AWS CloudFormation. For more information, see IAM JSON policy elements: Condition in the IAM User Guide. • Use IAM Access Analyzer to validate your IAM policies to ensure secure and functional permissions – IAM Access Analyzer validates new and existing policies so that the policies adhere to the IAM policy language (JSON) and IAM best practices. 
IAM Access Analyzer provides more than 100 policy checks and actionable recommendations to help you author secure and functional policies. For more information, see Validate policies with IAM Access Analyzer in the IAM User Guide. Identity-based policy examples 708 Amazon Keyspaces (for Apache Cassandra) Developer Guide • Require multi-factor authentication (MFA) – If you have a scenario that requires IAM users or a root user in your AWS account, turn on MFA for additional security. To require MFA when API operations are called, add MFA conditions to your policies. For more information, see Secure API access with MFA in the IAM User Guide. For more information about best practices in IAM, see Security best practices in IAM in the IAM User Guide. Using the Amazon Keyspaces console Amazon Keyspaces doesn't require specific permissions to access the Amazon Keyspaces console. You need at least read-only permissions to list and view details about the Amazon Keyspaces resources in your AWS account. If you create an identity-based policy that is more restrictive than the minimum required permissions, the console won't function as intended for entities (IAM users or roles) with that policy. Two AWS managed policies are available to the entities for Amazon Keyspaces console access. • AmazonKeyspacesReadOnlyAccess_v2 – This policy grants read-only access to Amazon Keyspaces. • AmazonKeyspacesFullAccess – This policy grants permissions to use Amazon Keyspaces with full access to all features. For more information about Amazon Keyspaces managed policies, see the section called “AWS managed policies”. Allow users to view their own permissions This example shows how you might create a policy that allows IAM users to view the inline and managed policies that are attached to their user identity. This policy includes permissions to complete this action on the console or programmatically using the AWS CLI or AWS API. 
{ "Version": "2012-10-17", "Statement": [ { "Sid": "ViewOwnUserInfo", "Effect": "Allow", "Action": [ Identity-based policy examples 709 Amazon Keyspaces (for Apache Cassandra) Developer Guide "iam:GetUserPolicy", "iam:ListGroupsForUser", "iam:ListAttachedUserPolicies", "iam:ListUserPolicies", "iam:GetUser" ], "Resource": ["arn:aws:iam::*:user/${aws:username}"] }, { "Sid": "NavigateInConsole", "Effect": "Allow", "Action": [ "iam:GetGroupPolicy", "iam:GetPolicyVersion", "iam:GetPolicy", "iam:ListAttachedGroupPolicies", "iam:ListGroupPolicies", "iam:ListPolicyVersions", "iam:ListPolicies", "iam:ListUsers" ], "Resource": "*" } ] } Accessing Amazon Keyspaces tables The following is a sample policy that grants read-only (SELECT) access to the Amazon Keyspaces system tables. For all samples, replace the Region and account ID in the Amazon Resource Name (ARN) with your own. Note To connect with a standard driver, a user must have at least SELECT access to the system tables, because most drivers read the system keyspaces/tables on connection. { "Version":"2012-10-17", "Statement":[ { Identity-based policy examples 710 Amazon Keyspaces (for Apache Cassandra) Developer Guide "Effect":"Allow", "Action":[ "cassandra:Select" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } The following sample policy adds read-only access to the user table mytable in the keyspace mykeyspace. { "Version":"2012-10-17", "Statement":[ { "Effect":"Allow", "Action":[ "cassandra:Select" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } The following sample policy
AmazonKeyspaces-223
AmazonKeyspaces.pdf
223
account ID in the Amazon Resource Name (ARN) with your own. Note To connect with a standard driver, a user must have at least SELECT access to the system tables, because most drivers read the system keyspaces/tables on connection. { "Version":"2012-10-17", "Statement":[ { Identity-based policy examples 710 Amazon Keyspaces (for Apache Cassandra) Developer Guide "Effect":"Allow", "Action":[ "cassandra:Select" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } The following sample policy adds read-only access to the user table mytable in the keyspace mykeyspace. { "Version":"2012-10-17", "Statement":[ { "Effect":"Allow", "Action":[ "cassandra:Select" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } The following sample policy assigns read/write access to a user table and read access to the system tables. Note System tables are always read-only. { "Version":"2012-10-17", Identity-based policy examples 711 Amazon Keyspaces (for Apache Cassandra) Developer Guide "Statement":[ { "Effect":"Allow", "Action":[ "cassandra:Select", "cassandra:Modify" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/ mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } The following sample policy allows a user to create tables in keyspace mykeyspace. { "Version":"2012-10-17", "Statement":[ { "Effect":"Allow", "Action":[ "cassandra:Create", "cassandra:Select" ], "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/*", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } Amazon Keyspaces resource access based on tags You can use conditions in your identity-based policy to control access to Amazon Keyspaces resources based on tags. These policies control visibility of the keyspaces and tables in the account. 
Note that tag-based permissions for system tables behave differently when requests are made using the AWS SDK compared to Cassandra Query Language (CQL) API calls via Cassandra drivers and developer tools. Identity-based policy examples 712 Amazon Keyspaces (for Apache Cassandra) Developer Guide • To make List and Get resource requests with the AWS SDK when using tag-based access, the caller needs to have read access to system tables. For example, Select action permissions are required to read data from system tables via the GetTable operation. If the caller has only tag- based access to a specific table, an operation that requires additional access to a system table will fail. • For compatibility with established Cassandra driver behavior, tag-based authorization policies are not enforced when performing operations on system tables using Cassandra Query Language (CQL) API calls via Cassandra drivers and developer tools. The following example shows how you can create a policy that grants permissions to a user to view a table if the table's Owner contains the value of that user's user name. In this example you also give read access to the system tables. { "Version":"2012-10-17", "Statement":[ { "Sid":"ReadOnlyAccessTaggedTables", "Effect":"Allow", "Action":"cassandra:Select", "Resource":[ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/table/*", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ], "Condition":{ "StringEquals":{ "aws:ResourceTag/Owner":"${aws:username}" } } } ] } You can attach this policy to the IAM users in your account. If a user named richard-roe attempts to view an Amazon Keyspaces table, the table must be tagged Owner=richard-roe or owner=richard-roe. Otherwise, he is denied access. The condition tag key Owner matches both Owner and owner because condition key names are not case-sensitive. For more information, see IAM JSON policy elements: Condition in the IAM User Guide. 
Identity-based policy examples 713 Amazon Keyspaces (for Apache Cassandra) Developer Guide The following policy grants permissions to a user to create tables with tags if the table's Owner contains the value of that user's user name. { "Version": "2012-10-17", "Statement": [ { "Sid": "CreateTagTableUser", "Effect": "Allow", "Action": [ "cassandra:Create", "cassandra:TagResource" ], "Resource": "arn:aws:cassandra:us-east-1:111122223333:/keyspace/mykeyspace/ table/*", "Condition":{ "StringEquals":{ "aws:RequestTag/Owner":"${aws:username}" } } } ] } AWS managed policies for Amazon Keyspaces An AWS managed policy is a standalone policy that is created and administered by AWS. AWS managed policies are designed to provide permissions for many common use cases so that you can start assigning permissions to users, groups, and roles. Keep in mind that AWS managed policies might not grant least-privilege permissions for your specific use cases because they're available for all AWS customers to use. We recommend that you reduce permissions further by defining customer managed policies that are specific to your use cases. You cannot change the permissions defined in AWS managed policies. If AWS updates the permissions defined in an AWS managed policy, the update affects all principal identities (users, groups, and roles) that the policy is attached to. AWS is most likely to update an AWS managed policy when a new AWS service is launched or new API operations become available for existing services. AWS managed policies 714 Amazon Keyspaces (for Apache Cassandra) Developer Guide For more information, see AWS managed policies in the IAM User Guide. AWS managed policy: AmazonKeyspacesReadOnlyAccess_v2 You can attach the AmazonKeyspacesReadOnlyAccess_v2 policy to your IAM identities. This policy grants read-only access to Amazon Keyspaces and includes the required permissions when connecting through private VPC endpoints. 
Permissions details This policy includes the following permissions. • Amazon Keyspaces – Provides read-only access to Amazon Keyspaces. • Application Auto Scaling – Allows principals to view configurations from Application Auto Scaling. This is required so that users can view automatic
AmazonKeyspaces-224
AmazonKeyspaces.pdf
224
for existing services. AWS managed policies 714 Amazon Keyspaces (for Apache Cassandra) Developer Guide For more information, see AWS managed policies in the IAM User Guide. AWS managed policy: AmazonKeyspacesReadOnlyAccess_v2 You can attach the AmazonKeyspacesReadOnlyAccess_v2 policy to your IAM identities. This policy grants read-only access to Amazon Keyspaces and includes the required permissions when connecting through private VPC endpoints. Permissions details This policy includes the following permissions. • Amazon Keyspaces – Provides read-only access to Amazon Keyspaces. • Application Auto Scaling – Allows principals to view configurations from Application Auto Scaling. This is required so that users can view automatic scaling policies that are attached to a table. • CloudWatch – Allows principals to view metric data and alarms configured in CloudWatch. This is required so users can view the billable table size and CloudWatch alarms that have been configured for a table. • AWS KMS – Allows principals to view keys configured in AWS KMS. This is required so users can view AWS KMS keys that they create and manage in their account to confirm that the key assigned to Amazon Keyspaces is a symmetric encryption key that is enabled. • Amazon EC2 – Allows principals connecting to Amazon Keyspaces through VPC endpoints to query the VPC on your Amazon EC2 instance for endpoint and network interface information. This read-only access to the Amazon EC2 instance is required so Amazon Keyspaces can look up and store available interface VPC endpoints in the system.peers table used for connection load balancing. To review the policy in JSON format, see AmazonKeyspacesReadOnlyAccess_v2. AWS managed policies 715 Amazon Keyspaces (for Apache Cassandra) Developer Guide AWS managed policy: AmazonKeyspacesReadOnlyAccess You can attach the AmazonKeyspacesReadOnlyAccess policy to your IAM identities. This policy grants read-only access to Amazon Keyspaces. 
Permissions details This policy includes the following permissions. • Amazon Keyspaces – Provides read-only access to Amazon Keyspaces. • Application Auto Scaling – Allows principals to view configurations from Application Auto Scaling. This is required so that users can view automatic scaling policies that are attached to a table. • CloudWatch – Allows principals to view metric data and alarms configured in CloudWatch. This is required so users can view the billable table size and CloudWatch alarms that have been configured for a table. • AWS KMS – Allows principals to view keys configured in AWS KMS. This is required so users can view AWS KMS keys that they create and manage in their account to confirm that the key assigned to Amazon Keyspaces is a symmetric encryption key that is enabled. To review the policy in JSON format, see AmazonKeyspacesReadOnlyAccess. AWS managed policy: AmazonKeyspacesFullAccess You can attach the AmazonKeyspacesFullAccess policy to your IAM identities. This policy grants administrative permissions that allow your administrators unrestricted access to Amazon Keyspaces. AWS managed policies 716 Amazon Keyspaces (for Apache Cassandra) Developer Guide Permissions details This policy includes the following permissions. • Amazon Keyspaces – Allows principals to access any Amazon Keyspaces resource and perform all actions. • Application Auto Scaling – Allows principals to create, view, and delete automatic scaling policies for Amazon Keyspaces tables. This is required so that administrators can manage automatic scaling policies for Amazon Keyspaces tables. • CloudWatch – Allows principals to see the billable table size as well as create, view, and delete CloudWatch alarms for Amazon Keyspaces automatic scaling policies. This is required so that administrators can view the billable table size and create a CloudWatch dashboard. 
• IAM – Allows Amazon Keyspaces to create service-linked roles with IAM automatically when the following features are turned on: • Application Auto Scaling – When an administrator enables Application Auto Scaling for a table, Amazon Keyspaces creates the service-linked role AWSServiceRoleForApplicationAutoScaling_CassandraTable to perform automatic scaling actions on your behalf. • Amazon Keyspaces multi-Region replication – When an administrator creates a new multi-Region keyspace, or adds a new AWS Region to an existing single-Region keyspace, Amazon Keyspaces creates the service-linked role AWSServiceRoleForAmazonKeyspacesReplication to perform replication of tables, data, and metadata to the selected Regions on your behalf. • AWS KMS – Allows principals to view keys configured in AWS KMS. This is required so that users can view AWS KMS keys that they create and manage in their account to confirm that the key assigned to Amazon Keyspaces is a symmetric encryption key that is enabled. • Amazon EC2 – Allows principals connecting to Amazon Keyspaces through VPC endpoints to query the VPC on your Amazon EC2 instance for endpoint and network interface information. This read-only access to the Amazon EC2 instance is required so Amazon Keyspaces can look up and store available interface VPC endpoints in the system.peers table used for connection load balancing. To review the policy in JSON format, see AmazonKeyspacesFullAccess. AWS managed policies 717 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces
AmazonKeyspaces-225
AmazonKeyspaces.pdf
225
their account to confirm that the key assigned to Amazon Keyspaces is a symmetric encryption key that is enabled. • Amazon EC2 – Allows principals connecting to Amazon Keyspaces through VPC endpoints to query the VPC on your Amazon EC2 instance for endpoint and network interface information. This read-only access to the Amazon EC2 instance is required so Amazon Keyspaces can look up and store available interface VPC endpoints in the system.peers table used for connection load balancing. To review the policy in JSON format, see AmazonKeyspacesFullAccess. AWS managed policies 717 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces updates to AWS managed policies View details about updates to AWS managed policies for Amazon Keyspaces since this service began tracking these changes. For automatic alerts about changes to this page, subscribe to the RSS feed on the Document history page. Change Description Date AmazonKeyspacesFullAccess – Update to an existing policy AmazonKeyspacesFullAccess – Update to an existing policy Amazon Keyspaces updated November 19, 2024 the KeyspacesReplicati onServiceRolePolic y of the service linked role AWSServiceRoleForA mazonKeyspacesReplication to add the permissions that are required when an administrator adds a new AWS Region to a single or multi-Region keyspace. Amazon Keyspaces uses the service-linked role AWSServiceRoleForA mazonKeyspacesRepl ication to replicate tables, their settings, and data on your behalf. For more information, see the section called “Multi-Region Replicati on”. Amazon Keyspaces added new permissions to allow Amazon Keyspaces to create a service-linked role when an administrator adds a new October 3, 2023 AWS managed policies 718 Amazon Keyspaces (for Apache Cassandra) Developer Guide Change Description Date Region to a single or multi- Region keyspace. Amazon Keyspaces uses the service-linked role to perform data replication tasks on your behalf. 
For more information, see the section called “Multi- Region Replication”. AmazonKeyspacesRea dOnlyAccess_v2 – New policy Amazon Keyspaces created a new policy to add read- September 12, 2023 only permissions for clients connecting to Amazon Keyspaces through interface VPC endpoints to access the Amazon EC2 instance to look up network information. Amazon Keyspaces stores available interface VPC endpoints in the system.pe ers table for connectio n load balancing. For more information, see the section called “Using interface VPC endpoints”. AWS managed policies 719 Amazon Keyspaces (for Apache Cassandra) Developer Guide Change Description Date AmazonKeyspacesFullAccess – Update to an existing policy Amazon Keyspaces added new permissions to allow June 5, 2023 Amazon Keyspaces to create a service-linked role when an administrator creates a multi- Region keyspace. Amazon Keyspaces uses the service-linked role AWSServiceRoleForA mazonKeyspacesRepl ication to perform data replication tasks on your behalf. For more information, see the section called “Multi- Region Replication”. AmazonKeyspacesRea dOnlyAccess – Update to an Amazon Keyspaces added new permissions to allow July 7, 2022 existing policy users to view the billable size of a table using CloudWatch. Amazon Keyspaces integrate s with Amazon CloudWatch to allow you to monitor the billable table size. For more information, see the section called “Amazon Keyspaces metrics and dimensions”. AWS managed policies 720 Amazon Keyspaces (for Apache Cassandra) Developer Guide Change Description Date AmazonKeyspacesFullAccess – Update to an existing policy Amazon Keyspaces added new permissions to allow July 7, 2022 users to view the billable size of a table using CloudWatch. Amazon Keyspaces integrate s with Amazon CloudWatch to allow you to monitor the billable table size. For more information, see the section called “Amazon Keyspaces metrics and dimensions”. 
AmazonKeyspacesRea dOnlyAccess – Update to an Amazon Keyspaces added new permissions to allow June 1, 2021 existing policy users to view AWS KMS keys that have been configured for Amazon Keyspaces encryption at rest. Amazon Keyspaces encryptio n at rest integrates with AWS KMS for protecting and managing the encryption keys used to encrypt data at rest. To view the AWS KMS key configured for Amazon Keyspaces, read-only permissions have been added. AWS managed policies 721 Amazon Keyspaces (for Apache Cassandra) Developer Guide Change Description Date AmazonKeyspacesFullAccess – Update to an existing policy Amazon Keyspaces added new permissions to allow June 1, 2021 users to view AWS KMS keys that have been configured for Amazon Keyspaces encryption at rest. Amazon Keyspaces encryptio n at rest integrates with AWS KMS for protecting and managing the encryption keys used to encrypt data at rest. To view the AWS KMS key configured for Amazon Keyspaces, read-only permissions have been added. Amazon Keyspaces started tracking changes Amazon Keyspaces started tracking changes for its AWS June 1, 2021 managed policies. Troubleshooting Amazon Keyspaces identity and access Use the following information to help you diagnose and fix common issues that you might encounter when working with Amazon Keyspaces and IAM. Topics • I'm not authorized to perform an action in Amazon Keyspaces • I modified an IAM user or role and the changes did not take effect immediately • I can't restore a table
AmazonKeyspaces-226
AmazonKeyspaces.pdf
226
encrypt data at rest. To view the AWS KMS key configured for Amazon Keyspaces, read-only permissions have been added. Amazon Keyspaces started tracking changes Amazon Keyspaces started tracking changes for its AWS June 1, 2021 managed policies. Troubleshooting Amazon Keyspaces identity and access Use the following information to help you diagnose and fix common issues that you might encounter when working with Amazon Keyspaces and IAM. Topics • I'm not authorized to perform an action in Amazon Keyspaces • I modified an IAM user or role and the changes did not take effect immediately • I can't restore a table using Amazon Keyspaces point-in-time recovery (PITR) • I'm not authorized to perform iam:PassRole • I'm an administrator and want to allow others to access Amazon Keyspaces • I want to allow people outside of my AWS account to access my Amazon Keyspaces resources Troubleshooting 722 Amazon Keyspaces (for Apache Cassandra) Developer Guide I'm not authorized to perform an action in Amazon Keyspaces If the AWS Management Console tells you that you're not authorized to perform an action, then you must contact your administrator for assistance. Your administrator is the person that provided you with your user name and password. The following example error occurs when the mateojackson IAM user tries to use the console to view details about a table but does not have cassandra:Select permissions for the table. User: arn:aws:iam::123456789012:user/mateojackson is not authorized to perform: cassandra:Select on resource: mytable In this case, Mateo asks his administrator to update his policies to allow him to access the mytable resource using the cassandra:Select action. I modified an IAM user or role and the changes did not take effect immediately IAM policy changes may take up to 10 minutes to take effect for applications with existing, established connections to Amazon Keyspaces. IAM policy changes take effect immediately when applications establish a new connection. 
If you have made modifications to an existing IAM user or role, and it has not taken immediate effect, either wait for 10 minutes or disconnect and reconnect to Amazon Keyspaces. I can't restore a table using Amazon Keyspaces point-in-time recovery (PITR) If you are trying to restore an Amazon Keyspaces table with point-in-time recovery (PITR), and you see the restore process begin, but not complete successfully, you might not have configured all of the required permissions that are needed by the restore process. You must contact your administrator for assistance and ask that person to update your policies to allow you to restore a table in Amazon Keyspaces. In addition to user permissions, Amazon Keyspaces may require permissions to perform actions during the restore process on your principal's behalf. This is the case if the table is encrypted with a customer-managed key, or if you are using IAM policies that restrict incoming traffic. For example, if you are using condition keys in your IAM policy to restrict source traffic to specific endpoints or IP ranges, the restore operation fails. To allow Amazon Keyspaces to perform the table restore operation on your principal's behalf, you must add an aws:ViaAWSService global condition key in the IAM policy. Troubleshooting 723 Amazon Keyspaces (for Apache Cassandra) Developer Guide For more information about permissions to restore tables, see the section called “Configure IAM permissions for restore”. I'm not authorized to perform iam:PassRole If you receive an error that you're not authorized to perform the iam:PassRole action, your policies must be updated to allow you to pass a role to Amazon Keyspaces. Some AWS services allow you to pass an existing role to that service instead of creating a new service role or service-linked role. To do this, you must have permissions to pass the role to the service. 
The following example error occurs when an IAM user named marymajor tries to use the console to perform an action in Amazon Keyspaces. However, the action requires the service to have permissions that are granted by a service role. Mary does not have permissions to pass the role to the service. User: arn:aws:iam::123456789012:user/marymajor is not authorized to perform: iam:PassRole In this case, Mary's policies must be updated to allow her to perform the iam:PassRole action. If you need help, contact your AWS administrator. Your administrator is the person who provided you with your sign-in credentials. I'm an administrator and want to allow others to access Amazon Keyspaces To allow others to access Amazon Keyspaces, you must grant permission to the people or applications that need access. If you are using AWS IAM Identity Center to manage people and applications, you assign permission sets to users or groups to define their level of access. Permission sets automatically create and assign IAM policies to IAM roles that are associated with the person or application. For more information, see Permission sets in the AWS IAM Identity Center User
AmazonKeyspaces-227
AmazonKeyspaces.pdf
227
is the person who provided you with your sign-in credentials. I'm an administrator and want to allow others to access Amazon Keyspaces To allow others to access Amazon Keyspaces, you must grant permission to the people or applications that need access. If you are using AWS IAM Identity Center to manage people and applications, you assign permission sets to users or groups to define their level of access. Permission sets automatically create and assign IAM policies to IAM roles that are associated with the person or application. For more information, see Permission sets in the AWS IAM Identity Center User Guide. If you are not using IAM Identity Center, you must create IAM entities (users or roles) for the people or applications that need access. You must then attach a policy to the entity that grants them the correct permissions in Amazon Keyspaces. After the permissions are granted, provide the credentials to the user or application developer. They will use those credentials to access AWS. To learn more about creating IAM users, groups, policies, and permissions, see IAM Identities and Policies and permissions in IAM in the IAM User Guide. Troubleshooting 724 Amazon Keyspaces (for Apache Cassandra) Developer Guide I want to allow people outside of my AWS account to access my Amazon Keyspaces resources You can create a role that users in other accounts or people outside of your organization can use to access your resources. You can specify who is trusted to assume the role. For services that support resource-based policies or access control lists (ACLs), you can use those policies to grant people access to your resources. To learn more, consult the following: • To learn whether Amazon Keyspaces supports these features, see How Amazon Keyspaces works with IAM. • To learn how to provide access to your resources across AWS accounts that you own, see Providing access to an IAM user in another AWS account that you own in the IAM User Guide. 
• To learn how to provide access to your resources to third-party AWS accounts, see Providing access to AWS accounts owned by third parties in the IAM User Guide. • To learn how to provide access through identity federation, see Providing access to externally authenticated users (identity federation) in the IAM User Guide. • To learn the difference between using roles and resource-based policies for cross-account access, see Cross account resource access in IAM in the IAM User Guide. Using service-linked roles for Amazon Keyspaces Amazon Keyspaces (for Apache Cassandra) uses AWS Identity and Access Management (IAM) service-linked roles. A service-linked role is a unique type of IAM role that is linked directly to Amazon Keyspaces. Service-linked roles are predefined by Amazon Keyspaces and include all the permissions that the service requires to call other AWS services on your behalf. For information about other services that support service-linked roles, see AWS services that work with IAM and look for the services that have Yes in the Service-linked roles column. Choose a Yes with a link to view the service-linked role documentation for that service. Topics • Using roles for Amazon Keyspaces application auto scaling • Using roles for Amazon Keyspaces Multi-Region Replication Using service-linked roles 725 Amazon Keyspaces (for Apache Cassandra) Developer Guide Using roles for Amazon Keyspaces application auto scaling Amazon Keyspaces (for Apache Cassandra) uses AWS Identity and Access Management (IAM) service-linked roles. A service-linked role is a unique type of IAM role that is linked directly to Amazon Keyspaces. Service-linked roles are predefined by Amazon Keyspaces and include all the permissions that the service requires to call other AWS services on your behalf. A service-linked role makes setting up Amazon Keyspaces easier because you don’t have to manually add the necessary permissions. 
Amazon Keyspaces defines the permissions of its service- linked roles, and unless defined otherwise, only Amazon Keyspaces can assume its roles. The defined permissions include the trust policy and the permissions policy, and that permissions policy cannot be attached to any other IAM entity. You can delete a service-linked role only after first deleting its related resources. This protects your Amazon Keyspaces resources because you can't inadvertently remove permission to access the resources. Service-linked role permissions for Amazon Keyspaces Amazon Keyspaces uses the service-linked role named AWSServiceRoleForApplicationAutoScaling_CassandraTable to allow Application Auto Scaling to call Amazon Keyspaces and Amazon CloudWatch on your behalf. The AWSServiceRoleForApplicationAutoScaling_CassandraTable service-linked role trusts the following services to assume the role: • cassandra.application-autoscaling.amazonaws.com The role permissions policy allows Application Auto Scaling to complete the following actions on the specified Amazon Keyspaces resources: • Action: cassandra:Select on arn:*:cassandra:*:*:/keyspace/system/table/* • Action: cassandra:Select on the resource arn:*:cassandra:*:*:/keyspace/ system_schema/table/* • Action: cassandra:Select on the resource arn:*:cassandra:*:*:/keyspace/ system_schema_mcs/table/* • Action: cassandra:Alter on the resource arn:*:cassandra:*:*:"*" Using service-linked roles 726 Amazon Keyspaces (for Apache Cassandra) Developer Guide Creating a service-linked role
AmazonKeyspaces-228
AmazonKeyspaces.pdf
228
Keyspaces Amazon Keyspaces uses the service-linked role named AWSServiceRoleForApplicationAutoScaling_CassandraTable to allow Application Auto Scaling to call Amazon Keyspaces and Amazon CloudWatch on your behalf. The AWSServiceRoleForApplicationAutoScaling_CassandraTable service-linked role trusts the following services to assume the role: • cassandra.application-autoscaling.amazonaws.com The role permissions policy allows Application Auto Scaling to complete the following actions on the specified Amazon Keyspaces resources: • Action: cassandra:Select on arn:*:cassandra:*:*:/keyspace/system/table/* • Action: cassandra:Select on the resource arn:*:cassandra:*:*:/keyspace/ system_schema/table/* • Action: cassandra:Select on the resource arn:*:cassandra:*:*:/keyspace/ system_schema_mcs/table/* • Action: cassandra:Alter on the resource arn:*:cassandra:*:*:"*" Using service-linked roles 726 Amazon Keyspaces (for Apache Cassandra) Developer Guide Creating a service-linked role for Amazon Keyspaces You don't need to manually create a service-linked role for Amazon Keyspaces automatic scaling. When you enable Amazon Keyspaces auto scaling on a table with the AWS Management Console, CQL, the AWS CLI, or the AWS API, Application Auto Scaling creates the service-linked role for you. If you delete this service-linked role, and then need to create it again, you can use the same process to recreate the role in your account. When you enable Amazon Keyspaces auto scaling for a table, Application Auto Scaling creates the service-linked role for you again. Important This service-linked role can appear in your account if you completed an action in another service that uses the features supported by this role. To learn more, see A new role appeared in my AWS account. If you delete this service-linked role, and then need to create it again, you can use the same process to recreate the role in your account. 
When you enable Amazon Keyspaces automatic application scaling for a table, Application Auto Scaling creates the service-linked role for you again. Editing a service-linked role for Amazon Keyspaces Amazon Keyspaces does not allow you to edit the AWSServiceRoleForApplicationAutoScaling_CassandraTable service-linked role. After you create a service-linked role, you cannot change the name of the role because various entities might reference the role. However, you can edit the description of the role using IAM. For more information, see Editing a service-linked role in the IAM User Guide. Deleting a service-linked role for Amazon Keyspaces If you no longer need to use a feature or service that requires a service-linked role, we recommend that you delete that role. That way you don’t have an unused entity that isn't actively monitored or maintained. However, you must first disable automatic scaling on all tables in the account across all AWS Regions before you can delete the service-linked role manually. To disable automatic scaling on Amazon Keyspaces tables, see the section called “Turn off Amazon Keyspaces auto scaling for a table”. Using service-linked roles 727 Amazon Keyspaces (for Apache Cassandra) Developer Guide Note If Amazon Keyspaces automatic scaling is using the role when you try to modify the resources, then the deregistration might fail. If that happens, wait for a few minutes and try the operation again. To manually delete the service-linked role using IAM Use the IAM console, the AWS CLI, or the AWS API to delete the AWSServiceRoleForApplicationAutoScaling_CassandraTable service-linked role. For more information, see Deleting a Service-Linked Role in the IAM User Guide. Note To delete the service-linked role used by Amazon Keyspaces automatic scaling, you must first disable automatic scaling on all tables in the account. 
Supported Regions for Amazon Keyspaces service-linked roles Amazon Keyspaces supports using service-linked roles in all of the Regions where the service is available. For more information, see Service endpoints for Amazon Keyspaces. Using roles for Amazon Keyspaces Multi-Region Replication Amazon Keyspaces (for Apache Cassandra) uses AWS Identity and Access Management (IAM) service-linked roles. A service-linked role is a unique type of IAM role that is linked directly to Amazon Keyspaces. Service-linked roles are predefined by Amazon Keyspaces and include all the permissions that the service requires to call other AWS services on your behalf. A service-linked role makes setting up Amazon Keyspaces easier because you don’t have to manually add the necessary permissions. Amazon Keyspaces defines the permissions of its service- linked roles, and unless defined otherwise, only Amazon Keyspaces can assume its roles. The defined permissions include the trust policy and the permissions policy, and that permissions policy cannot be attached to any other IAM entity. You can delete a service-linked role only after first deleting its related resources. This protects your Amazon Keyspaces resources because you can't inadvertently remove permission to access the resources. Using service-linked roles 728 Amazon Keyspaces (for Apache Cassandra) Developer Guide Service-linked role permissions for Amazon Keyspaces Amazon Keyspaces uses the service-linked role named AWSServiceRoleForAmazonKeyspacesReplication to allow Amazon Keyspaces to add new AWS Regions to a keyspace on your behalf, and replicate tables and all their data and settings to the new Region. The role also allows Amazon Keyspaces to replicate writes to tables in all Regions on your behalf. The AWSServiceRoleForAmazonKeyspacesReplication service-linked
AmazonKeyspaces-229
AmazonKeyspaces.pdf
229
delete a service-linked role only after first deleting its related resources. This protects your Amazon Keyspaces resources because you can't inadvertently remove permission to access the resources. Using service-linked roles 728 Amazon Keyspaces (for Apache Cassandra) Developer Guide Service-linked role permissions for Amazon Keyspaces Amazon Keyspaces uses the service-linked role named AWSServiceRoleForAmazonKeyspacesReplication to allow Amazon Keyspaces to add new AWS Regions to a keyspace on your behalf, and replicate tables and all their data and settings to the new Region. The role also allows Amazon Keyspaces to replicate writes to tables in all Regions on your behalf. The AWSServiceRoleForAmazonKeyspacesReplication service-linked role trusts the following services to assume the role: • replication.cassandra.amazonaws.com The role permissions policy named KeyspacesReplicationServiceRolePolicy allows Amazon Keyspaces to complete the following actions: • Action: cassandra:Select • Action: cassandra:SelectMultiRegionResource • Action: cassandra:Modify • Action: cassandra:ModifyMultiRegionResource • Action: cassandra:AlterMultiRegionResource • Action: application-autoscaling:RegisterScalableTarget – Amazon Keyspaces uses the application auto scaling permissions when you add a replica to a single Region table in provisioned mode with auto scaling enabled. 
• Action: application-autoscaling:DeregisterScalableTarget • Action: application-autoscaling:DescribeScalableTargets • Action: application-autoscaling:PutScalingPolicy • Action: application-autoscaling:DescribeScalingPolicies • Action: cassandra:Alter • Action: cloudwatch:DeleteAlarms • Action: cloudwatch:DescribeAlarms • Action: cloudwatch:PutMetricAlarm Although the Amazon Keyspaces service-linked role AWSServiceRoleForAmazonKeyspacesReplication provides the permissions: "Action:" for the Using service-linked roles 729 Amazon Keyspaces (for Apache Cassandra) Developer Guide specified Amazon Resource Name (ARN) "arn:*" in the policy, Amazon Keyspaces supplies the ARN of your account. Permissions to create the service-linked role AWSServiceRoleForAmazonKeyspacesReplication are included in the AmazonKeyspacesFullAccess managed policy. For more information, see the section called “AmazonKeyspacesFullAccess”. You must configure permissions to allow your users, groups, or roles to create, edit, or delete a service-linked role. For more information, see Service-linked role permissions in the IAM User Guide. Creating a service-linked role for Amazon Keyspaces You can't manually create a service-linked role. When you create a multi-Region keyspace in the AWS Management Console, the AWS CLI, or the AWS API, Amazon Keyspaces creates the service- linked role for you. If you delete this service-linked role, and then need to create it again, you can use the same process to recreate the role in your account. When you create a multi-Region keyspace, Amazon Keyspaces creates the service-linked role for you again. Editing a service-linked role for Amazon Keyspaces Amazon Keyspaces does not allow you to edit the AWSServiceRoleForAmazonKeyspacesReplication service-linked role. After you create a service-linked role, you cannot change the name of the role because various entities might reference the role. However, you can edit the description of the role using IAM. 
For more information, see Editing a service-linked role in the IAM User Guide. Deleting a service-linked role for Amazon Keyspaces If you no longer need to use a feature or service that requires a service-linked role, we recommend that you delete that role. That way you don’t have an unused entity that is not actively monitored or maintained. However, you must first delete all multi-Region keyspaces in the account across all AWS Regions before you can delete the service-linked role manually. Cleaning up a service-linked role Before you can use IAM to delete a service-linked role, you must first delete any multi-Region keyspaces and tables used by the role. Using service-linked roles 730 Amazon Keyspaces (for Apache Cassandra) Developer Guide Note If the Amazon Keyspaces service is using the role when you try to delete the resources, then the deletion might fail. If that happens, wait for a few minutes and try the operation again. To delete Amazon Keyspaces resources used by the AWSServiceRoleForAmazonKeyspacesReplication (console) 1. Sign in to the AWS Management Console, and open the Amazon Keyspaces console at https://console.aws.amazon.com/keyspaces/home. 2. Choose Keyspaces from the left-side panel. 3. Select all multi-Region keyspaces from the list. 4. Choose Delete, confirm the deletion, and choose Delete keyspaces. You can also delete multi-Region keyspaces programmatically using any of the following methods. • The Cassandra Query Language (CQL) DROP KEYSPACE statement. • The delete-keyspace operation of the AWS CLI. • The DeleteKeyspace operation of the Amazon Keyspaces API. Manually delete the service-linked role Use the IAM console, the AWS CLI, or the AWS API to delete the AWSServiceRoleForAmazonKeyspacesReplication service-linked role. For more information, see Deleting a service-linked role in the IAM User Guide. 
Supported Regions for Amazon Keyspaces service-linked roles Amazon Keyspaces does not support using service-linked roles in every Region where the service is available. You can use the AWSServiceRoleForAmazonKeyspacesReplication role in the following Regions. Region name Region identity Support in Amazon Keyspaces US East (N. Virginia) us-east-1 Yes Using service-linked roles 731 Amazon Keyspaces (for Apache Cassandra) Developer Guide Region name Region identity Support in Amazon Keyspaces US East (Ohio) US West (N. California) US West (Oregon) Asia Pacific (Mumbai) Asia Pacific (Osaka) Asia Pacific (Seoul) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Canada (Central) Europe (Frankfurt) Europe (Ireland)
AmazonKeyspaces-230
AmazonKeyspaces.pdf
230
Supported Regions for Amazon Keyspaces service-linked roles Amazon Keyspaces does not support using service-linked roles in every Region where the service is available. You can use the AWSServiceRoleForAmazonKeyspacesReplication role in the following Regions. Region name Region identity Support in Amazon Keyspaces US East (N. Virginia) us-east-1 Yes Using service-linked roles 731 Amazon Keyspaces (for Apache Cassandra) Developer Guide Region name Region identity Support in Amazon Keyspaces US East (Ohio) US West (N. California) US West (Oregon) Asia Pacific (Mumbai) Asia Pacific (Osaka) Asia Pacific (Seoul) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Canada (Central) Europe (Frankfurt) Europe (Ireland) Europe (London) Europe (Paris) South America (São Paulo) us-east-2 us-west-1 us-west-2 ap-south-1 ap-northeast-3 ap-northeast-2 ap-southeast-1 ap-southeast-2 ap-northeast-1 ca-central-1 eu-central-1 eu-west-1 eu-west-2 eu-west-3 sa-east-1 AWS GovCloud (US-East) us-gov-east-1 AWS GovCloud (US-West) us-gov-west-1 Yes Yes Yes Yes Yes Yes Yes Yes Yes Yes Yes Yes Yes Yes Yes No No Using service-linked roles 732 Amazon Keyspaces (for Apache Cassandra) Developer Guide Compliance validation for Amazon Keyspaces (for Apache Cassandra) Third-party auditors assess the security and compliance of Amazon Keyspaces (for Apache Cassandra) as part of multiple AWS compliance programs. These include: • ISO/IEC 27001:2013, 27017:2015, 27018:2019, and ISO/IEC 9001:2015. For more information, see AWS ISO and CSA STAR certifications and services. • System and Organization Controls (SOC) • Payment Card Industry (PCI) • Federal Risk and Authorization Management Program (FedRAMP) High • Health Insurance Portability and Accountability Act (HIPAA) To learn whether an AWS service is within the scope of specific compliance programs, see AWS services in Scope by Compliance Program and choose the compliance program that you are interested in. 
For general information, see AWS Compliance Programs. You can download third-party audit reports using AWS Artifact. For more information, see Downloading Reports in AWS Artifact. Your compliance responsibility when using AWS services is determined by the sensitivity of your data, your company's compliance objectives, and applicable laws and regulations. AWS provides the following resources to help with compliance: • Security Compliance & Governance – These solution implementation guides discuss architectural considerations and provide steps for deploying security and compliance features. • HIPAA Eligible Services Reference – Lists HIPAA eligible services. Not all AWS services are HIPAA eligible. • AWS Compliance Resources – This collection of workbooks and guides might apply to your industry and location. • AWS Customer Compliance Guides – Understand the shared responsibility model through the lens of compliance. The guides summarize the best practices for securing AWS services and map the guidance to security controls across multiple frameworks (including National Institute of Standards and Technology (NIST), Payment Card Industry Security Standards Council (PCI), and International Organization for Standardization (ISO)). Compliance validation 733 Amazon Keyspaces (for Apache Cassandra) Developer Guide • Evaluating Resources with Rules in the AWS Config Developer Guide – The AWS Config service assesses how well your resource configurations comply with internal practices, industry guidelines, and regulations. • AWS Security Hub – This AWS service provides a comprehensive view of your security state within AWS. Security Hub uses security controls to evaluate your AWS resources and to check your compliance against security industry standards and best practices. For a list of supported services and controls, see Security Hub controls reference. 
• Amazon GuardDuty – This AWS service detects potential threats to your AWS accounts, workloads, containers, and data by monitoring your environment for suspicious and malicious activities. GuardDuty can help you address various compliance requirements, like PCI DSS, by meeting intrusion detection requirements mandated by certain compliance frameworks. • AWS Audit Manager – This AWS service helps you continuously audit your AWS usage to simplify how you manage risk and compliance with regulations and industry standards. Resilience and disaster recovery in Amazon Keyspaces The AWS global infrastructure is built around AWS Regions and Availability Zones. AWS Regions provide multiple physically separated and isolated Availability Zones, which are connected with low-latency, high-throughput, and highly redundant networking. With Availability Zones, you can design and operate applications and databases that automatically fail over between Availability Zones without interruption. Availability Zones are more highly available, fault tolerant, and scalable than traditional single or multiple data center infrastructures. Amazon Keyspaces replicates data automatically three times in multiple AWS Availability Zones within the same AWS Region for durability and high availability. For more information about AWS Regions and Availability Zones, see AWS global infrastructure. In addition to the AWS global infrastructure, Amazon Keyspaces offers several features to help support your data resiliency and backup needs. multi-Region replication Amazon Keyspaces provides multi-Region replication if you need to replicate your data or applications over greater geographic distances. You can replicate your Amazon Keyspaces tables across different AWS Regions of your choice. For more information, see the section called “Multi-Region replication”. Resilience 734 Amazon Keyspaces (for Apache Cassandra) Point-in-time recovery (PITR) Developer Guide PITR helps
AmazonKeyspaces-231
AmazonKeyspaces.pdf
231
the same AWS Region for durability and high availability. For more information about AWS Regions and Availability Zones, see AWS global infrastructure. In addition to the AWS global infrastructure, Amazon Keyspaces offers several features to help support your data resiliency and backup needs. multi-Region replication Amazon Keyspaces provides multi-Region replication if you need to replicate your data or applications over greater geographic distances. You can replicate your Amazon Keyspaces tables across different AWS Regions of your choice. For more information, see the section called “Multi-Region replication”. Resilience 734 Amazon Keyspaces (for Apache Cassandra) Point-in-time recovery (PITR) Developer Guide PITR helps protect your Amazon Keyspaces tables from accidental write or delete operations by providing you continuous backups of your table data. For more information, see Point-in-time recovery for Amazon Keyspaces. Infrastructure security in Amazon Keyspaces As a managed service, Amazon Keyspaces (for Apache Cassandra) is protected by AWS global network security. For information about AWS security services and how AWS protects infrastructure, see AWS Cloud Security. To design your AWS environment using the best practices for infrastructure security, see Infrastructure Protection in Security Pillar AWS Well‐Architected Framework. You use AWS published API calls to access Amazon Keyspaces through the network. Clients must support the following: • Transport Layer Security (TLS). We require TLS 1.2 and recommend TLS 1.3. • Cipher suites with perfect forward secrecy (PFS) such as DHE (Ephemeral Diffie-Hellman) or ECDHE (Elliptic Curve Ephemeral Diffie-Hellman). Most modern systems such as Java 7 and later support these modes. Additionally, requests must be signed by using an access key ID and a secret access key that is associated with an IAM principal. 
Or you can use the AWS Security Token Service (AWS STS) to generate temporary security credentials to sign requests. Amazon Keyspaces supports two methods of authenticating client requests. The first method uses service-specific credentials, which are password based credentials generated for a specific IAM user. You can create and manage the password using the IAM console, the AWS CLI, or the AWS API. For more information, see Using IAM with Amazon Keyspaces. The second method uses an authentication plugin for the open-source DataStax Java Driver for Cassandra. This plugin enables IAM users, roles, and federated identities to add authentication information to Amazon Keyspaces (for Apache Cassandra) API requests using the AWS Signature Version 4 process (SigV4). For more information, see the section called “Create IAM credentials for AWS authentication”. Infrastructure security 735 Amazon Keyspaces (for Apache Cassandra) Developer Guide You can use an interface VPC endpoint to keep traffic between your Amazon VPC and Amazon Keyspaces from leaving the Amazon network. Interface VPC endpoints are powered by AWS PrivateLink, an AWS technology that enables private communication between AWS services using an elastic network interface with private IPs in your Amazon VPC. For more information, see the section called “Using interface VPC endpoints”. Using Amazon Keyspaces with interface VPC endpoints Interface VPC endpoints enable private communication between your virtual private cloud (VPC) running in Amazon VPC and Amazon Keyspaces. Interface VPC endpoints are powered by AWS PrivateLink, which is an AWS service that enables private communication between VPCs and AWS services. AWS PrivateLink enables this by using an elastic network interface with private IP addresses in your VPC so that network traffic does not leave the Amazon network. Interface VPC endpoints don't require an internet gateway, NAT device, VPN connection, or AWS Direct Connect connection. 
For more information, see Amazon Virtual Private Cloud and Interface VPC endpoints (AWS PrivateLink). Topics • Using interface VPC endpoints for Amazon Keyspaces • Populating system.peers table entries with interface VPC endpoint information • Controlling access to interface VPC endpoints for Amazon Keyspaces • Availability • VPC endpoint policies and Amazon Keyspaces point-in-time recovery (PITR) • Common errors and warnings Using interface VPC endpoints for Amazon Keyspaces You can create an interface VPC endpoint so that traffic between Amazon Keyspaces and your Amazon VPC resources starts flowing through the interface VPC endpoint. To get started, follow the steps to create an interface endpoint. Next, edit the security group associated with the endpoint that you created in the previous step, and configure an inbound rule for port 9142. For more information, see Adding, removing, and updating rules. For a step-by-step tutorial to configure a connection to Amazon Keyspaces through a VPC endpoint, see the section called “Connecting with VPC endpoints”. To learn how to configure cross- Using interface VPC endpoints 736 Amazon Keyspaces (for Apache Cassandra) Developer Guide account access for Amazon Keyspaces resources separated from applications in different AWS accounts in a VPC, see the section called “Configure cross-account access”. Populating system.peers table entries with interface VPC endpoint information Apache Cassandra drivers use the system.peers table to query for node information about the cluster. Cassandra drivers use the node information to load balance connections and
AmazonKeyspaces-232
AmazonKeyspaces.pdf
232
rules. For a step-by-step tutorial to configure a connection to Amazon Keyspaces through a VPC endpoint, see the section called “Connecting with VPC endpoints”. To learn how to configure cross- Using interface VPC endpoints 736 Amazon Keyspaces (for Apache Cassandra) Developer Guide account access for Amazon Keyspaces resources separated from applications in different AWS accounts in a VPC, see the section called “Configure cross-account access”. Populating system.peers table entries with interface VPC endpoint information Apache Cassandra drivers use the system.peers table to query for node information about the cluster. Cassandra drivers use the node information to load balance connections and retry operations. Amazon Keyspaces populates nine entries in the system.peers table automatically for clients connecting through the public endpoint. To provide clients connecting through interface VPC endpoints with similar functionality, Amazon Keyspaces populates the system.peers table in your account with an entry for each Availability Zone where a VPC endpoint is available. To look up and store available interface VPC endpoints in the system.peers table, Amazon Keyspaces requires that you grant the IAM entity used to connect to Amazon Keyspaces access permissions to query your VPC for the endpoint and network interface information. Important Populating the system.peers table with your available interface VPC endpoints improves load balancing and increases read/write throughput. It is recommended for all clients accessing Amazon Keyspaces using interface VPC endpoints and is required for Apache Spark. To grant the IAM entity used to connect to Amazon Keyspaces permissions to look up the necessary interface VPC endpoint information, you can update your existing IAM role or user policy, or create a new IAM policy as shown in the following example. 
{ "Version":"2012-10-17", "Statement":[ { "Sid":"ListVPCEndpoints", "Effect":"Allow", "Action":[ "ec2:DescribeNetworkInterfaces", "ec2:DescribeVpcEndpoints" ], "Resource":"*" Using interface VPC endpoints 737 Amazon Keyspaces (for Apache Cassandra) Developer Guide } ] } Note The managed policies AmazonKeyspacesReadOnlyAccess_v2 and AmazonKeyspacesFullAccess include the required permissions to let Amazon Keyspaces access the Amazon EC2 instance to read information about available interface VPC endpoints. To confirm that the policy has been set up correctly, query the system.peers table to see networking information. If the system.peers table is empty, it could indicate that the policy hasn't been configured successfully or that you have exceeded the request rate quota for DescribeNetworkInterfaces and DescribeVPCEndpoints API actions. DescribeVPCEndpoints falls into the Describe* category and is considered a non-mutating action. DescribeNetworkInterfaces falls into the subset of unfiltered and unpaginated non- mutating actions, and different quotas apply. For more information, see Request token bucket sizes and refill rates in the Amazon EC2 API Reference. If you do see an empty table, try again a few minutes later to rule out request rate quota issues. To verify that you have configured the VPC endpoints correctly, see the section called “VPC endpoint connection errors”. If your query returns results from the table, your policy has been configured correctly. Controlling access to interface VPC endpoints for Amazon Keyspaces With VPC endpoint policies, you can control access to resources in two ways: • IAM policy – You can control the requests, users, or groups that are allowed to access Amazon Keyspaces through a specific VPC endpoint. You can do this by using a condition key in the policy that is attached to an IAM user, group, or role. • VPC policy – You can control which VPC endpoints have access to your Amazon Keyspaces resources by attaching policies to them. 
To restrict access to a specific keyspace or table to only allow traffic coming through a specific VPC endpoint, edit the existing IAM policy that restricts resource access and add that VPC endpoint. Using interface VPC endpoints 738 Amazon Keyspaces (for Apache Cassandra) Developer Guide The following are example endpoint policies for accessing Amazon Keyspaces resources. • IAM policy example: Restrict all access to a specific Amazon Keyspaces table unless traffic comes from the specified VPC endpoint – This sample policy can be attached to an IAM user, role, or group. It restricts access to a specified Amazon Keyspaces table unless incoming traffic originates from a specified VPC endpoint. { "Version": "2012-10-17", "Statement": [ { "Sid": "UserOrRolePolicyToDenyAccess", "Action": "cassandra:*", "Effect": "Deny", "Resource": [ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/ mykeyspace/table/mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ], "Condition": { "StringNotEquals" : { "aws:sourceVpce": "vpce-abc123" } } } ] } Note To restrict access to a specific table, you must also include access to the system tables. System tables are read-only. • VPC policy example: Read-only access – This sample policy can be attached to a VPC endpoint. (For more information, see Controlling access to Amazon VPC resources). It restricts actions to read-only access to Amazon Keyspaces resources through the VPC endpoint that it's attached to. { "Version": "2012-10-17", "Statement": [ { "Sid": "ReadOnly", "Principal": "*", "Action": [ Using interface VPC endpoints 739 Amazon Keyspaces (for Apache Cassandra) Developer Guide "cassandra:Select" ], "Effect": "Allow", "Resource": "*" } ]
AmazonKeyspaces-233
AmazonKeyspaces.pdf
233
] } Note To restrict access to a specific table, you must also include access to the system tables. System tables are read-only. • VPC policy example: Read-only access – This sample policy can be attached to a VPC endpoint. (For more information, see Controlling access to Amazon VPC resources). It restricts actions to read-only access to Amazon Keyspaces resources through the VPC endpoint that it's attached to. { "Version": "2012-10-17", "Statement": [ { "Sid": "ReadOnly", "Principal": "*", "Action": [ Using interface VPC endpoints 739 Amazon Keyspaces (for Apache Cassandra) Developer Guide "cassandra:Select" ], "Effect": "Allow", "Resource": "*" } ] } • VPC policy example: Restrict access to a specific Amazon Keyspaces table – This sample policy can be attached to a VPC endpoint. It restricts access to a specific table through the VPC endpoint that it's attached to. { "Version": "2012-10-17", "Statement": [ { "Sid": "RestrictAccessToTable", "Principal": "*", "Action": "cassandra:*", "Effect": "Allow", "Resource": [ "arn:aws:cassandra:us-east-1:111122223333:/keyspace/ mykeyspace/table/mytable", "arn:aws:cassandra:us-east-1:111122223333:/keyspace/system*" ] } ] } Note To restrict access to a specific table, you must also include access to the system tables. System tables are read-only. Availability Amazon Keyspaces supports using interface VPC endpoints in all of the AWS Regions where the service is available. For more information, see ???. Using interface VPC endpoints 740 Amazon Keyspaces (for Apache Cassandra) Developer Guide VPC endpoint policies and Amazon Keyspaces point-in-time recovery (PITR) If you are using IAM policies with condition keys to restrict incoming traffic, the table restore operation may fail. For example, if you restrict source traffic to specific VPC endpoints using aws:SourceVpce condition keys, the table restore operation fails. 
To allow Amazon Keyspaces to perform a restore operation on your principal's behalf, you must add an aws:ViaAWSService condition key to your IAM policy. The aws:ViaAWSService condition key allows access when any AWS service makes a request using the principal's credentials. For more information, see IAM JSON policy elements: Condition key in the IAM User Guide. The following policy is an example of this. { "Version":"2012-10-17", "Statement":[ { "Sid":"CassandraAccessForVPCE", "Effect":"Allow", "Action":"cassandra:*", "Resource":"*", "Condition":{ "Bool":{ "aws:ViaAWSService":"false" }, "StringEquals":{ "aws:SourceVpce":[ "vpce-12345678901234567" ] } } }, { "Sid":"CassandraAccessForAwsService", "Effect":"Allow", "Action":"cassandra:*", "Resource":"*", "Condition":{ "Bool":{ "aws:ViaAWSService":"true" } } } ] } Using interface VPC endpoints 741 Amazon Keyspaces (for Apache Cassandra) Developer Guide Common errors and warnings If you're using Amazon Virtual Private Cloud and you connect to Amazon Keyspaces, you might see the following warning. Control node cassandra.us-east-1.amazonaws.com/1.111.111.111:9142 has an entry for itself in system.peers: this entry will be ignored. This is likely due to a misconfiguration; please verify your rpc_address configuration in cassandra.yaml on all nodes in your cluster. This warning occurs because the system.peers table contains entries for all of the Amazon VPC endpoints that Amazon Keyspaces has permissions to view, including the Amazon VPC endpoint that you're connected through. You can safely ignore this warning. For other errors, see the section called “VPC endpoint connection errors”. Configuration and vulnerability analysis for Amazon Keyspaces AWS handles basic security tasks like guest operating system (OS) and database patching, firewall configuration, and disaster recovery. These procedures have been reviewed and certified by the appropriate third parties. 
For more details, see the following resources: • Shared responsibility model • Amazon Web Services: Overview of security processes (whitepaper) Security best practices for Amazon Keyspaces Amazon Keyspaces (for Apache Cassandra) provides a number of security features to consider as you develop and implement your own security policies. The following best practices are general guidelines and don’t represent a complete security solution. Because these best practices might not be appropriate or sufficient for your environment, treat them as helpful considerations rather than prescriptions. Topics • Preventative security best practices for Amazon Keyspaces • Detective security best practices for Amazon Keyspaces Configuration and vulnerability analysis for Amazon Keyspaces 742 Amazon Keyspaces (for Apache Cassandra) Developer Guide Preventative security best practices for Amazon Keyspaces The following security best practices are considered preventative because they can help you anticipate and prevent security incidents in Amazon Keyspaces. Use encryption at rest Amazon Keyspaces encrypts at rest all user data that's stored in tables by using encryption keys stored in AWS Key Management Service (AWS KMS). This provides an additional layer of data protection by securing your data from unauthorized access to the underlying storage. By default, Amazon Keyspaces uses an AWS owned key for encrypting all of your tables. If this key doesn’t exist, it's created for you. Service default keys can't be disabled. Alternatively, you can use a customer managed key for encryption at rest. For more information, see Amazon Keyspaces Encryption at Rest. Use IAM roles to authenticate access to Amazon Keyspaces For users, applications, and other AWS services to access Amazon Keyspaces, they must include valid AWS credentials in their AWS API requests. You should not store AWS credentials directly in the application
AmazonKeyspaces-234
AmazonKeyspaces.pdf
234
data from unauthorized access to the underlying storage. By default, Amazon Keyspaces uses an AWS owned key for encrypting all of your tables. If this key doesn’t exist, it's created for you. Service default keys can't be disabled. Alternatively, you can use a customer managed key for encryption at rest. For more information, see Amazon Keyspaces Encryption at Rest. Use IAM roles to authenticate access to Amazon Keyspaces For users, applications, and other AWS services to access Amazon Keyspaces, they must include valid AWS credentials in their AWS API requests. You should not store AWS credentials directly in the application or EC2 instance. These are long-term credentials that are not automatically rotated, and therefore could have significant business impact if they are compromised. An IAM role enables you to obtain temporary access keys that can be used to access AWS services and resources. For more information, see IAM Roles. Use IAM policies for Amazon Keyspaces base authorization When granting permissions, you decide who is getting them, which Amazon Keyspaces APIs they are getting permissions for, and the specific actions you want to allow on those resources. Implementing least privilege is key in reducing security risks and the impact that can result from errors or malicious intent. Attach permissions policies to IAM identities (that is, users, groups, and roles) and thereby grant permissions to perform operations on Amazon Keyspaces resources. You can do this by using the following: • AWS managed (predefined) policies • Customer managed policies Preventative security best practices 743 Amazon Keyspaces (for Apache Cassandra) Developer Guide Use IAM policy conditions for fine-grained access control When you grant permissions in Amazon Keyspaces, you can specify conditions that determine how a permissions policy takes effect. Implementing least privilege is key in reducing security risks and the impact that can result from errors or malicious intent. 
You can specify conditions when granting permissions using an IAM policy. For example, you can do the following: • Grant permissions to allow users read-only access to specific keyspaces or tables. • Grant permissions to allow a user write access to a certain table, based upon the identity of that user. For more information, see Identity-Based Policy Examples. Consider client-side encryption If you store sensitive or confidential data in Amazon Keyspaces, you might want to encrypt that data as close as possible to its origin so that your data is protected throughout its lifecycle. Encrypting your sensitive data in transit and at rest helps ensure that your plaintext data isn’t available to any third party. Detective security best practices for Amazon Keyspaces The following security best practices are considered detective because they can help you detect potential security weaknesses and incidents. Use AWS CloudTrail to monitor AWS Key Management Service (AWS KMS) AWS KMS key usage If you're using a customer managed AWS KMS key for encryption at rest, usage of this key is logged into AWS CloudTrail. CloudTrail provides visibility into user activity by recording actions taken on your account. CloudTrail records important information about each action, including who made the request, the services used, the actions performed, parameters for the actions, and the response elements returned by the AWS service. This information helps you track changes made to your AWS resources and troubleshoot operational issues. CloudTrail makes it easier to ensure compliance with internal policies and regulatory standards. You can use CloudTrail to audit key usage. CloudTrail creates log files that contain a history of AWS API calls and related events for your account. 
These log files include all AWS KMS API requests that were made using the console, AWS SDKs, and command line tools, in addition to Detective security best practices 744 Amazon Keyspaces (for Apache Cassandra) Developer Guide those made through integrated AWS services. You can use these log files to get information about when the AWS KMS key was used, the operation that was requested, the identity of the requester, the IP address that the request came from, and so on. For more information, see Logging AWS Key Management Service API Calls with AWS CloudTrail and the AWS CloudTrail User Guide. Use CloudTrail to monitor Amazon Keyspaces data definition language (DDL) operations CloudTrail provides visibility into user activity by recording actions taken on your account. CloudTrail records important information about each action, including who made the request, the services used, the actions performed, parameters for the actions, and the response elements returned by the AWS service. This information helps you to track changes made to your AWS resources and to troubleshoot operational issues. CloudTrail makes it easier to ensure compliance with internal policies and regulatory standards. All Amazon Keyspaces DDL operations are logged in CloudTrail automatically. DDL operations let you create and manage Amazon Keyspaces keyspaces and tables. When activity occurs in Amazon Keyspaces, that activity is recorded in a
AmazonKeyspaces-235
AmazonKeyspaces.pdf
235
actions taken on your account. CloudTrail records important information about each action, including who made the request, the services used, the actions performed, parameters for the actions, and the response elements returned by the AWS service. This information helps you to track changes made to your AWS resources and to troubleshoot operational issues. CloudTrail makes it easier to ensure compliance with internal policies and regulatory standards. All Amazon Keyspaces DDL operations are logged in CloudTrail automatically. DDL operations let you create and manage Amazon Keyspaces keyspaces and tables. When activity occurs in Amazon Keyspaces, that activity is recorded in a CloudTrail event along with other AWS service events in the event history. For more information, see Logging Amazon Keyspaces operations by using AWS CloudTrail. You can view, search, and download recent events in your AWS account. For more information, see Viewing events with CloudTrail event history in the AWS CloudTrail User Guide. For an ongoing record of events in your AWS account, including events for Amazon Keyspaces, create a trail. A trail enables CloudTrail to deliver log files to an Amazon Simple Storage Service (Amazon S3) bucket. By default, when you create a trail on the console, the trail applies to all AWS Regions. The trail logs events from all Regions in the AWS partition and delivers the log files to the S3 bucket that you specify. Additionally, you can configure other AWS services to further analyze and act upon the event data collected in CloudTrail logs. Tag your Amazon Keyspaces resources for identification and automation You can assign metadata to your AWS resources in the form of tags. Each tag is a simple label that consists of a customer-defined key and an optional value that can make it easier to manage, search for, and filter resources. Tagging allows for grouped controls to be implemented. 
Although there are no inherent types of tags, they enable you to categorize resources by purpose, owner, environment, or other criteria. The following are some examples: Detective security best practices 745 Amazon Keyspaces (for Apache Cassandra) Developer Guide • Access – Used to control access to Amazon Keyspaces resources based on tags. For more information, see the section called “Authorization based on Amazon Keyspaces tags”. • Security – Used to determine requirements such as data protection settings. • Confidentiality – An identifier for the specific data-confidentiality level that a resource supports. • Environment – Used to distinguish between development, test, and production infrastructure. For more information, see AWS tagging strategies and Adding tags and labels to resources. Detective security best practices 746 Amazon Keyspaces (for Apache Cassandra) Developer Guide CQL language reference for Amazon Keyspaces (for Apache Cassandra) After you connect to an Amazon Keyspaces endpoint, you use Cassandra Query Language (CQL) to work with your database. CQL is similar in many ways to Structured Query Language (SQL). • CQL elements – This section covers the fundamental elements of CQL supported in Amazon Keyspaces, including identifiers, constants, terms, and data types. It explains concepts like string types, numeric types, collection types, and more. • Data Definition Language (DDL) – DDL statements are used to manage data structures like keyspaces and tables in Amazon Keyspaces. This section covers statements for creating, altering, and dropping keyspaces and tables, as well as restoring tables from a point-in-time backup. • Data Manipulation Language (DML) – DML statements are used to manage data within tables. This section covers statements for selecting, inserting, updating, and deleting data. It also explains advanced querying capabilities like using the IN operator, ordering results, and pagination. 
• Built-in functions – Amazon Keyspaces supports a variety of built-in scalar functions that you can use in CQL statements. This section provides an overview of these functions, including examples of their usage. Throughout this topic, you'll find detailed syntax, examples, and best practices for using CQL effectively in Amazon Keyspaces. Topics • Cassandra Query Language (CQL) elements in Amazon Keyspaces • DDL statements (data definition language) in Amazon Keyspaces • DML statements (data manipulation language) in Amazon Keyspaces • Built-in functions in Amazon Keyspaces 747 Amazon Keyspaces (for Apache Cassandra) Developer Guide Cassandra Query Language (CQL) elements in Amazon Keyspaces Learn about the Cassandra Query Language (CQL) elements that are supported by Amazon Keyspaces, including identifiers, constants, terms, and data types. Topics • Identifiers • Constants • Terms • Data types • JSON encoding of Amazon Keyspaces data types Identifiers Identifiers (or names) are used to identify tables, columns, and other objects. An identifier can be quoted or not quoted. The following applies. identifier ::= unquoted_identifier | quoted_identifier unquoted_identifier ::= re('[a-zA-Z][a-zA-Z0-9_]*') quoted_identifier ::= '"' (any character where " can appear if doubled)+ '"' Constants The following constants are defined. constant ::= string | integer | float | boolean | uuid | blob | NULL string ::= '\'' (any
AmazonKeyspaces-236
AmazonKeyspaces.pdf
236
that are supported by Amazon Keyspaces, including identifiers, constants, terms, and data types. Topics • Identifiers • Constants • Terms • Data types • JSON encoding of Amazon Keyspaces data types Identifiers Identifiers (or names) are used to identify tables, columns, and other objects. An identifier can be quoted or not quoted. The following applies. identifier ::= unquoted_identifier | quoted_identifier unquoted_identifier ::= re('[a-zA-Z][a-zA-Z0-9_]*') quoted_identifier ::= '"' (any character where " can appear if doubled)+ '"' Constants The following constants are defined. constant ::= string | integer | float | boolean | uuid | blob | NULL string ::= '\'' (any character where ' can appear if doubled)+ '\'' '$$' (any character other than '$$') '$$' integer ::= re('-?[0-9]+') float ::= re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY boolean ::= TRUE | FALSE uuid ::= hex{8}-hex{4}-hex{4}-hex{4}-hex{12} hex ::= re("[0-9a-fA-F]") blob ::= '0' ('x' | 'X') hex+ Language elements 748 Amazon Keyspaces (for Apache Cassandra) Developer Guide Terms A term denotes the kind of values that are supported. Terms are defined by the following. term ::= constant | literal | function_call | arithmetic_operation | type_hint | bind_marker literal ::= collection_literal | tuple_literal function_call ::= identifier '(' [ term (',' term)* ] ')' arithmetic_operation ::= '-' term | term ('+' | '-' | '*' | '/' | '%') term Data types Amazon Keyspaces supports the following data types: String types Data type ascii text varchar Numeric types Data type bigint counter decimal double Terms Description Represents an ASCII character string. Represents a UTF-8 encoded string. Represents a UTF-8 encoded string (varchar is an alias for text). Description Represents a 64-bit signed long. Represents a 64-bit signed integer counter. For more information, see the section called “Counters”. Represents a variable-precision decimal. Represents a 64-bit IEEE 754 floating point. 
749 Amazon Keyspaces (for Apache Cassandra) Developer Guide Data type float int varint Counters Description Represents a 32-bit IEEE 754 floating point. Represents a 32-bit signed int. Represents an integer of arbitrary precision. A counter column contains a 64-bit signed integer. The counter value is incremented or decremented using the section called “UPDATE” statement, and it cannot be set directly. This makes counter columns useful for tracking counts. For example, you can use counters to track the number of entries in a log file or the number of times a post has been viewed on a social network. The following restrictions apply to counter columns: • A column of type counter cannot be part of the primary key of a table. • In a table that contains one or more columns of type counter, all columns in that table must be of type counter. In cases where a counter update fails (for example, because of timeouts or loss of connection with Amazon Keyspaces), the client doesn't know whether the counter value was updated. If the update is retried, the update to the counter value might get applied a second time. Blob type Data type blob Boolean type Data type boolean Data types Description Represents arbitrary bytes. Description Represents true or false. 750 Amazon Keyspaces (for Apache Cassandra) Time-related types Data type date timestamp Developer Guide Description A string in the format <yyyy>-<mm>-<dd> . 64-bit signed integer representing the date and time since epoch (January 1 1970 at 00:00:00 GMT) in milliseconds. timeuuid Represents a version 1 UUID. Collection types Data type list map set Description Represents an ordered collection of literal elements. Represents an unordered collection of key- value pairs. Represents an unordered collection of one or more literal elements. You declare a collection column by using the collection type followed by another data type (for example, TEXT or INT) in angled brackets. 
You can create a column with a SET of TEXT, or you can create a MAP of TEXT and INT key-value pairs, as shown in the following example. SET <TEXT> MAP <TEXT, INT> A non-frozen collection allows you to make updates to each individual collection element. Client- side timestamps and Time to Live (TTL) settings are stored for individual elements. When you use the FROZEN keyword on a collection type, the values of the collection are serialized into a single immutable value, and Amazon Keyspaces treats them like a BLOB. This is a frozen Data types 751 Amazon Keyspaces (for Apache Cassandra) Developer Guide collection. An INSERT or UPDATE statement overwrites the entire frozen collection. You can't make updates to individual elements inside a frozen collection. Client-side timestamps and Time to Live (TTL) settings apply to the entire frozen collection, not to individual elements. Frozen collection columns can be part of the PRIMARY KEY of a table. You can nest frozen collections. For example, you can define a MAP within a SET if the MAP is using the FROZEN keyword, as shown in the following example. SET <FROZEN <MAP <TEXT, INT>>> Amazon Keyspaces supports
AmazonKeyspaces-237
AmazonKeyspaces.pdf
237
Data types 751 Amazon Keyspaces (for Apache Cassandra) Developer Guide collection. An INSERT or UPDATE statement overwrites the entire frozen collection. You can't make updates to individual elements inside a frozen collection. Client-side timestamps and Time to Live (TTL) settings apply to the entire frozen collection, not to individual elements. Frozen collection columns can be part of the PRIMARY KEY of a table. You can nest frozen collections. For example, you can define a MAP within a SET if the MAP is using the FROZEN keyword, as shown in the following example. SET <FROZEN <MAP <TEXT, INT>>> Amazon Keyspaces supports nesting of up to 8 levels of frozen collections by default. For more information, see the section called “Amazon Keyspaces service quotas”. For more information about functional differences with Apache Cassandra, see the section called “FROZEN collections”. For more information about CQL syntax, see the section called “CREATE TABLE” and the section called “ALTER TABLE”. Tuple type The tuple data type represents a bounded group of literal elements. You can use a tuple as an alternative to a user defined type. You don't need to use the FROZEN keyword for tuples. This is because a tuple is always frozen and you can't update elements individually. Other types Data type inet Static Description A string representing an IP address, in either IPv4 or IPv6 format. In an Amazon Keyspaces table with clustering columns, you can use the STATIC keyword to create a static column of any type. The following statement is an example of this. my_column INT STATIC Data types 752 Amazon Keyspaces (for Apache Cassandra) Developer Guide For more information about working with static columns, see the section called “Estimate capacity consumption of static columns”. User-defined types (UDTs) Amazon Keyspaces supports user-defined types (UDTs). You can use any valid Amazon Keyspaces data type to create a UDT, including collections and other existing UDTs. 
You create UDTs in a keyspace and can use them to define columns in any table in the keyspace. For more information about CQL syntax, see the section called “Types”. For more information about working with UDTs, see the section called “User-defined types (UDTs)”. To review how many UDTs are supported per keyspace, supported levels of nesting, and other default values and quotas related to UDTs, see the section called “Quotas and default values for user-defined types (UDTs) in Amazon Keyspaces”. JSON encoding of Amazon Keyspaces data types Amazon Keyspaces offers the same JSON data type mappings as Apache Cassandra. The following table describes the data types Amazon Keyspaces accepts in INSERT JSON statements and the data types Amazon Keyspaces uses when returning data with the SELECT JSON statement. For single-field data types such as float, int, UUID, and date, you also can insert data as a string. For compound data types and collections, such as tuple, map, and list, you can also insert data as JSON or as an encoded JSON string. JSON data type Data types accepted in INSERT JSON statements Data types returned in SELECT JSON statements Notes ascii string string Uses JSON character bigint integer, string integer blob string string escape \u. String must be a valid 64-bit integer. String should begin with 0x followed by an even number of hex digits. JSON encoding of Amazon Keyspaces data types 753 Amazon Keyspaces (for Apache Cassandra) Developer Guide JSON data type Data types accepted in INSERT JSON statements Data types returned in SELECT JSON statements Notes boolean boolean, string boolean date string string decimal integer, float, float string double integer, float, float string float integer, float, float inet int list map string string integer, string integer list, string list map, string map smallint integer, string integer set list, string list String must be either true or false. Date in format YYYY- MM-DD , timezone UTC. 
Can exceed 32-bit or 64-bit IEEE-754 floating point precision in client-si de decoder. String must be a valid integer or float. String must be a valid integer or float. String must be a valid 32-bit integer. Uses the native JSON list representation. Uses the native JSON map representation. String must be a valid 16-bit integer. Uses the native JSON list representation. string IPv4 or IPv6 address. JSON encoding of Amazon Keyspaces data types 754 Amazon Keyspaces (for Apache Cassandra) Developer Guide JSON data type Data types accepted in INSERT JSON statements Data types returned in SELECT JSON statements Notes string string Uses JSON character text time string string timestamp integer, string string timeuuid string string tinyint integer, string integer tuple list, string list UDT map, string map uuid string string escape \u. Time of day in format HH-MM-SS[ .fffffffff] . A timestamp. String constants allow you to store timestamps as dates. Date stamps with format YYYY- MM-DD HH:MM:SS. SSS are returned. Type 1 UUID. See constants for the UUID
AmazonKeyspaces-238
AmazonKeyspaces.pdf
238
Amazon Keyspaces data types 754 Amazon Keyspaces (for Apache Cassandra) Developer Guide JSON data type Data types accepted in INSERT JSON statements Data types returned in SELECT JSON statements Notes string string Uses JSON character text time string string timestamp integer, string string timeuuid string string tinyint integer, string integer tuple list, string list UDT map, string map uuid string string escape \u. Time of day in format HH-MM-SS[ .fffffffff] . A timestamp. String constants allow you to store timestamps as dates. Date stamps with format YYYY- MM-DD HH:MM:SS. SSS are returned. Type 1 UUID. See constants for the UUID format. String must be a valid 8-bit integer. Uses the native JSON list representation. Uses the native JSON map representation with field names as keys. See constants for the UUID format. JSON encoding of Amazon Keyspaces data types 755 Amazon Keyspaces (for Apache Cassandra) Developer Guide JSON data type Data types accepted in INSERT JSON statements Data types returned in SELECT JSON statements Notes varchar string string Uses JSON character varint integer, string integer escape \u. Variable length; might overflow 32- bit or 64-bit integers in client-side decoder. DDL statements (data definition language) in Amazon Keyspaces Data definition language (DDL) is the set of Cassandra Query Language (CQL) statements that you use to manage data structures in Amazon Keyspaces (for Apache Cassandra), such as keyspaces and tables. You use DDL to create these data structures, modify them after they are created, and remove them when they're no longer in use. Amazon Keyspaces performs DDL operations asynchronously. For more information about how to confirm that an asynchronous operation has completed, see the section called “Asynchronous creation and deletion of keyspaces and tables”. 
The following DDL statements are supported: • CREATE KEYSPACE • ALTER KEYSPACE • DROP KEYSPACE • USE • CREATE TABLE • ALTER TABLE • RESTORE TABLE • DROP TABLE • CREATE TYPE • DROP TYPE DDL statements 756 Amazon Keyspaces (for Apache Cassandra) Developer Guide Topics • Keyspaces • Tables • User-defined types (UDTs) Keyspaces A keyspace groups related tables that are relevant for one or more applications. In terms of a relational database management system (RDBMS), keyspaces are roughly similar to databases, tablespaces, or similar constructs. Note In Apache Cassandra, keyspaces determine how data is replicated among multiple storage nodes. However, Amazon Keyspaces is a fully managed service: The details of its storage layer are managed on your behalf. For this reason, keyspaces in Amazon Keyspaces are logical constructs only, and aren't related to the underlying physical storage. For information about quota limits and constraints for Amazon Keyspaces keyspaces, see Quotas. Statements for keyspaces • CREATE KEYSPACE • ALTER KEYSPACE • DROP KEYSPACE • USE CREATE KEYSPACE Use the CREATE KEYSPACE statement to create a new keyspace. Syntax create_keyspace_statement ::= CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options Keyspaces 757 Amazon Keyspaces (for Apache Cassandra) Developer Guide Where: • keyspace_name is the name of the keyspace to be created. • options are one or more of the following: • REPLICATION – A map that indicates the replication strategy for the keyspace: • SingleRegionStrategy – For a single-Region keyspace. (Required) • NetworkTopologyStrategy – Specify at least two AWS Regions. The replication factor for each Region is three. (Optional) • DURABLE_WRITES – Writes to Amazon Keyspaces are always durable, so this option isn't required. However, if specified, the value must be true. • TAGS – A list of key-value pair tags to be attached to the resource when you create it. (Optional) Example Create a keyspace as follows. 
CREATE KEYSPACE my_keyspace WITH REPLICATION = {'class': 'SingleRegionStrategy'} and TAGS ={'key1':'val1', 'key2':'val2'} ; To create a multi-Region keyspace, specify NetworkTopologyStrategy and include at least two AWS Regions. The replication factor for each Region is three. CREATE KEYSPACE my_keyspace WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'us-east-1':'3', 'ap- southeast-1':'3','eu-west-1':'3'}; ALTER KEYSPACE You can use the ALTER KEYSPACE WITH statement for the following options • REPLICATION – Use this option to add a new AWS Region replica to a keyspace. You can add a new Region to a single-Region or to a multi-Region keyspace. • TAGS – Use this option to add or remove tags from a keyspace. Syntax Keyspaces 758 Amazon Keyspaces (for Apache Cassandra) Developer Guide alter_keyspace_statement ::= ALTER KEYSPACE keyspace_name WITH options Where: • keyspace_name is the name of the keyspace to be altered. • options are one of the following: • ADD | DROP TAGS – A list of key-value pair tags to be added or removed from the keyspace. • REPLICATION – A map that indicates the replication strategy for the keyspace; • class– NetworkTopologyStrategy defines the keyspace as a multi-Region keyspace. • region– Specify one additional AWS Region for this keyspace. The replication factor for each Region is three. • CLIENT_SIDE_TIMESTAMPS – The default is DISABLED. You can only
AmazonKeyspaces-239
AmazonKeyspaces.pdf
239
Cassandra) Developer Guide alter_keyspace_statement ::= ALTER KEYSPACE keyspace_name WITH options Where: • keyspace_name is the name of the keyspace to be altered. • options are one of the following: • ADD | DROP TAGS – A list of key-value pair tags to be added or removed from the keyspace. • REPLICATION – A map that indicates the replication strategy for the keyspace; • class– NetworkTopologyStrategy defines the keyspace as a multi-Region keyspace. • region– Specify one additional AWS Region for this keyspace. The replication factor for each Region is three. • CLIENT_SIDE_TIMESTAMPS – The default is DISABLED. You can only change the status to ENABLED. Examples Alter a keyspace as shown in the following example to add tags. ALTER KEYSPACE my_keyspace ADD TAGS {'key1':'val1', 'key2':'val2'}; To add a third Region to a multi-Region keyspace, you can use the following statement. ALTER KEYSPACE my_keyspace WITH REPLICATION = { 'class': 'NetworkTopologyStrategy', 'us-east-1': '3', 'us-west-2': '3', 'us-west-1': '3' } AND CLIENT_SIDE_TIMESTAMPS = {'status': 'ENABLED'}; DROP KEYSPACE Use the DROP KEYSPACE statement to remove a keyspace—including all of its contents, such as tables. Syntax Keyspaces 759 Amazon Keyspaces (for Apache Cassandra) Developer Guide drop_keyspace_statement ::= DROP KEYSPACE [ IF EXISTS ] keyspace_name Where: • keyspace_name is the name of the keyspace to be dropped. Example DROP KEYSPACE my_keyspace; USE Use the USE statement to define the current keyspace. This allows you to refer to objects bound to a specific keyspace, for example tables and types, without using the fully qualified name that includes the keyspace prefix. Syntax use_statement ::= USE keyspace_name Where: • keyspace_name is the name of the keyspace to be used. Example USE my_keyspace; Tables Tables are the primary data structures in Amazon Keyspaces. Data in a table is organized into rows and columns. 
A subset of those columns is used to determine partitioning (and ultimately data placement) through the specification of a partition key. Another set of columns can be defined into clustering columns, which means that they can participate as predicates in query execution. Tables 760 Amazon Keyspaces (for Apache Cassandra) Developer Guide By default, new tables are created with on-demand throughput capacity. You can change the capacity mode for new and existing tables. For more information about read/write capacity throughput modes, see the section called “Configure read/write capacity modes”. For tables in provisioned mode, you can configure optional AUTOSCALING_SETTINGS. For more information about Amazon Keyspaces auto scaling and the available options, see the section called “Configure automatic scaling on an existing table”. For information about quota limits and constraints for Amazon Keyspaces tables, see Quotas. Statements for tables • CREATE TABLE • ALTER TABLE • RESTORE TABLE • DROP TABLE CREATE TABLE Use the CREATE TABLE statement to create a new table. Syntax create_table_statement ::= CREATE TABLE [ IF NOT EXISTS ] table_name '(' column_definition ( ',' column_definition )* [ ',' PRIMARY KEY '(' primary_key ')' ] ')' [ WITH table_options ] column_definition ::= column_name cql_type [ FROZEN ][ STATIC ][ PRIMARY KEY] primary_key ::= partition_key [ ',' clustering_columns ] partition_key ::= column_name | '(' column_name ( ',' column_name )* ')' clustering_columns ::= column_name ( ',' column_name )* table_options ::= [table_options] | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ] Tables 761 Amazon Keyspaces (for Apache Cassandra) Developer Guide | options | CUSTOM_PROPERTIES | AUTOSCALING_SETTINGS | default_time_to_live | TAGS clustering_order ::= column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )* Where: • table_name is the name of the table to be created. The fully qualified name includes the keyspace prefix. 
Alternatively, you can set the current keyspace with the USE keyspace statement. • column_definition consists of the following: • column_name – The name of the column. • cql_type – An Amazon Keyspaces data type (see Data types). • FROZEN – Designates this column that is user-defined or of type collection (for example, LIST, SET, or MAP) as frozen. A frozen collection is serialized into a single immutable value and treated like a BLOB. For more information, see the section called “Collection types”. • STATIC – Designates this column as static. Static columns store values that are shared by all rows in the same partition. • PRIMARY KEY – Designates this column as the table's primary key. • primary_key consists of the following: • partition_key • clustering_columns • partition_key: • The partition key can be a single column, or it can be a compound value composed of two or more columns. The partition key portion of the primary key is required and determines how Amazon Keyspaces stores your data. • clustering_columns: • The optional clustering column portion of your primary key determines how the data is clustered and sorted within each partition. • table_options consist of the following: • CLUSTERING ORDER BY – The default CLUSTERING ORDER on a table is composed of your clustering keys
AmazonKeyspaces-240
AmazonKeyspaces.pdf
240
key. • primary_key consists of the following: • partition_key • clustering_columns • partition_key: • The partition key can be a single column, or it can be a compound value composed of two or more columns. The partition key portion of the primary key is required and determines how Amazon Keyspaces stores your data. • clustering_columns: • The optional clustering column portion of your primary key determines how the data is clustered and sorted within each partition. • table_options consist of the following: • CLUSTERING ORDER BY – The default CLUSTERING ORDER on a table is composed of your clustering keys in the ASC (ascending) sort direction. Specify it to override the default sort behavior. Tables 762 Amazon Keyspaces (for Apache Cassandra) Developer Guide • CUSTOM_PROPERTIES – A map of settings that are specific to Amazon Keyspaces. • capacity_mode: Specifies the read/write throughput capacity mode for the table. The options are throughput_mode:PAY_PER_REQUEST and throughput_mode:PROVISIONED. The provisioned capacity mode requires read_capacity_units and write_capacity_units as inputs. The default is throughput_mode:PAY_PER_REQUEST. • client_side_timestamps: Specifies if client-side timestamps are enabled or disabled for the table. The options are {'status': 'enabled'} and {'status': 'disabled'}. If it's not specified, the default is status:disabled. After client-side timestamps are enabled for a table, this setting cannot be disabled. • encryption_specification: Specifies the encryption options for encryption at rest. If it's not specified, the default is encryption_type:AWS_OWNED_KMS_KEY. The encryption option customer managed key requires the AWS KMS key in Amazon Resource Name (ARN) format as input: kms_key_identifier:ARN. • point_in_time_recovery: Specifies if point-in-time restore is enabled or disabled for the table. The options are status:enabled and status:disabled. If it's not specified, the default is status:disabled. 
• replica_updates: Specifies the settings of a multi-Region table that are specific to an AWS Region. For a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters. For more information and examples, see the section called “Create a multi-Region table in provisioned mode”. • region – The AWS Region of the table replica with the following settings: • read_capacity_units • TTL: Enables Time to Live custom settings for the table. To enable, use status:enabled. The default is status:disabled. After TTL is enabled, you can't disable it for the table. • AUTOSCALING_SETTINGS includes the following optional settings for tables in provisioned mode. For more information and examples, see the section called “Create a new table with automatic scaling”. • provisioned_write_capacity_autoscaling_update: • autoscaling_disabled – To enable auto scaling for write capacity, set the value to false. The default is true. (Optional) Tables 763 Amazon Keyspaces (for Apache Cassandra) Developer Guide • minimum_units – The minimum level of write throughput that the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default). • maximum_units – The maximum level of write throughput that the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default). • scaling_policy – Amazon Keyspaces supports the target tracking policy. The auto scaling target is the provisioned write capacity of the table. • target_tracking_scaling_policy_configuration – To define the target tracking policy, you must define the target value. For more information about target tracking and cooldown periods, see Target Tracking Scaling Policies in the Application Auto Scaling User Guide. • target_value – The target utilization rate of the table. 
Amazon Keyspaces auto scaling ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define target_value as a percentage. A double between 20 and 90. (Required) • scale_in_cooldown – A cooldown period in seconds between scaling activities that lets the table stabilize before another scale in activity starts. If no value is provided, the default is 0. (Optional) • scale_out_cooldown – A cooldown period in seconds between scaling activities that lets the table stabilize before another scale out activity starts. If no value is provided, the default is 0. (Optional) • disable_scale_in: A boolean that specifies if scale-in is disabled or enabled for the table. This parameter is disabled by default. To turn on scale-in, set the boolean value to FALSE. This means that capacity is automatically scaled down for a table on your behalf. (Optional) • provisioned_read_capacity_autoscaling_update: • autoscaling_disabled – To enable auto scaling for read capacity, set the value to false. The default is true. (Optional) • minimum_units – The minimum level of throughput that the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default). Tables 764 Amazon Keyspaces (for Apache Cassandra) Developer Guide • maximum_units – The maximum level of throughput that the table should
AmazonKeyspaces-241
AmazonKeyspaces.pdf
241
set the boolean value to FALSE. This means that capacity is automatically scaled down for a table on your behalf. (Optional) • provisioned_read_capacity_autoscaling_update: • autoscaling_disabled – To enable auto scaling for read capacity, set the value to false. The default is true. (Optional) • minimum_units – The minimum level of throughput that the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default). Tables 764 Amazon Keyspaces (for Apache Cassandra) Developer Guide • maximum_units – The maximum level of throughput that the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default). • scaling_policy – Amazon Keyspaces supports the target tracking policy. The auto scaling target is the provisioned read capacity of the table. • target_tracking_scaling_policy_configuration – To define the target tracking policy, you must define the target value. For more information about target tracking and cooldown periods, see Target Tracking Scaling Policies in the Application Auto Scaling User Guide. • target_value – The target utilization rate of the table. Amazon Keyspaces auto scaling ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define target_value as a percentage. A double between 20 and 90. (Required) • scale_in_cooldown – A cooldown period in seconds between scaling activities that lets the table stabilize before another scale in activity starts. If no value is provided, the default is 0. (Optional) • scale_out_cooldown – A cooldown period in seconds between scaling activities that lets the table stabilize before another scale out activity starts. If no value is provided, the default is 0. (Optional) • disable_scale_in: A boolean that specifies if scale-in is disabled or enabled for the table. This parameter is disabled by default. 
To turn on scale-in, set the boolean value to FALSE. This means that capacity is automatically scaled down for a table on your behalf. (Optional) • replica_updates: Specifies the AWS Region specific auto scaling settings of a multi- Region table. For a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters. For more information and examples, see the section called “Update provisioned capacity and auto scaling settings for a multi-Region table”. • region – The AWS Region of the table replica with the following settings: • provisioned_read_capacity_autoscaling_update • autoscaling_disabled – To enable auto scaling for the table's read capacity, set the value to false. The default is true. (Optional) Tables 765 Amazon Keyspaces (for Apache Cassandra) Developer Guide Note Auto scaling for a multi-Region table has to be either enabled or disabled for all replicas of the table. • minimum_units – The minimum level of read throughput that the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default). • maximum_units – The maximum level of read throughput that the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default). • scaling_policy – Amazon Keyspaces supports the target tracking policy. The auto scaling target is the provisioned read capacity of the table. • target_tracking_scaling_policy_configuration – To define the target tracking policy, you must define the target value. For more information about target tracking and cooldown periods, see Target Tracking Scaling Policies in the Application Auto Scaling User Guide. • target_value – The target utilization rate of the table. 
Amazon Keyspaces auto scaling ensures that the ratio of consumed read capacity to provisioned read capacity stays at or near this value. You define target_value as a percentage. A double between 20 and 90. (Required) • scale_in_cooldown – A cooldown period in seconds between scaling activities that lets the table stabilize before another scale in activity starts. If no value is provided, the default is 0. (Optional) • scale_out_cooldown – A cooldown period in seconds between scaling activities that lets the table stabilize before another scale out activity starts. If no value is provided, the default is 0. (Optional) • disable_scale_in: A boolean that specifies if scale-in is disabled or enabled for the table. This parameter is disabled by default. To turn on scale-in, set the boolean value to FALSE. This means that read capacity is automatically scaled down for a table on your behalf. (Optional) • default_time_to_live – The default Time to Live setting in seconds for the table. • TAGS – A list of key-value pair tags to be attached to the resource when it's created. • clustering_order consists of the following: Tables 766 Amazon Keyspaces (for Apache Cassandra) Developer Guide • column_name – The
AmazonKeyspaces-242
AmazonKeyspaces.pdf
242
0. (Optional) • disable_scale_in: A boolean that specifies if scale-in is disabled or enabled for the table. This parameter is disabled by default. To turn on scale-in, set the boolean value to FALSE. This means that read capacity is automatically scaled down for a table on your behalf. (Optional) • default_time_to_live – The default Time to Live setting in seconds for the table. • TAGS – A list of key-value pair tags to be attached to the resource when it's created. • clustering_order consists of the following: Tables 766 Amazon Keyspaces (for Apache Cassandra) Developer Guide • column_name – The name of the column. • ASC | DESC – Sets the ascendant (ASC) or descendant (DESC) order modifier. If it's not specified, the default order is ASC. Example CREATE TABLE IF NOT EXISTS my_keyspace.my_table ( id text, name text, region text, division text, project text, role text, pay_scale int, vacation_hrs float, manager_id text, PRIMARY KEY (id,division)) WITH CUSTOM_PROPERTIES={ 'capacity_mode':{ 'throughput_mode': 'PROVISIONED', 'read_capacity_units': 10, 'write_capacity_units': 20 }, 'point_in_time_recovery':{'status': 'enabled'}, 'encryption_specification':{ 'encryption_type': 'CUSTOMER_MANAGED_KMS_KEY', 'kms_key_identifier':'arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111' } } AND CLUSTERING ORDER BY (division ASC) AND TAGS={'key1':'val1', 'key2':'val2'} AND default_time_to_live = 3024000; In a table that uses clustering columns, non-clustering columns can be declared as static in the table definition. For more information about static columns, see the section called “Estimate capacity consumption of static columns”. Example Tables 767 Amazon Keyspaces (for Apache Cassandra) Developer Guide CREATE TABLE my_keyspace.my_table ( id int, name text, region text, division text, project text STATIC, PRIMARY KEY (id,division)); You can create a table with a column that uses a user-defined type (UDT). 
The first statement in the examples creates a type, the second statement creates a table with a column that uses the type. Example CREATE TYPE my_keyspace."udt""N@ME" (my_field int); CREATE TABLE my_keyspace.my_table (my_col1 int primary key, my_col2 "udt""N@ME"); ALTER TABLE Use the ALTER TABLE statement to add new columns, add tags, or change the table's custom properties. Syntax alter_table_statement ::= ALTER TABLE table_name [ ADD ( column_definition | column_definition_list) ] [[ADD | DROP] TAGS {'key1':'val1', 'key2':'val2'}] [ WITH table_options [ , ... ] ] ; column_definition ::= column_name cql_type Where: • table_name is the name of the table to be altered. • column_definition is the name of the column and data type to be added. • column_definition_list is a comma-separated list of columns placed inside parentheses. Tables 768 Amazon Keyspaces (for Apache Cassandra) Developer Guide • table_options consist of the following: • CUSTOM_PROPERTIES – A map of settings specific to Amazon Keyspaces. • capacity_mode: Specifies the read/write throughput capacity mode for the table. The options are throughput_mode:PAY_PER_REQUEST and throughput_mode:PROVISIONED. The provisioned capacity mode requires read_capacity_units and write_capacity_units as inputs. The default is throughput_mode:PAY_PER_REQUEST. • client_side_timestamps: Specifies if client-side timestamps are enabled or disabled for the table. The options are {'status': 'enabled'} and {'status': 'disabled'}. If it's not specified, the default is status:disabled. After client-side timestamps are enabled for a table, this setting cannot be disabled. • encryption_specification: Specifies the encryption option for encryption at rest. The options are encryption_type:AWS_OWNED_KMS_KEY and encryption_type:CUSTOMER_MANAGED_KMS_KEY. The encryption option customer managed key requires the AWS KMS key in Amazon Resource Name (ARN) format as input: kms_key_identifier:ARN. 
• point_in_time_recovery: Specifies if point-in-time restore is enabled or disabled for the table. The options are status:enabled and status:disabled. The default is status:disabled. • replica_updates: Specifies the AWS Region specific settings of a multi-Region table. For a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters. For more information and examples, see the section called “Update provisioned capacity and auto scaling settings for a multi-Region table”. • region – The AWS Region of the table replica with the following settings: • read_capacity_units • ttl: Enables Time to Live custom settings for the table. To enable, use status:enabled. The default is status:disabled. After ttl is enabled, you can't disable it for the table. • AUTOSCALING_SETTINGS includes the optional auto scaling settings for provisioned tables. For syntax and detailed descriptions, see the section called “CREATE TABLE”. For examples, see the section called “Configure automatic scaling on an existing table”. • default_time_to_live: The default Time to Live setting in seconds for the table. • TAGS is a list of key-value pair tags to be attached to the resource. Tables 769 Amazon Keyspaces (for Apache Cassandra) Developer Guide Note With ALTER TABLE, you can only change a single custom property. You can't combine more than one ALTER TABLE command in the same statement. Examples The following statement shows how to add a column to an existing table. ALTER TABLE mykeyspace.mytable ADD (ID int); This statement shows how to add two collection columns to an existing table: • A frozen collection column col_frozen_list that contains a nested frozen collection • A non-frozen collection column col_map that contains a nested
AmazonKeyspaces-243
AmazonKeyspaces.pdf
243
list of key-value pair tags to be attached to the resource. Tables 769 Amazon Keyspaces (for Apache Cassandra) Developer Guide Note With ALTER TABLE, you can only change a single custom property. You can't combine more than one ALTER TABLE command in the same statement. Examples The following statement shows how to add a column to an existing table. ALTER TABLE mykeyspace.mytable ADD (ID int); This statement shows how to add two collection columns to an existing table: • A frozen collection column col_frozen_list that contains a nested frozen collection • A non-frozen collection column col_map that contains a nested frozen collection ALTER TABLE my_Table ADD(col_frozen_list FROZEN<LIST<FROZEN<SET<TEXT>>>>, col_map MAP<INT, FROZEN<SET<INT>>>); The following example shows how to add a column that uses a user-defined type (UDT) to a table. ALTER TABLE my_keyspace.my_table ADD (my_column my_udt); To change a table's capacity mode and specify read and write capacity units, you can use the following statement. ALTER TABLE mykeyspace.mytable WITH CUSTOM_PROPERTIES={'capacity_mode': {'throughput_mode': 'PROVISIONED', 'read_capacity_units': 10, 'write_capacity_units': 20}}; The following statement specifies a customer managed KMS key for the table. ALTER TABLE mykeyspace.mytable WITH CUSTOM_PROPERTIES={ 'encryption_specification':{ 'encryption_type': 'CUSTOMER_MANAGED_KMS_KEY', 'kms_key_identifier':'arn:aws:kms:eu- west-1:5555555555555:key/11111111-1111-111-1111-111111111111' Tables 770 Amazon Keyspaces (for Apache Cassandra) Developer Guide } }; To enable point-in-time restore for a table, you can use the following statement. ALTER TABLE mykeyspace.mytable WITH CUSTOM_PROPERTIES={'point_in_time_recovery': {'status': 'enabled'}}; To set a default Time to Live value in seconds for a table, you can use the following statement. ALTER TABLE my_table WITH default_time_to_live = 2592000; This statement enables custom Time to Live settings for a table. 
ALTER TABLE mytable WITH CUSTOM_PROPERTIES={'ttl':{'status': 'enabled'}}; RESTORE TABLE Use the RESTORE TABLE statement to restore a table to a point in time. This statement requires point-in-time recovery to be enabled on a table. For more information, see the section called “Backup and restore with point-in-time recovery”. Syntax restore_table_statement ::= RESTORE TABLE restored_table_name FROM TABLE source_table_name [ WITH table_options [ , ... ] ]; Where: • restored_table_name is the name of the restored table. • source_table_name is the name of the source table. • table_options consists of the following: • restore_timestamp is the restore point time in ISO 8601 format. If it's not specified, the current timestamp is used. • CUSTOM_PROPERTIES – A map of settings specific to Amazon Keyspaces. • capacity_mode: Specifies the read/write throughput capacity mode for the table. The options are throughput_mode:PAY_PER_REQUEST and Tables 771 Amazon Keyspaces (for Apache Cassandra) Developer Guide throughput_mode:PROVISIONED. The provisioned capacity mode requires read_capacity_units and write_capacity_units as inputs. The default is the current setting from the source table. • encryption_specification: Specifies the encryption option for encryption at rest. The options are encryption_type:AWS_OWNED_KMS_KEY and encryption_type:CUSTOMER_MANAGED_KMS_KEY. The encryption option customer managed key requires the AWS KMS key in Amazon Resource Name (ARN) format as input: kms_key_identifier:ARN. To restore a table encrypted with a customer managed key to a table encrypted with an AWS owned key, Amazon Keyspaces requires access to the AWS KMS key of the source table. • point_in_time_recovery: Specifies if point-in-time restore is enabled or disabled for the table. The options are status:enabled and status:disabled. Unlike when you create new tables, the default status for restored tables is status:enabled because the setting is inherited from the source table. 
To disable PITR for restored tables, you must set status:disabled explicitly. • replica_updates: Specifies the AWS Region specific settings of a multi-Region table. For a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters. • region – The AWS Region of the table replica with the following settings: • read_capacity_units • AUTOSCALING_SETTINGS includes the optional auto scaling settings for provisioned tables. For detailed syntax and descriptions, see the section called “CREATE TABLE”. • TAGS is a list of key-value pair tags to be attached to the resource. Note Deleted tables can only be restored to the time of deletion. Example RESTORE TABLE mykeyspace.mytable_restored from table mykeyspace.my_table WITH restore_timestamp = '2020-06-30T04:05:00+0000' AND custom_properties = {'point_in_time_recovery':{'status':'disabled'}, 'capacity_mode':{'throughput_mode': 'PROVISIONED', 'read_capacity_units': 10, 'write_capacity_units': 20}} Tables 772 Amazon Keyspaces (for Apache Cassandra) Developer Guide AND TAGS={'key1':'val1', 'key2':'val2'}; DROP TABLE Use the DROP TABLE statement to remove a table from the keyspace. Syntax drop_table_statement ::= DROP TABLE [ IF EXISTS ] table_name Where: • IF EXISTS prevents DROP TABLE from failing if the table doesn't exist. (Optional) • table_name is the name of the table to be dropped. Example DROP TABLE my_keyspace.my_table; User-defined types (UDTs) UDT – A grouping of fields and data types that you can use to define a single column in Amazon Keyspaces. Valid data types for UDTs are all supported Cassandra data types, including collections and other UDTs that you've already created in the same keyspace. For more information about supported Cassandra data types, see the section called “Cassandra data type support”. user_defined_type::= udt_name udt_name::= [ keyspace_name '.' ] identifier
AmazonKeyspaces-244
AmazonKeyspaces.pdf
244
IF EXISTS prevents DROP TABLE from failing if the table doesn't exist. (Optional) • table_name is the name of the table to be dropped. Example DROP TABLE my_keyspace.my_table; User-defined types (UDTs) UDT – A grouping of fields and data types that you can use to define a single column in Amazon Keyspaces. Valid data types for UDTs are all supported Cassandra data types, including collections and other UDTs that you've already created in the same keyspace. For more information about supported Cassandra data types, see the section called “Cassandra data type support”. user_defined_type::= udt_name udt_name::= [ keyspace_name '.' ] identifier Statements for types • CREATE TYPE • DROP TYPE CREATE TYPE Use the CREATE TYPE statement to create a new type. Types 773 Amazon Keyspaces (for Apache Cassandra) Developer Guide Syntax create_type_statement ::= CREATE TYPE [ IF NOT EXISTS ] udt_name '('field_definition ( ',' field_definition)* ')' field_definition::= identifier cql_type Where: • IF NOT EXISTS prevents CREATE TYPE from failing if the type already exists. (Optional) • udt_name is the fully-qualified name of the UDT in type format, for example my_keyspace.my_type. If you define the current keyspace with the USE statement, you don't need to specify the keyspace name. • field_definition consists of a name and a type. The following table shows examples of allowed UDT names. The first columns shows how to enter the name when you create the type, the second column shows how Amazon Keyspaces formats the name internally. Amazon Keyspaces expects the formatted name for operations like GetType. Entered name Formatted name Note MY_UDT my_udt Without double-quotes, Amazon Keyspaces converts all upper-case characters to lower-case. "MY_UDT" MY_UDT "1234" 1234 With double-quotes, Amazon Keyspaces respects the upper-case characters, and removes the double-quotes from the formatted name. 
With double-quotes, the name can begin with a number, and Amazon Keyspaces removes the double-quotes from the formatted name. "Special_Ch@r@cters<>!!" Special_Ch@r@cters<>!! With double-quotes, the name can contain special characters, and Amazon Keyspaces removes the double- quotes from the formatted name. Types 774 Amazon Keyspaces (for Apache Cassandra) Developer Guide Entered name Formatted name Note "nested""""quotes" nested""quotes Amazon Keyspaces removes the outer double-quotes and the escape double-quotes from the formatted name. Examples CREATE TYPE my_keyspace.phone ( country_code int, number text ); You can nest UDTs if the nested UDT is frozen. For more information about default values and quotas for types, see the section called “Amazon Keyspaces UDT quotas and default values”. CREATE TYPE my_keyspace.user ( first_name text, last_name text, phones FROZEN<phone> ); For more code examples that show how to create UDTs, see the section called “User-defined types (UDTs)”. DROP TYPE Use the DROP TYPE statement to delete a UDT. You can only delete a type that's not in use by another type or table. Syntax drop_type_statement ::= DROP TYPE [ IF EXISTS ] udt_name Where: • IF EXISTS prevents DROP TYPE from failing if the type doesn't exist. (Optional) Types 775 Amazon Keyspaces (for Apache Cassandra) Developer Guide • udt_name is the fully-qualified name of the UDT in type format, for example my_keyspace.my_type. If you define the current keyspace with the USE statement, you don't need to specify the keyspace name. Example DROP TYPE udt_name; DML statements (data manipulation language) in Amazon Keyspaces Data manipulation language (DML) is the set of Cassandra Query Language (CQL) statements that you use to manage data in Amazon Keyspaces (for Apache Cassandra) tables. You use DML statements to add, modify, or delete data in a table. You also use DML statements to query data in a table. (Note that CQL doesn't support joins or subqueries.) 
Topics • SELECT • INSERT • UPDATE • DELETE SELECT Use a SELECT statement to query data. Syntax select_statement ::= SELECT [ JSON ] ( select_clause | '*' ) FROM table_name [ WHERE 'where_clause' ] [ ORDER BY 'ordering_clause' ] [ LIMIT (integer | bind_marker) ] [ ALLOW FILTERING ] select_clause ::= selector [ AS identifier ] ( ',' selector [ AS identifier ] ) DML statements 776 Amazon Keyspaces (for Apache Cassandra) Developer Guide selector ::= column_name | term | CAST '(' selector AS cql_type ')' | function_name '(' [ selector ( ',' selector )* ] ')' where_clause ::= relation ( AND relation )* relation ::= column_name operator term TOKEN operator ::= '=' | '<' | '>' | '<=' | '>=' | IN | CONTAINS | CONTAINS KEY ordering_clause ::= column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )* Examples SELECT name, id, manager_id FROM "myGSGKeyspace".employees_tbl ; SELECT JSON name, id, manager_id FROM "myGSGKeyspace".employees_tbl ; For a table that maps JSON-encoded data types to Amazon Keyspaces data types, see the section called “JSON encoding of Amazon Keyspaces data types”. Using the IN keyword The IN keyword specifies equality for one or more values. It can be
AmazonKeyspaces-245
AmazonKeyspaces.pdf
245
relation )* relation ::= column_name operator term TOKEN operator ::= '=' | '<' | '>' | '<=' | '>=' | IN | CONTAINS | CONTAINS KEY ordering_clause ::= column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )* Examples SELECT name, id, manager_id FROM "myGSGKeyspace".employees_tbl ; SELECT JSON name, id, manager_id FROM "myGSGKeyspace".employees_tbl ; For a table that maps JSON-encoded data types to Amazon Keyspaces data types, see the section called “JSON encoding of Amazon Keyspaces data types”. Using the IN keyword The IN keyword specifies equality for one or more values. It can be applied to the partition key and the clustering column. Results are returned in the order the keys are presented in the SELECT statement. Examples SELECT * from mykeyspace.mytable WHERE primary.key1 IN (1,2) and clustering.key1 = 2; SELECT * from mykeyspace.mytable WHERE primary.key1 IN (1,2) and clustering.key1 <= 2; SELECT * from mykeyspace.mytable WHERE primary.key1 = 1 and clustering.key1 IN (1, 2); SELECT * from mykeyspace.mytable WHERE primary.key1 <= 2 and clustering.key1 IN (1, 2) ALLOW FILTERING; For more information about the IN keyword and how Amazon Keyspaces processes the statement, see the section called “Use IN SELECT”. Ordering results The ORDER BY clause specifies the sort order of the returned results. It takes as arguments a list of column names along with the sort order for each column. You can only specify clustering columns in ordering clauses. Non-clustering columns are not allowed. The sort order options are ASC for SELECT 777 Amazon Keyspaces (for Apache Cassandra) Developer Guide ascending and DESC for descending sort order. If the sort order is omitted, the default ordering of the clustering column is used. For possible sort orders, see the section called “Order results”. 
Example SELECT name, id, division, manager_id FROM "myGSGKeyspace".employees_tbl WHERE id = '012-34-5678' ORDER BY division; When using ORDER BY with the IN keyword, results are ordered within a page. Full re-ordering with disabled pagination is not supported. TOKEN You can apply the TOKEN function to the PARTITION KEY column in SELECT and WHERE clauses. With the TOKEN function, Amazon Keyspaces returns rows based on the mapped token value of the PARTITION_KEY rather than on the value of the PARTITION KEY. TOKEN relations are not supported with the IN keyword. Examples SELECT TOKEN(id) from my_table; SELECT TOKEN(id) from my_table WHERE TOKEN(id) > 100 and TOKEN(id) < 10000; TTL function You can use the TTL function with the SELECT statement to retrieve the expiration time in seconds that is stored for a column. If no TTL value is set, the function returns null. Example SELECT TTL(my_column) from my_table; The TTL function can’t be used on multi-cell columns such as collections. WRITETIME function You can use the WRITETIME function with the SELECT statement to retrieve the timestamp that is stored as metadata for the value of a column only if the table uses client-side timestamps. For more information, see the section called “Client-side timestamps”. SELECT 778 Amazon Keyspaces (for Apache Cassandra) Developer Guide SELECT WRITETIME(my_column) from my_table; The WRITETIME function can’t be used on multi-cell columns such as collections. Note For compatibility with established Cassandra driver behavior, tag-based authorization policies are not enforced when you perform operations on system tables by using Cassandra Query Language (CQL) API calls through Cassandra drivers and developer tools. For more information, see the section called “ Amazon Keyspaces resource access based on tags”. INSERT Use the INSERT statement to add a row to a table. 
Syntax insert_statement ::= INSERT INTO table_name ( names_values | json_clause ) [ IF NOT EXISTS ] [ USING update_parameter ( AND update_parameter )* ] names_values ::= names VALUES tuple_literal json_clause ::= JSON string [ DEFAULT ( NULL | UNSET ) ] names ::= '(' column_name ( ',' column_name )* ')' Example INSERT INTO "myGSGKeyspace".employees_tbl (id, name, project, region, division, role, pay_scale, vacation_hrs, manager_id) VALUES ('012-34-5678','Russ','NightFlight','US','Engineering','IC',3,12.5, '234-56-7890') ; Update parameters INSERT supports the following values as update_parameter: • TTL – A time value in seconds. The maximum configurable value is 630,720,000 seconds, which is the equivalent of 20 years. INSERT 779 Amazon Keyspaces (for Apache Cassandra) Developer Guide • TIMESTAMP – A bigint value representing the number of microseconds since the standard base time known as the epoch: January 1 1970 at 00:00:00 GMT. A timestamp in Amazon Keyspaces has to fall between the range of 2 days in the past and 5 minutes in the future. Example INSERT INTO my_table (userid, time, subject, body, user) VALUES (B79CB3BA-745E-5D9A-8903-4A02327A7E09, 96a29100-5e25-11ec-90d7- b5d91eceda0a, 'Message', 'Hello','205.212.123.123') USING TTL 259200; JSON support For a table that maps JSON-encoded data types to Amazon Keyspaces data types, see the section called “JSON encoding of Amazon Keyspaces data types”. You can use the JSON keyword to insert a JSON-encoded map as a single row. For columns that exist in the table
AmazonKeyspaces-246
AmazonKeyspaces.pdf
246
time known as the epoch: January 1 1970 at 00:00:00 GMT. A timestamp in Amazon Keyspaces has to fall between the range of 2 days in the past and 5 minutes in the future. Example INSERT INTO my_table (userid, time, subject, body, user) VALUES (B79CB3BA-745E-5D9A-8903-4A02327A7E09, 96a29100-5e25-11ec-90d7- b5d91eceda0a, 'Message', 'Hello','205.212.123.123') USING TTL 259200; JSON support For a table that maps JSON-encoded data types to Amazon Keyspaces data types, see the section called “JSON encoding of Amazon Keyspaces data types”. You can use the JSON keyword to insert a JSON-encoded map as a single row. For columns that exist in the table but are omitted in the JSON insert statement, use DEFAULT UNSET to preserve the existing values. Use DEFAULT NULL to write a NULL value into each row of omitted columns and overwrite the existing values (standard write charges apply). DEFAULT NULL is the default option. Example INSERT INTO "myGSGKeyspace".employees_tbl JSON '{"id":"012-34-5678", "name": "Russ", "project": "NightFlight", "region": "US", "division": "Engineering", "role": "IC", "pay_scale": 3, "vacation_hrs": 12.5, "manager_id": "234-56-7890"}'; If the JSON data contains duplicate keys, Amazon Keyspaces stores the last value for the key (similar to Apache Cassandra). In the following example, where the duplicate key is id, the value 234-56-7890 is used. Example INSERT 780 Amazon Keyspaces (for Apache Cassandra) Developer Guide INSERT INTO "myGSGKeyspace".employees_tbl JSON '{"id":"012-34-5678", "name": "Russ", "project": "NightFlight", "region": "US", "division": "Engineering", "role": "IC", "pay_scale": 3, "vacation_hrs": 12.5, "id": "234-56-7890"}'; UPDATE Use the UPDATE statement to modify a row in a table. 
Syntax update_statement ::= UPDATE table_name [ USING update_parameter ( AND update_parameter )* ] SET assignment ( ',' assignment )* WHERE where_clause [ IF ( EXISTS | condition ( AND condition )*) ] update_parameter ::= ( integer | bind_marker ) assignment ::= simple_selection '=' term | column_name '=' column_name ( '+' | '-' ) term | column_name '=' list_literal '+' column_name simple_selection ::= column_name | column_name '[' term ']' | column_name '.' `field_name condition ::= simple_selection operator term Example UPDATE "myGSGKeyspace".employees_tbl SET pay_scale = 5 WHERE id = '567-89-0123' AND division = 'Marketing' ; To increment a counter, use the following syntax. For more information, see the section called “Counters”. UPDATE ActiveUsers SET counter = counter + 1 WHERE user = A70FE1C0-5408-4AE3- BE34-8733E5K09F14 AND action = 'click'; UPDATE 781 Amazon Keyspaces (for Apache Cassandra) Developer Guide Update parameters UPDATE supports the following values as update_parameter: • TTL – A time value in seconds. The maximum configurable value is 630,720,000 seconds, which is the equivalent of 20 years. • TIMESTAMP – A bigint value representing the number of microseconds since the standard base time known as the epoch: January 1 1970 at 00:00:00 GMT. A timestamp in Amazon Keyspaces has to fall between the range of 2 days in the past and 5 minutes in the future. Example UPDATE my_table USING TIMESTAMP 1667467854000000 SET subject = 'Message', body = 'Hello again', user = '205.212.123.123' WHERE userid = B79CB3BA-745E-5D9A-8903-4A02327A7E09 AND time = 96a29100-5e25-11ec-90d7- b5d91eceda0a; DELETE Use the DELETE statement to remove a row from a table. Syntax delete_statement ::= DELETE [ simple_selection ( ',' simple_selection ) ] FROM table_name [ USING update_parameter ( AND update_parameter )* ] WHERE where_clause [ IF ( EXISTS | condition ( AND condition )*) ] simple_selection ::= column_name | column_name '[' term ']' | column_name '.' 
`field_name condition ::= simple_selection operator term Where: • table_name is the table that contains the row you want to delete. Example DELETE 782 Amazon Keyspaces (for Apache Cassandra) Developer Guide DELETE manager_id FROM "myGSGKeyspace".employees_tbl WHERE id='789-01-2345' AND division='Executive' ; DELETE supports the following value as update_parameter: • TIMESTAMP – A bigint value representing the number of microseconds since the standard base time known as the epoch: January 1 1970 at 00:00:00 GMT. Built-in functions in Amazon Keyspaces Amazon Keyspaces (for Apache Cassandra) supports a variety of built-in functions that you can use in Cassandra Query Language (CQL) statements. Topics • Scalar functions Scalar functions A scalar function performs a calculation on a single value and returns the result as a single value. Amazon Keyspaces supports the following scalar functions. Function blobAsType cast currentDate currentTime Description Returns a value of the specified data type. Converts one native data type into another native data type. Returns the current date/time as a date. Returns the current date/time as a time. currentTimestamp Returns the current date/time as a timestamp. currentTimeUUID Returns the current date/time as a timeuuid. fromJson Built-in functions Converts the JSON string into the selected column's data type. 783 Amazon Keyspaces (for Apache Cassandra) Developer Guide Function maxTimeuuid minTimeuuid now toDate toJson token toTimestamp TTL Description Returns the largest possible timeuuid for timestamp or date string. Returns the smallest possible timeuuid for timestamp or date string. Returns a new unique timeuuid. Supported for INSERT, UPDATE, and DELETE statement s, and as part of the WHERE clause in SELECT statements. Converts either a timeuuid or a timestamp
AmazonKeyspaces-247
AmazonKeyspaces.pdf
247
as a time. currentTimestamp Returns the current date/time as a timestamp. currentTimeUUID Returns the current date/time as a timeuuid. fromJson Built-in functions Converts the JSON string into the selected column's data type. 783 Amazon Keyspaces (for Apache Cassandra) Developer Guide Function maxTimeuuid minTimeuuid now toDate toJson token toTimestamp TTL Description Returns the largest possible timeuuid for timestamp or date string. Returns the smallest possible timeuuid for timestamp or date string. Returns a new unique timeuuid. Supported for INSERT, UPDATE, and DELETE statement s, and as part of the WHERE clause in SELECT statements. Converts either a timeuuid or a timestamp to a date type. Returns the column value of the selected column in JSON format. Returns the hash value of the partition key. Converts either a timeuuid or a date to a timestamp. Returns the expiration time in seconds for a column. typeAsBlob Converts the specified data type into a blob. toUnixTimestamp uuid writetime Converts either a timeuuid or a timestamp into a bigInt. Returns a random version 4 UUID. Supported for INSERT, UPDATE, and DELETE statement s, and as part of the WHERE clause in SELECT statements. Returns the timestamp of the value of the specified column. Scalar functions 784 Amazon Keyspaces (for Apache Cassandra) Developer Guide Function dateOf unixTimestampOf Description (Deprecated) Extracts the timestamp of a timeuuid, and returns the value as a date. (Deprecated) Extracts the timestamp of a timeuuid, and returns the value as a raw, 64- bit integer timestamp. Scalar functions 785 Amazon Keyspaces (for Apache Cassandra) Developer Guide Quotas for Amazon Keyspaces (for Apache Cassandra) This section describes current quotas and default values for Amazon Keyspaces (for Apache Cassandra). 
Topics • Amazon Keyspaces service quotas • Increasing or decreasing throughput (for provisioned tables) • Amazon Keyspaces encryption at rest • Quotas and default values for user-defined types (UDTs) in Amazon Keyspaces Amazon Keyspaces service quotas The following table contains Amazon Keyspaces (for Apache Cassandra) quotas and the default values. Information about which quotas can be adjusted is available in the Service Quotas console, where you can also request quota increases. For more information on quotas, contact AWS Support. Quota Description Amazon Keyspaces default Max keyspaces per AWS Region Max tables per AWS Region 256 256 The maximum number of keyspaces for this subscribe r per Region. You can adjust this default value in the Service Quotas console. The maximum number of tables across all keyspaces for this subscriber per Region. You can adjust this default value in the Service Quotas console. Amazon Keyspaces service quotas 786 Amazon Keyspaces (for Apache Cassandra) Developer Guide Quota Description Amazon Keyspaces default Max amount of data restored using ADD REGION operation s 10 TB The maximum size of data that can be concurrently restored by ADD REGION operations. To increase the amount of data to be concurrently restored, contact Support. Max table schema size The maximum size of a table schema. 350 KB Max concurrent DDL operations Max queries per connection Max row size The maximum number of concurrent DDL operations 50 allowed for this subscriber per Region. The maximum number of CQL queries that can be processed by a single client TCP connection per second. 3000 The maximum size of a row, excluding static column data. 1 MB For details, see the section called “Estimate row size”. 
Amazon Keyspaces service quotas 787 Amazon Keyspaces (for Apache Cassandra) Developer Guide Quota Description Amazon Keyspaces default Max number of columns in INSERT and UPDATE statements 225/166 The maximum number of columns allowed in CQL INSERT or UPDATE statement s. An INSERT or UPDATE statement supports up to 225 regular columns when Time to Live (TTL) is turned off. If TTL is turned on, up to 166 regular columns can be modified in a single operation . Max static data per logical partition The maximum aggregate size of static data in a logical 1 MB partition. For details, see the section called “Calculate static column size per logical partition”. Max subqueries per IN SELECT statement The maximum number of subqueries you can use for 100 the IN keyword in a SELECT statement. You can adjust this default value in the Service Quotas console. Amazon Keyspaces service quotas 788 Amazon Keyspaces (for Apache Cassandra) Developer Guide Quota Description Amazon Keyspaces default Max number of nested frozen collections per AWS Region The maximum number of nested collections supported 8 when you're using the FROZEN keyword for a column with a collection data type. For more informati on about frozen collectio ns, see the section called “Collection types”. To increase the nesting level, contact Support. The maximum read throughput per second—re ad request units (RRUs) or read capacity units (RCUs) —that can be allocated to a table per Region. You can adjust this default value in the Service Quotas console. 40,000 Max read
AmazonKeyspaces-248
AmazonKeyspaces.pdf
248
(for Apache Cassandra) Developer Guide Quota Description Amazon Keyspaces default Max number of nested frozen collections per AWS Region The maximum number of nested collections supported 8 when you're using the FROZEN keyword for a column with a collection data type. For more information about frozen collections, see the section called “Collection types”. To increase the nesting level, contact AWS Support. The maximum read throughput per second—read request units (RRUs) or read capacity units (RCUs) —that can be allocated to a table per Region. You can adjust this default value in the Service Quotas console. 40,000 Max read throughput per second Max write throughput per second The maximum write throughput per second—write request units (WRUs) or write capacity units (WCUs) —that can be allocated to a table per Region. You can adjust this default value in the Service Quotas console. 40,000 Amazon Keyspaces service quotas 789 Amazon Keyspaces (for Apache Cassandra) Developer Guide Quota Description Amazon Keyspaces default Account-level read throughput (provisioned) The maximum number of aggregate read capacity 80,000 Account-level write throughput (provisioned) Max number of scalable targets per Region per account 80,000 1,500 units (RCUs) allocated for the account per Region. This is applicable only for tables in provisioned read/write capacity mode. You can adjust this default value in the Service Quotas console. The maximum number of aggregate write capacity units (WCU) allocated for the account per Region. This is applicable only for tables in provisioned read/write capacity mode. You can adjust this default value in the Service Quotas console. The maximum number of scalable targets for the account per Region. An Amazon Keyspaces table counts as one scalable target if auto scaling is enabled for read capacity, and as another scalable target if auto scaling is enabled for write capacity. 
You can adjust this default value in the Service Quotas console for Application Auto Scaling by choosing Scalable targets for Amazon Keyspaces. Amazon Keyspaces service quotas 790 Amazon Keyspaces (for Apache Cassandra) Developer Guide Quota Description Amazon Keyspaces default Max partition key size Max clustering key size 2048 bytes 850 bytes The maximum size of the compound partition key. Up to 3 bytes of additional storage are added to the raw size of each column included in the partition key for metadata. The maximum combined size of all clustering columns. Up to 4 bytes of additional storage are added to the raw size of each clustering column for metadata. Max concurrent table restores using Point-in-time Recovery (PITR) The maximum number of concurrent table restores using PITR per subscriber is 4. You can adjust this default value in the Service Quotas console. Max amount of data restored using point-in-time recovery (PITR) The maximum size of data that can be restored using PITR within 24 hours. 5 TB You can adjust this default value in the Service Quotas console. Increasing or decreasing throughput (for provisioned tables) Increasing provisioned throughput You can increase ReadCapacityUnits or WriteCapacityUnits as often as necessary by using the console or the ALTER TABLE statement. The new settings don't take effect until the ALTER TABLE operation is complete. Increasing or decreasing throughput (for provisioned tables) 791 Amazon Keyspaces (for Apache Cassandra) Developer Guide You can't exceed your per-account quotas when you add provisioned capacity. And you can increase the provisioned capacity for your tables as much as you need. For more information about per-account quotas, see the preceding section, the section called “Amazon Keyspaces service quotas”. Decreasing provisioned throughput For every table in an ALTER TABLE statement, you can decrease ReadCapacityUnits or WriteCapacityUnits (or both). 
The new settings don't take effect until the ALTER TABLE operation is complete. A decrease is allowed up to four times, anytime per day. A day is defined according to Universal Coordinated Time (UTC). Additionally, if there was no decrease in the past hour, an additional decrease is allowed. This effectively brings the maximum number of decreases in a day to 27 (4 decreases in the first hour, and 1 decrease for each of the subsequent 1-hour windows in a day). Amazon Keyspaces encryption at rest You can change encryption options between an AWS owned AWS KMS key and a customer managed AWS KMS key up to four times within a 24-hour window, on a per table basis, starting from when the table was created. If there was no change in the past six hours, an additional change is allowed. This effectively brings the maximum number of changes in a day to eight (four changes in the first six hours, and one change for each of the subsequent six-hour windows in a day). You can change the encryption option to use an AWS owned AWS KMS key as often as necessary, even if the earlier quota has been exhausted. These are the
AmazonKeyspaces-249
AmazonKeyspaces.pdf
249
AWS KMS key up to four times within a 24-hour window, on a per table basis, starting from when the table was created. If there was no change in the past six hours, an additional change is allowed. This effectively brings the maximum number of changes in a day to eight (four changes in the first six hours, and one change for each of the subsequent six-hour windows in a day). You can change the encryption option to use an AWS owned AWS KMS key as often as necessary, even if the earlier quota has been exhausted. These are the quotas unless you request a higher amount. To request a service quota increase, see Support. Quotas and default values for user-defined types (UDTs) in Amazon Keyspaces Amazon Keyspaces UDT quotas and default values The following table contains quotas and default values related to UDTs in Amazon Keyspaces. For more information about these quotas, contact AWS Support. Decreasing provisioned throughput 792 Amazon Keyspaces (for Apache Cassandra) Developer Guide Quota Description Amazon Keyspaces default Max number of UDTs per AWS Region The maximum number of UDTs across all keyspaces for 256 this subscriber per Region. Max number of tables per UDT The maximum number of tables that can reference the 100 same UDT. Max number of UDTs per table The maximum number of UDTs that a table can reference. Max level of nesting of UDTs The maximum nesting depth supported for UDTs. Max amount of direct child UDTs per UDT The maximum number of child UDTs supported for a UDT. Max amount of direct parent UDTs per UDT The maximum number of parent UDTs supported for a UDT. 50 8 10 10 Max UDT schema size The maximum size of the schema for a UDT. 25 KB Max UDT name length Max UDT field name length The maximum number of characters in the UDT name. The maximum number of characters in a UDT field name. 
48 128 Amazon Keyspaces UDT quotas and default values 793 Amazon Keyspaces (for Apache Cassandra) Developer Guide Document history for Amazon Keyspaces (for Apache Cassandra) The following table describes the important changes to the documentation since the last release of Amazon Keyspaces (for Apache Cassandra). For notification about updates to this documentation, you can subscribe to an RSS feed. • Latest documentation update: March 31, 2025 Change Description Date Amazon Keyspaces multi-Region replication now supports the Africa (Cape Town) Region. You can now define the Africa (Cape Town) Region for multi-Region keyspaces. March 31, 2025 Amazon Keyspaces multi-Region replication now supports unlimited AWS Regions. You can now define as many Regions as you want for multi-Region keyspaces. March 25, 2025 Amazon Keyspaces support for user-defined types (UDTs) in multi-Region keyspaces You can now create and use UDTs in multi-Region keyspaces. March 24, 2025 Amazon Keyspaces managed policy update Amazon Keyspaces added new permissions to the AmazonKeyspacesFullAccess managed policy to allow IAM principals to add new Regions to an existing keyspace. This includes an update to the service-linked role AWSServiceRoleForAmazonKeyspacesReplication. November 19, 2024 794 Amazon Keyspaces (for Apache Cassandra) Developer Guide Add replicas to multi-Region tables in Amazon Keyspaces You can now add new AWS Regions to existing single and multi-Region keyspaces. November 19, 2024 Support for user-defined types (UDTs) in Amazon Keyspaces With native support for UDTs in Amazon Keyspaces you can now define data structures in your applications that represent real-world data hierarchies. October 30, 2024 ADD COLUMN support for Amazon Keyspaces multi-Region replication Amazon Keyspaces now supports schema changes for multi-Region tables. September 17, 2024 
Updated migration guidance for Amazon Keyspaces The updated migration guidance outlines how to June 28, 2024 create a migration plan to successfully migrate from Apache Cassandra to Amazon Keyspaces, including different strategies for offline and online migrations. Connect to Amazon Keyspaces from Amazon You can now follow a step- by-step tutorial to connect Elastic Kubernetes Service to Amazon Keyspaces from February 7, 2024 Amazon EKS. Amazon Keyspaces multi-Reg ion replication support for provisioned tables Amazon Keyspaces now supports provisioned capacity mode for multi-Region tables. January 23, 2024 Amazon Keyspaces auto scaling APIs for provisioned tables Amazon Keyspaces now offers January 23, 2024 CQL and AWS API support for setting up auto scaling with provisioned capacity mode. 795 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces DML activity included in CloudTrail You can now audit Amazon Keyspaces Data Manipulation December 20, 2023 logs Language (DML) API calls in AWS CloudTrail. Amazon Keyspaces support Amazon Keyspaces now November 15, 2023 for the FROZEN keyword supports the FROZEN keyword for collection data types. Amazon Keyspaces managed policy update Amazon Keyspaces added new permissions to the October 3, 2023 Amazon Keyspaces managed policy update AmazonKeyspacesFul lAccess managed policy to allow clients connecting to Amazon Keyspaces through
AmazonKeyspaces-250
AmazonKeyspaces.pdf
250
and AWS API support for setting up auto scaling with provisioned capacity mode. 795 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces DML activity included in CloudTrail You can now audit Amazon Keyspaces Data Manipulation December 20, 2023 logs Language (DML) API calls in AWS CloudTrail. Amazon Keyspaces support Amazon Keyspaces now November 15, 2023 for the FROZEN keyword supports the FROZEN keyword for collection data types. Amazon Keyspaces managed policy update Amazon Keyspaces added new permissions to the October 3, 2023 Amazon Keyspaces managed policy update AmazonKeyspacesFul lAccess managed policy to allow clients connecting to Amazon Keyspaces through interface VPC endpoints access to the Amazon EC2 instance to update the Amazon Keyspaces system.peers table with network information from the VPC. Amazon Keyspaces created a September 12, 2023 new AmazonKeyspacesRea dOnlyAccess_v2 managed policy to allow clients connecting to Amazon Keyspaces through interface VPC endpoints access to the Amazon EC2 instance to update the Amazon Keyspaces system.peers table with network informati on from the VPC. 796 Amazon Keyspaces (for Apache Cassandra) Developer Guide Best practices for creating connections in Amazon Keyspaces Learn how to improve and optimize client driver configurations in Amazon Keyspaces. June 30, 2023 System keyspaces are now documented for Amazon Learn what is stored in system keyspaces and how to query June 21, 2023 Keyspaces Amazon Keyspaces now supports multi-Region replication them for useful information in Amazon Keyspaces. Amazon Keyspaces multi-Reg ion replication helps you to maintain globally distributed applications by providing you with improved fault tolerance, stability, and resilience. 
June 5, 2023 Amazon Keyspaces managed policy update Amazon Keyspaces added new permissions to the AmazonKeyspacesFullAccess managed policy to allow Amazon Keyspaces to create a service-linked role when an administrator creates a multi-Region keyspace. June 5, 2023 Amazon Keyspaces support for the IN keyword Amazon Keyspaces now supports the IN keyword in SELECT statements. April 25, 2023 Cross-account access for Amazon Keyspaces and interface VPC endpoints Learn how to implement cross-account access for Amazon Keyspaces with VPC endpoints. April 20, 2023 797 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces support for client-side timestamps Amazon Keyspaces client-side timestamps are Cassandra-compatible cell-level timestamps that help distributed applications to determine the order of write operations when different clients make changes to the same data. March 14, 2023 Getting started with Amazon Keyspaces and interface VPC endpoints In this step-by-step tutorial, learn how to connect to Amazon Keyspaces from a VPC. March 1, 2023 Optimizing costs of Amazon Keyspaces tables Best practices and guidance are available to help you identify strategies for optimizing costs of your existing Amazon Keyspaces tables. February 17, 2023 The Murmur3Partitioner is now the default The Murmur3Partitioner is now the default partitioner in Amazon Keyspaces. November 17, 2022 Amazon Keyspaces now supports Murmur3Partitioner The Murmur3Partitioner is now available in Amazon Keyspaces. November 9, 2022 Support update for empty strings and blob values Amazon Keyspaces now also supports empty strings and blob values as clustering column values. October 19, 2022 798 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces is now available in AWS GovCloud (US) Amazon Keyspaces is now available in the AWS GovCloud (US) Region and is in scope for FedRAMP-High compliance. August 4, 2022 
For information about available endpoints, see AWS GovCloud (US) Region FIPS endpoints. Monitor Amazon Keyspaces table storage costs with Amazon CloudWatch Amazon Keyspaces now helps you monitor and track table storage costs over time with the BillableTableSizeInBytes CloudWatch metric. June 14, 2022 Amazon Keyspaces now supports Terraform You can now use Terraform to perform data definition language (DDL) operations in Amazon Keyspaces. June 9, 2022 Amazon Keyspaces token function support Amazon Keyspaces now helps you optimize application queries by using the token function. April 19, 2022 Amazon Keyspaces integration with Apache Spark Amazon Keyspaces now helps you read and write data in Apache Spark more easily by using the open-source Spark Cassandra Connector. April 19, 2022 799 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces API Reference Amazon Keyspaces supports control plane operations to manage keyspaces and tables using the AWS SDK and AWS CLI. The API reference guide describes the supported control plane operations in detail. March 2, 2022 How to troubleshoot common configuration issues when using Amazon Keyspaces. Learn more about how to resolve common configuration issues you may encounter November 22, 2021 when using Amazon Keyspaces.
AmazonKeyspaces-251
AmazonKeyspaces.pdf
251
detail. How to troubleshoot common configuration issues when Learn more about how to resolve common configura using Amazon Keyspaces. tion issues you may encounter November 22, 2021 when using Amazon Keyspaces. Amazon Keyspaces support for Time to Live (TTL). Amazon Keyspaces Time to Live (TTL) helps you simplify October 18, 2021 your application logic and optimize the price of storage by expiring data from tables automatically. Migrating data to Amazon Keyspaces using DSBulk. Step-by-step tutorial for migrating data from Apache August 9, 2021 Amazon Keyspaces support for VPC Endpoint entries in the system.peers table. Cassandra to Amazon Keyspaces using the DataStax Bulk Loader (DSBulk). Amazon Keyspaces allows you July 29, 2021 to populate the system.pe ers table with available interface VPC endpoint information to improve load balancing and increase read/ write throughput. 800 Amazon Keyspaces (for Apache Cassandra) Developer Guide Update to IAM managed policies to support customer IAM managed policies for Amazon Keyspaces now managed AWS KMS keys. include permissions to list June 1, 2021 and view available customer managed AWS KMS keys stored in AWS KMS. Amazon Keyspaces support for customer managed AWS Amazon Keyspaces allows you to take control of customer June 1, 2021 KMS keys. managed AWS KMS keys stored in AWS KMS for encryption at rest. Amazon Keyspaces support for JSON syntax Amazon Keyspaces helps you read and write JSON January 21, 2021 documents more easily by supporting JSON syntax for INSERT and SELECT operation s. Amazon Keyspaces support for static columns Amazon Keyspaces now helps you update and store November 9, 2020 GA release of NoSQL Workbench support for Amazon Keyspaces October 28, 2020 common data between multiple rows efficiently by using static columns. NoSQL Workbench is a client- side application that helps you design and visualize nonrelational data models for Amazon Keyspaces more easily. 
NoSQL Workbench clients are available for Windows, macOS, and Linux. 801 Amazon Keyspaces (for Apache Cassandra) Developer Guide Preview release of NoSQL Workbench support for NoSQL Workbench is a client- side application that helps October 5, 2020 Amazon Keyspaces you design and visualize nonrelational data models for Amazon Keyspaces more easily. NoSQL Workbench clients are available for Windows, macOS, and Linux. New code examples for programmatic access to We continue to add code examples for programmatic July 17, 2020 Amazon Keyspaces access to Amazon Keyspaces . Samples are now available for Java, Python, Go, C#, and Perl Cassandra drivers that support Apache Cassandra version 3.11.2. Amazon Keyspaces point-in- time recovery (PITR) Amazon Keyspaces now offers point-in-time recovery July 9, 2020 Amazon Keyspaces general availability April 23, 2020 (PITR) to help protect your tables from accidental write or delete operations by providing you continuous backups of your table data. With Amazon Keyspaces , formerly known during preview as Amazon Managed Apache Cassandra Service (MCS), you can use the Cassandra Query Language (CQL) code, Apache 2.0–licen sed Cassandra drivers, and developer tools that you already use today. 802 Amazon Keyspaces (for Apache Cassandra) Developer Guide Amazon Keyspaces automatic scaling Amazon Keyspaces (for Apache Cassandra) integrate April 23, 2020 s with Application Auto Scaling to help you provision throughput capacity efficient ly for variable workloads in response to actual application traffic by adjusting throughpu t capacity automatically. Interface virtual private cloud (VPC) endpoints for Amazon Amazon Keyspaces offers private communication April 16, 2020 Keyspaces Tag-based access policies Counter data type Tagging resources AWS CloudFormation support between the service and your VPC so that network traffic doesn't leave the Amazon network. You can now use resource tags in IAM policies to manage access to Amazon Keyspaces. 
Amazon Keyspaces now helps you coordinate increments and decrements to column values by using counters. Amazon Keyspaces now enables you to label and categorize resources by using tags. Amazon Keyspaces now helps you automate the creation and management of resources by using AWS CloudFormation. April 8, 2020 April 7, 2020 March 31, 2020 March 25, 2020 803 Amazon Keyspaces (for Apache Cassandra) Developer Guide Support for IAM roles and policies and SigV4 authentication Added information on how you can use AWS Identity and Access Management (IAM) to manage access permissions and implement security policies for Amazon Keyspaces and how to use the authentication plugin for the DataStax Java Driver for Cassandra to programmatically access Amazon Keyspaces using IAM roles and federated identities. March 17, 2020 Read/write capacity mode Amazon Keyspaces now supports two read/write throughput capacity modes. The read/write capacity mode controls how you're charged for read and write throughput and how table throughput capacity is managed. February 20, 2020 Initial release This documentation covers the initial release of Amazon Keyspaces (for Apache Cassandra). December 3, 2019 804
amazonml-api-001
amazonml-api.pdf
1
API Reference MachineLearning API Version 2014-12-12 Copyright © 2025 Amazon Web Services, Inc. and/or its affiliates. All rights reserved. MachineLearning API Reference MachineLearning: API Reference Copyright © 2025 Amazon Web Services, Inc. and/or its affiliates. All rights reserved. Amazon's trademarks and trade dress may not be used in connection with any product or service that is not Amazon's, in any manner that is likely to cause confusion among customers, or in any manner that disparages or discredits Amazon. All other trademarks not owned by Amazon are the property of their respective owners, who may or may not be affiliated with, connected to, or sponsored by Amazon. MachineLearning Table of Contents API Reference Welcome ........................................................................................................................................... 1 Actions .............................................................................................................................................. 2 AddTags .......................................................................................................................................................... 4 Request Syntax ........................................................................................................................................ 4 Request Parameters ................................................................................................................................ 4 Response Syntax ...................................................................................................................................... 5 Response Elements ................................................................................................................................. 5 Errors .......................................................................................................................................................... 
6 Examples ................................................................................................................................................... 6 See Also ..................................................................................................................................................... 7 CreateBatchPrediction ................................................................................................................................. 9 Request Syntax ........................................................................................................................................ 9 Request Parameters ................................................................................................................................ 9 Response Syntax ................................................................................................................................... 11 Response Elements ............................................................................................................................... 11 Errors ....................................................................................................................................................... 11 Examples ................................................................................................................................................. 12 See Also .................................................................................................................................................. 13 CreateDataSourceFromRDS ...................................................................................................................... 14 Request Syntax ...................................................................................................................................... 
14 Request Parameters .............................................................................................................................. 15 Response Syntax ................................................................................................................................... 17 Response Elements ............................................................................................................................... 17 Errors ....................................................................................................................................................... 17 Examples ................................................................................................................................................. 18 See Also .................................................................................................................................................. 19 CreateDataSourceFromRedshift ............................................................................................................... 21 Request Syntax ...................................................................................................................................... 21 Request Parameters .............................................................................................................................. 22 Response Syntax ................................................................................................................................... 24 Response Elements ............................................................................................................................... 24 Errors ....................................................................................................................................................... 24 Examples ................................................................................................................................................. 
25 See Also .................................................................................................................................................. 26 API Version 2014-12-12 iii MachineLearning API Reference CreateDataSourceFromS3 ......................................................................................................................... 28 Request Syntax ...................................................................................................................................... 28 Request Parameters .............................................................................................................................. 29 Response Syntax ................................................................................................................................... 30 Response Elements ............................................................................................................................... 30 Errors ....................................................................................................................................................... 31 Examples ................................................................................................................................................. 31 See Also .................................................................................................................................................. 32 CreateEvaluation ......................................................................................................................................... 33 Request Syntax ...................................................................................................................................... 33 Request Parameters .............................................................................................................................. 
33 Response Syntax ................................................................................................................................... 34 Response Elements ............................................................................................................................... 35 Errors ....................................................................................................................................................... 35 Examples ................................................................................................................................................. 36 See Also .................................................................................................................................................. 36 CreateMLModel ........................................................................................................................................... 38 Request Syntax ...................................................................................................................................... 38 Request Parameters .............................................................................................................................. 38 Response Syntax ................................................................................................................................... 41 Response Elements ............................................................................................................................... 41 Errors ....................................................................................................................................................... 42 Examples ................................................................................................................................................. 
42 See Also .................................................................................................................................................. 43 CreateRealtimeEndpoint ........................................................................................................................... 44 Request Syntax ...................................................................................................................................... 44 Request Parameters .............................................................................................................................. 44 Response Syntax ................................................................................................................................... 44 Response Elements ............................................................................................................................... 45 Errors ....................................................................................................................................................... 45 Examples ................................................................................................................................................. 46 See Also .................................................................................................................................................. 47 DeleteBatchPrediction ............................................................................................................................... 48 Request Syntax ...................................................................................................................................... 48 Request Parameters .............................................................................................................................. 48 Response Syntax ................................................................................................................................... 
48 API Version 2014-12-12 iv MachineLearning API Reference Response Elements ............................................................................................................................... 49 Errors ....................................................................................................................................................... 49 Examples ................................................................................................................................................. 50 See Also .................................................................................................................................................. 50 DeleteDataSource ....................................................................................................................................... 52 Request Syntax ...................................................................................................................................... 52 Request Parameters .............................................................................................................................. 52 Response Syntax ................................................................................................................................... 52 Response Elements ............................................................................................................................... 53 Errors ....................................................................................................................................................... 53 Examples ................................................................................................................................................. 54 See Also .................................................................................................................................................. 
54 DeleteEvaluation ......................................................................................................................................... 56 Request Syntax ...................................................................................................................................... 56 Request Parameters .............................................................................................................................. 56 Response Syntax ................................................................................................................................... 56 Response Elements ............................................................................................................................... 57 Errors ....................................................................................................................................................... 57 Examples ................................................................................................................................................. 58 See Also .................................................................................................................................................. 58 DeleteMLModel ........................................................................................................................................... 60 Request Syntax ...................................................................................................................................... 60 Request Parameters .............................................................................................................................. 60 Response Syntax ................................................................................................................................... 60 Response Elements ............................................................................................................................... 
61 Errors ....................................................................................................................................................... 61 Examples ................................................................................................................................................. 61 See Also .................................................................................................................................................. 62 DeleteRealtimeEndpoint ........................................................................................................................... 64 Request Syntax ...................................................................................................................................... 64 Request Parameters .............................................................................................................................. 64 Response Syntax ................................................................................................................................... 64 Response Elements ............................................................................................................................... 65 Errors ....................................................................................................................................................... 65 Examples ................................................................................................................................................. 66 See Also .................................................................................................................................................. 67 API Version 2014-12-12 v MachineLearning API Reference DeleteTags ................................................................................................................................................... 
68 Request Syntax ...................................................................................................................................... 68 Request Parameters .............................................................................................................................. 68 Response Syntax ................................................................................................................................... 69 Response Elements ............................................................................................................................... 69 Errors ....................................................................................................................................................... 70 Examples ................................................................................................................................................. 70 See Also .................................................................................................................................................. 71 DescribeBatchPredictions .......................................................................................................................... 73 Request Syntax ...................................................................................................................................... 73 Request Parameters .............................................................................................................................. 73 Response Syntax ................................................................................................................................... 77 Response Elements ............................................................................................................................... 77 Errors ....................................................................................................................................................... 
78 Examples ................................................................................................................................................. 78 See Also .................................................................................................................................................. 79 DescribeDataSources .................................................................................................................................. 81 Request Syntax ...................................................................................................................................... 81 Request Parameters .............................................................................................................................. 81 Response Syntax ................................................................................................................................... 84 Response Elements ............................................................................................................................... 85 Errors ....................................................................................................................................................... 86 Examples ................................................................................................................................................. 86 See Also .................................................................................................................................................. 88 DescribeEvaluations ................................................................................................................................... 89 Request Syntax ...................................................................................................................................... 
89 Request Parameters .............................................................................................................................. 89 Response Syntax ................................................................................................................................... 93 Response Elements ............................................................................................................................... 93 Errors ....................................................................................................................................................... 94 Examples ................................................................................................................................................. 94 See Also .................................................................................................................................................. 95 DescribeMLModels ...................................................................................................................................... 97 Request Syntax ...................................................................................................................................... 97 Request Parameters .............................................................................................................................. 97 Response Syntax ................................................................................................................................. 101 API Version 2014-12-12 vi MachineLearning API Reference Response Elements ............................................................................................................................ 102 Errors ..................................................................................................................................................... 
102 Examples ............................................................................................................................................... 102 See Also ................................................................................................................................................ 104 DescribeTags ............................................................................................................................................. 105 Request Syntax .................................................................................................................................... 105 Request Parameters ........................................................................................................................... 105 Response Syntax ................................................................................................................................. 105 Response Elements ............................................................................................................................ 106 Errors ..................................................................................................................................................... 106 Examples ............................................................................................................................................... 107 See Also ................................................................................................................................................ 108 GetBatchPrediction .................................................................................................................................. 109 Request Syntax .................................................................................................................................... 109 Request Parameters ........................................................................................................................... 
109 Response Syntax ................................................................................................................................. 109 Response Elements ............................................................................................................................ 110 Errors ..................................................................................................................................................... 113 Examples ............................................................................................................................................... 114 See Also ................................................................................................................................................ 115 GetDataSource .......................................................................................................................................... 116 Request Syntax .................................................................................................................................... 116 Request Parameters ........................................................................................................................... 116 Response Syntax ................................................................................................................................. 117 Response Elements ............................................................................................................................ 118 Errors ..................................................................................................................................................... 121 Examples ............................................................................................................................................... 
122 See Also ................................................................................................................................................ 123 GetEvaluation ............................................................................................................................................ 125 Request Syntax .................................................................................................................................... 125 Request Parameters ........................................................................................................................... 125 Response Syntax ................................................................................................................................. 125 Response Elements ............................................................................................................................ 126 Errors ..................................................................................................................................................... 129 Examples ............................................................................................................................................... 130 See Also ................................................................................................................................................ 131 API Version 2014-12-12 vii MachineLearning API Reference GetMLModel .............................................................................................................................................. 132 Request Syntax .................................................................................................................................... 132 Request Parameters
........................................................... 132 Response Syntax ................................................................. 133 Response Elements ............................................................ 133 Errors ..................................................................................... 138 Examples ............................................................................... 139 See Also ................................................................................ 141 Predict ........................................................................................................................................ 142 Request Syntax .................................................................................................................................... 142 Request Parameters ........................................................................................................................... 142 Response Syntax ................................................................................................................................. 143 Response Elements ............................................................................................................................ 143 Errors ..................................................................................................................................................... 144 Examples ...............................................................................................................................................
144 See Also ................................................................................................................................................ 145 UpdateBatchPrediction ........................................................................................................................... 147 Request Syntax .................................................................................................................................... 147 Request Parameters ........................................................................................................................... 147 Response Syntax ................................................................................................................................. 148 Response Elements ............................................................................................................................ 148 Errors ..................................................................................................................................................... 148 Examples ............................................................................................................................................... 149 See Also ................................................................................................................................................ 149 UpdateDataSource ................................................................................................................................... 151 Request Syntax .................................................................................................................................... 151 Request Parameters ........................................................................................................................... 151 Response Syntax ................................................................................................................................. 
152 Response Elements ............................................................................................................................ 152 Errors ..................................................................................................................................................... 152 Examples ............................................................................................................................................... 153 See Also ................................................................................................................................................ 153 UpdateEvaluation ..................................................................................................................................... 155 Request Syntax .................................................................................................................................... 155 Request Parameters ........................................................................................................................... 155 Response Syntax ................................................................................................................................. 156 API Version 2014-12-12 viii MachineLearning API Reference Response Elements ............................................................................................................................ 156 Errors ..................................................................................................................................................... 156 Examples ............................................................................................................................................... 157 See Also ................................................................................................................................................ 
157 UpdateMLModel ....................................................................................................................................... 159 Request Syntax .................................................................................................................................... 159 Request Parameters ........................................................................................................................... 159 Response Syntax ................................................................................................................................. 160 Response Elements ............................................................................................................................ 160 Errors ..................................................................................................................................................... 160 Examples ............................................................................................................................................... 161 See Also ................................................................................................................................................ 162 Data Types ................................................................................................................................... 163 BatchPrediction ........................................................................................................................................ 164 Contents ............................................................................................................................................... 164 See Also ................................................................................................................................................ 
167 DataSource ................................................................................................................................................ 169 Contents ............................................................................................................................................... 169 See Also ................................................................................................................................................ 173 Evaluation .................................................................................................................................................. 174 Contents ............................................................................................................................................... 174 See Also ................................................................................................................................................ 177 MLModel .................................................................................................................................................... 178 Contents ............................................................................................................................................... 178 See Also ................................................................................................................................................ 183 PerformanceMetrics ................................................................................................................................. 184 Contents ............................................................................................................................................... 184 See Also ................................................................................................................................................ 
184 Prediction ................................................................................................................................................... 185 Contents ............................................................................................................................................... 185 See Also ................................................................................................................................................ 186 RDSDatabase ............................................................................................................................................. 187 Contents ............................................................................................................................................... 187 See Also ................................................................................................................................................ 187 RDSDatabaseCredentials ......................................................................................................................... 188 Contents ............................................................................................................................................... 188 API Version 2014-12-12 ix MachineLearning API Reference See Also ................................................................................................................................................ 188 RDSDataSpec ............................................................................................................................................. 189 Contents ............................................................................................................................................... 189 See Also ................................................................................................................................................ 
194 RDSMetadata ............................................................................................................................................ 195 Contents ............................................................................................................................................... 195 See Also ................................................................................................................................................ 196 RealtimeEndpointInfo ............................................................................................................................. 197 Contents ............................................................................................................................................... 197 See Also ................................................................................................................................................ 198 RedshiftDatabase ..................................................................................................................................... 199 Contents ............................................................................................................................................... 199 See Also ................................................................................................................................................ 199 RedshiftDatabaseCredentials ................................................................................................................. 200 Contents ............................................................................................................................................... 200 See Also ................................................................................................................................................ 
200 RedshiftDataSpec ..................................................................................................................................... 201 Contents ............................................................................................................................................... 201 See Also ................................................................................................................................................ 205 RedshiftMetadata ..................................................................................................................................... 206 Contents ............................................................................................................................................... 206 See Also ................................................................................................................................................ 206 S3DataSpec ............................................................................................................................................... 208 Contents ............................................................................................................................................... 208 See Also ................................................................................................................................................ 211 Tag ............................................................................................................................................................... 212 Contents ............................................................................................................................................... 212 See Also ................................................................................................................................................ 
212 Common Parameters ................................................................................................................... 213 Common Errors ............................................................................................................................ 216 API Version 2014-12-12 x MachineLearning Welcome Definition of the public APIs exposed by Amazon Machine Learning This document was last published on May 14, 2025. API Reference API Version 2014-12-12 1 MachineLearning Actions The following actions are supported: • AddTags • CreateBatchPrediction • CreateDataSourceFromRDS • CreateDataSourceFromRedshift • CreateDataSourceFromS3 • CreateEvaluation • CreateMLModel • CreateRealtimeEndpoint • DeleteBatchPrediction • DeleteDataSource • DeleteEvaluation • DeleteMLModel • DeleteRealtimeEndpoint • DeleteTags • DescribeBatchPredictions • DescribeDataSources • DescribeEvaluations • DescribeMLModels • DescribeTags • GetBatchPrediction • GetDataSource • GetEvaluation • GetMLModel • Predict • UpdateBatchPrediction • UpdateDataSource • UpdateEvaluation API Reference API Version 2014-12-12 2 MachineLearning • UpdateMLModel API Reference API Version 2014-12-12 3 MachineLearning AddTags API Reference Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, AddTags updates the tag's value. Request Syntax { "ResourceId": "string", "ResourceType": "string", "Tags": [ { "Key": "string", "Value": "string" } ] } Request Parameters For information about the parameters that are common to all actions, see Common Parameters. The request accepts the following data in JSON format. ResourceId The ID of the ML object to tag. For example, exampleModelId. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ Required: Yes ResourceType The type of the ML object to tag. 
Type: String AddTags API Version 2014-12-12 4 MachineLearning API Reference Valid Values: BatchPrediction | DataSource | Evaluation | MLModel Required: Yes Tags The key-value pairs to use to create tags. If you specify a key without specifying a value, Amazon ML creates a tag with the specified key and a value of null. Type: Array of Tag objects Array Members: Maximum number of 100 items. Required: Yes Response Syntax { "ResourceId": "string", "ResourceType": "string" } Response Elements If the action is successful, the service sends back an HTTP 200 response. The following data is returned in JSON format by the service. ResourceId The ID
amazonml-api-003
amazonml-api.pdf
3
Version 2014-12-12 4 MachineLearning API Reference Valid Values: BatchPrediction | DataSource | Evaluation | MLModel Required: Yes Tags The key-value pairs to use to create tags. If you specify a key without specifying a value, Amazon ML creates a tag with the specified key and a value of null. Type: Array of Tag objects Array Members: Maximum number of 100 items. Required: Yes Response Syntax { "ResourceId": "string", "ResourceType": "string" } Response Elements If the action is successful, the service sends back an HTTP 200 response. The following data is returned in JSON format by the service. ResourceId The ID of the ML object that was tagged. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ ResourceType The type of the ML object that was tagged. Type: String Response Syntax API Version 2014-12-12 5 MachineLearning API Reference Valid Values: BatchPrediction | DataSource | Evaluation | MLModel Errors For information about the errors that are common to all actions, see Common Errors. InternalServerException An error on the server occurred when trying to process a request. HTTP Status Code: 500 InvalidInputException An error on the client occurred. Typically, the cause is an invalid input value. HTTP Status Code: 400 InvalidTagException A submitted tag is invalid. HTTP Status Code: 400 ResourceNotFoundException A specified resource cannot be located. HTTP Status Code: 400 TagLimitExceededException The limit in the number of tags has been exceeded. HTTP Status Code: 400 Examples The following is an example of a request and response for the AddTags operation. This example illustrates one usage of AddTags. 
Sample Request POST / HTTP/1.1 Errors API Version 2014-12-12 6 MachineLearning API Reference Host: machinelearning.<region>.<domain> x-amz-Date: <Date> Authorization: AWS4-HMAC-SHA256 Credential=<Credential>, SignedHeaders=contenttype;date;host;user-agent;x-amz-date;x-amz-target;x-amzn- requestid,Signature=<Signature> User-Agent: <UserAgentString> Content-Type: application/x-amz-json-1.1 Content-Length: <PayloadSizeBytes> Connection: Keep-Alive X-Amz-Target: AmazonML_20141212.AddTags { "ResourceId": "exampleModelId", "ResourceType": "MLModel", "Tags": [ { "Key":"exampleKey", "Value":"exampleKeyValue" } ] } Sample Response HTTP/1.1 200 OK x-amzn-RequestId: <RequestId> Content-Type: application/x-amz-json-1.1 Content-Length: <PayloadSizeBytes> Date: <Date> { "ResourceId": "exampleModelId", "ResourceType": "MLModel" } See Also For more information about using this API in one of the language-specific AWS SDKs, see the following: • AWS Command Line Interface • AWS SDK for .NET • AWS SDK for C++ • AWS SDK for Go v2 • AWS SDK for Java V2 See Also API Version 2014-12-12 7 MachineLearning • AWS SDK for JavaScript V3 • AWS SDK for Kotlin • AWS SDK for PHP V3 • AWS SDK for Python • AWS SDK for Ruby V3 API Reference See Also API Version 2014-12-12 8 MachineLearning API Reference CreateBatchPrediction Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files referenced by the DataSource as information sources. CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction completes, Amazon ML sets the status to COMPLETED. You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result. 
After the COMPLETED status appears, the results are available in the location specified by the OutputUri parameter. Request Syntax { "BatchPredictionDataSourceId": "string", "BatchPredictionId": "string", "BatchPredictionName": "string", "MLModelId": "string", "OutputUri": "string" } Request Parameters For information about the parameters that are common to all actions, see Common Parameters. The request accepts the following data in JSON format. BatchPredictionDataSourceId The ID of the DataSource that points to the group of observations to predict. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ CreateBatchPrediction API Version 2014-12-12 9 API Reference MachineLearning Required: Yes BatchPredictionId A user-supplied ID that uniquely identifies the BatchPrediction. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ Required: Yes BatchPredictionName A user-supplied name or description of the BatchPrediction. BatchPredictionName can only use the UTF-8 character set. Type: String Length Constraints: Maximum length of 1024. Pattern: .*\S.*|^$ Required: No MLModelId The ID of the MLModel that will generate predictions for the group of observations. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ Required: Yes OutputUri The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory to store the batch prediction results. The following substrings are not allowed in the s3 key portion of the outputURI field: ':', '//', '/./', '/../'. Amazon ML needs permissions to store and retrieve the logs on your behalf. For information about how to set permissions, see the Amazon Machine Learning Developer Guide. Request Parameters API Version 2014-12-12 10 MachineLearning Type: String Length Constraints: Maximum length of 2048. API Reference Pattern: s3://([^/]+)(/.*)? 
Required: Yes Response Syntax { "BatchPredictionId": "string" } Response Elements If the action is successful, the service sends back an HTTP 200 response. The
amazonml-api-004
amazonml-api.pdf
4
(Amazon S3) bucket or directory to store the batch prediction results. The following substrings are not allowed in the s3 key portion of the outputURI field: ':', '//', '/./', '/../'. Amazon ML needs permissions to store and retrieve the logs on your behalf. For information about how to set permissions, see the Amazon Machine Learning Developer Guide. Request Parameters API Version 2014-12-12 10 MachineLearning Type: String Length Constraints: Maximum length of 2048. API Reference Pattern: s3://([^/]+)(/.*)? Required: Yes Response Syntax { "BatchPredictionId": "string" } Response Elements If the action is successful, the service sends back an HTTP 200 response. The following data is returned in JSON format by the service. BatchPredictionId A user-supplied ID that uniquely identifies the BatchPrediction. This value is identical to the value of the BatchPredictionId in the request. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ Errors For information about the errors that are common to all actions, see Common Errors. IdempotentParameterMismatchException A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request. HTTP Status Code: 400 Response Syntax API Version 2014-12-12 11 MachineLearning InternalServerException API Reference An error on the server occurred when trying to process a request. HTTP Status Code: 500 InvalidInputException An error on the client occurred. Typically, the cause is an invalid input value. HTTP Status Code: 400 Examples The following is a sample request and response of the BatchPrediction operation. This example illustrates one usage of CreateBatchPrediction. 
Sample Request POST / HTTP/1.1 Host: machinelearning.<region>.<domain> x-amz-Date: <Date> Authorization: AWS4-HMAC-SHA256 Credential=<Credential>, SignedHeaders=contenttype;date;host;user-agent;x-amz-date;x-amz-target;x-amzn- requestid,Signature=<Signature> User-Agent: <UserAgentString> Content-Type: application/x-amz-json-1.1 Content-Length: <PayloadSizeBytes> Connection: Keep-Alive X-Amz-Target: AmazonML_20141212.CreateBatchPrediction { "BatchPredictionId": "EXAMPLE-bp-2014-09-12-15-14-04-156", "BatchPredictionName": "EXAMPLE", "MLModelId": "EXAMPLE-pr-2014-09-12-15-14-04-924", "BatchPredictionDataSourceId": "EXAMPLE-tr-ds-2014-09-12-15-14-04-989", "OutputUri": "s3://eml-test-EXAMPLE/test-outputs/EXAMPLE-bp-2014-09-12-15-14-04-156/ results" } Sample Response HTTP/1.1 200 OK Examples API Version 2014-12-12 12 MachineLearning API Reference x-amzn-RequestId: <RequestId> Content-Type: application/x-amz-json-1.1 Content-Length: <PayloadSizeBytes> Date: <Date> {"BatchPredictionId":"EXAMPLE-bp-2014-09-12-15-14-04-156"} See Also For more information about using this API in one of the language-specific AWS SDKs, see the following: • AWS Command Line Interface • AWS SDK for .NET • AWS SDK for C++ • AWS SDK for Go v2 • AWS SDK for Java V2 • AWS SDK for JavaScript V3 • AWS SDK for Kotlin • AWS SDK for PHP V3 • AWS SDK for Python • AWS SDK for Ruby V3 See Also API Version 2014-12-12 13 MachineLearning API Reference CreateDataSourceFromRDS Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations. CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. 
DataSource in the COMPLETED or PENDING state can be used only to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations. If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response. Request Syntax { "ComputeStatistics": boolean, "DataSourceId": "string", "DataSourceName": "string", "RDSData": { "DatabaseCredentials": { "Password": "string", "Username": "string" }, "DatabaseInformation": { "DatabaseName": "string", "InstanceIdentifier": "string" }, "DataRearrangement": "string", "DataSchema": "string", "DataSchemaUri": "string", "ResourceRole": "string", "S3StagingLocation": "string", "SecurityGroupIds": [ "string" ], "SelectSqlQuery": "string", "ServiceRole": "string", "SubnetId": "string" }, CreateDataSourceFromRDS API Version 2014-12-12 14 MachineLearning "RoleARN": "string" } Request Parameters API Reference For information about the parameters that are common to all actions, see Common Parameters. The request accepts the following data in JSON format. ComputeStatistics The compute statistics for a DataSource. The statistics are generated from the observation data referenced by a DataSource. Amazon ML uses the statistics internally during MLModel training. This parameter must be set to true if the DataSource needs to be used for MLModel training. Type: Boolean Required: No DataSourceId A user-supplied ID that uniquely identifies the DataSource. Typically, an Amazon Resource Name (ARN) becomes the ID for a DataSource. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ Required: Yes DataSourceName A user-supplied name or description of the DataSource. Type: String Length Constraints: Maximum length of 1024. 
Pattern: .*\S.*|^$ Request Parameters API Version 2014-12-12 15 MachineLearning Required: No RDSData API Reference The data specification of an Amazon RDS DataSource: • DatabaseInformation - • DatabaseName - The name of the Amazon RDS database. • InstanceIdentifier - A unique identifier for the Amazon RDS database instance. • DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database. • ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2 instance to carry out the copy task from Amazon RDS to Amazon Simple Storage Service (Amazon S3). For more information,
amazonml-api-005
amazonml-api.pdf
5
length of 1024. Pattern: .*\S.*|^$ Request Parameters API Version 2014-12-12 15 MachineLearning Required: No RDSData API Reference The data specification of an Amazon RDS DataSource: • DatabaseInformation - • DatabaseName - The name of the Amazon RDS database. • InstanceIdentifier - A unique identifier for the Amazon RDS database instance. • DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database. • ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2 instance to carry out the copy task from Amazon RDS to Amazon Simple Storage Service (Amazon S3). For more information, see Role templates for data pipelines. • ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines. • SecurityInfo - The security information to use to access an RDS DB instance. You need to set up appropriate ingress rules for the security entity IDs provided to allow access to the Amazon RDS instance. Specify a [SubnetId, SecurityGroupIds] pair for a VPC-based RDS DB instance. • SelectSqlQuery - A query that is used to retrieve the observation data for the Datasource. • S3StagingLocation - The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this location. • DataSchemaUri - The Amazon S3 location of the DataSchema. • DataSchema - A JSON string representing the schema. This is not required if DataSchemaUri is specified. • DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the Datasource. 
Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" Type: RDSDataSpec object Required: Yes Request Parameters API Version 2014-12-12 16 MachineLearning RoleARN API Reference The role that Amazon ML assumes on behalf of the user to create and activate a data pipeline in the user's account and copy data using the SelectSqlQuery query from Amazon RDS to Amazon S3. Type: String Length Constraints: Minimum length of 1. Maximum length of 110. Required: Yes Response Syntax { "DataSourceId": "string" } Response Elements If the action is successful, the service sends back an HTTP 200 response. The following data is returned in JSON format by the service. DataSourceId A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request. Type: String Length Constraints: Minimum length of 1. Maximum length of 64. Pattern: [a-zA-Z0-9_.-]+ Errors For information about the errors that are common to all actions, see Common Errors. Response Syntax API Version 2014-12-12 17 MachineLearning API Reference IdempotentParameterMismatchException A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request. HTTP Status Code: 400 InternalServerException An error on the server occurred when trying to process a request. HTTP Status Code: 500 InvalidInputException An error on the client occurred. Typically, the cause is an invalid input value. HTTP Status Code: 400 Examples The following is a sample HTTP request and response of the CreateDataSourceFromRDS operation. This example illustrates one usage of CreateDataSourceFromRDS. 
Sample Request POST / HTTP/1.1 Host: machinelearning.<region>.<domain> x-amz-Date: <Date> Authorization: AWS4-HMAC-SHA256 Credential=<Credential>, SignedHeaders=contenttype;date;host;user-agent;x-amz-date;x-amz-target;x-amzn- requestid,Signature=<Signature> User-Agent: <UserAgentString> Content-Type: application/x-amz-json-1.1 Content-Length: <PayloadSizeBytes> Connection: Keep-Alive X-Amz-Target: AmazonML_20141212.CreateDataSourceFromRDS { "DataSourceId": "ml-rds-data-source-demo", "DataSourceName": "ml-rds-data-source-demo", "RDSData": Examples API Version 2014-12-12 18 API Reference MachineLearning { "DatabaseInformation": { "InstanceIdentifier": "demo", "DatabaseName": "demo" }, "SelectSqlQuery": "select feature1, feature2, feature3, ...., featureN from RDS_DEMO_TABLE;", "DatabaseCredentials": { "Username": "demo_user", "Password": "demo_password" }, "S3StagingLocation": "s3://mldemo/data/", "DataSchemaUri": "s3://mldemo/schema/mldemo.csv.schema", "ResourceRole": "DataPipelineDefaultResourceRole", "ServiceRole": "DataPipelineDefaultRole", "SubnetId": "subnet-XXXX", "SecurityGroupIds": ["sg-XXXXXX", "sg-XXXXXX"] }, "RoleARN": "arn:aws:iam::<awsAccountId>:role/<roleToAssume>" } Sample Response HTTP/1.1 200 OK x-amzn-RequestId: <RequestId> Content-Type: application/x-amz-json-1.1 Content-Length: <PayloadSizeBytes> Date: <Date> { "DataSourceId":"ml-rds-data-source-demo" } See Also For more information about using this API in one of the language-specific AWS SDKs, see the following: • AWS Command Line Interface • AWS SDK for .NET See Also API Version 2014-12-12 19 API Reference MachineLearning • AWS SDK for C++ • AWS SDK for Go v2 • AWS SDK for Java V2 • AWS SDK for JavaScript V3 • AWS SDK for Kotlin • AWS SDK for PHP V3 • AWS SDK for Python • AWS SDK for Ruby V3 See Also API Version 2014-12-12 20 MachineLearning API Reference CreateDataSourceFromRedshift Creates a DataSource from a database hosted on an Amazon Redshift cluster. 
A DataSource references data that can be used to perform either CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations. CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING states can be used to perform only CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations. If Amazon