Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"type": "feature",
"category": "Amazon DynamoDB Enhanced Client",
"contributor": "",
"description": "Added support for @DynamoDbUpdateBehavior on attributes within nested objects. The @DynamoDbUpdateBehavior annotation will only take effect for nested attributes when using IgnoreNullsMode.SCALAR_ONLY."
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,21 @@

package software.amazon.awssdk.enhanced.dynamodb.extensions;

import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.getNestedSchema;
import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.hasMap;
import static software.amazon.awssdk.enhanced.dynamodb.internal.extensions.utility.NestedRecordUtils.getTableSchemaForListElement;
import static software.amazon.awssdk.enhanced.dynamodb.internal.extensions.utility.NestedRecordUtils.reconstructCompositeKey;
import static software.amazon.awssdk.enhanced.dynamodb.internal.extensions.utility.NestedRecordUtils.resolveSchemasPerPath;

import java.time.Clock;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import software.amazon.awssdk.annotations.NotThreadSafe;
import software.amazon.awssdk.annotations.SdkPublicApi;
Expand All @@ -30,6 +39,7 @@
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension;
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext;
import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;
import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTag;
import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableMetadata;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
Expand Down Expand Up @@ -64,10 +74,23 @@
* <p>
* Every time a new update of the record is successfully written to the database, the timestamp at which it was modified will
* be automatically updated. This extension applies the conversions as defined in the attribute convertor.
* The implementation handles both flattened nested parameters (identified by keys separated with
* {@code "_NESTED_ATTR_UPDATE_"}) and entire nested maps or lists, ensuring consistent behavior across both representations.
* If a nested object or list is {@code null}, no timestamp values will be generated for any of its annotated fields.
* The same timestamp value is used for both top-level attributes and all applicable nested fields.
*
* <p>
* <b>Note:</b> This implementation uses a temporary cache keyed by {@link TableSchema} instance.
* When updating timestamps in nested objects or lists, the correct {@code TableSchema} must be used for each object.
* This cache ensures that each nested object is processed with its own schema, avoiding redundant lookups and ensuring
* all annotated timestamp fields are updated correctly.
* </p>
*/
@SdkPublicApi
@ThreadSafe
public final class AutoGeneratedTimestampRecordExtension implements DynamoDbEnhancedClientExtension {

private static final String NESTED_OBJECT_UPDATE = "_NESTED_ATTR_UPDATE_";
private static final String CUSTOM_METADATA_KEY = "AutoGeneratedTimestampExtension:AutoGeneratedTimestampAttribute";
private static final AutoGeneratedTimestampAttribute
AUTO_GENERATED_TIMESTAMP_ATTRIBUTE = new AutoGeneratedTimestampAttribute();
Expand Down Expand Up @@ -126,26 +149,187 @@ public static AutoGeneratedTimestampRecordExtension create() {
*/
@Override
public WriteModification beforeWrite(DynamoDbExtensionContext.BeforeWrite context) {
Map<String, AttributeValue> itemToTransform = new HashMap<>(context.items());

Map<String, AttributeValue> updatedItems = new HashMap<>();
Instant currentInstant = clock.instant();

Collection<String> customMetadataObject = context.tableMetadata()
.customMetadataObject(CUSTOM_METADATA_KEY, Collection.class).orElse(null);
// Use TableSchema<?> instance as the cache key
Map<TableSchema<?>, TableSchema<?>> schemaInstanceCache = new HashMap<>();

if (customMetadataObject == null) {
itemToTransform.forEach((key, value) -> {
if (hasMap(value)) {
Optional<? extends TableSchema<?>> nestedSchemaOpt = getNestedSchema(context.tableSchema(), key);
if (nestedSchemaOpt.isPresent()) {
TableSchema<?> nestedSchema = nestedSchemaOpt.get();
TableSchema<?> cachedSchema = getOrCacheSchema(schemaInstanceCache, nestedSchema);
Map<String, AttributeValue> processed =
processNestedObject(value.m(), cachedSchema, currentInstant, schemaInstanceCache);
updatedItems.put(key, AttributeValue.builder().m(processed).build());
}
} else if (value.hasL() && !value.l().isEmpty()) {
// Check first non-null element to determine if this is a list of maps
AttributeValue firstElement = value.l().stream()
.filter(Objects::nonNull)
.findFirst()
.orElse(null);

if (hasMap(firstElement)) {
TableSchema<?> elementListSchema = getTableSchemaForListElement(context.tableSchema(), key);
if (elementListSchema != null) {
TableSchema<?> cachedSchema = getOrCacheSchema(schemaInstanceCache, elementListSchema);
Collection<AttributeValue> updatedList = new ArrayList<>(value.l().size());

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

you can avoid creating new arraylist and create it only when you use it in the loop

for (AttributeValue listItem : value.l()) {
if (hasMap(listItem)) {
updatedList.add(AttributeValue.builder()
.m(processNestedObject(
listItem.m(),
cachedSchema,
currentInstant,
schemaInstanceCache))
.build());
} else {
updatedList.add(listItem);
}
}
updatedItems.put(key, AttributeValue.builder().l(updatedList).build());
}
}
}
});

Map<String, TableSchema<?>> stringTableSchemaMap = resolveSchemasPerPath(itemToTransform, context.tableSchema());

stringTableSchemaMap.forEach((path, schema) -> {
Collection<String> customMetadataObject = schema.tableMetadata()
.customMetadataObject(CUSTOM_METADATA_KEY, Collection.class)
.orElse(null);

if (customMetadataObject != null) {
customMetadataObject.forEach(
key -> {
AttributeConverter<?> converter = schema.converterForAttribute(key);
if (converter != null) {
insertTimestampInItemToTransform(updatedItems, reconstructCompositeKey(path, key),
converter, currentInstant);
}
});
}
});

if (updatedItems.isEmpty()) {
return WriteModification.builder().build();
}
Map<String, AttributeValue> itemToTransform = new HashMap<>(context.items());
customMetadataObject.forEach(
key -> insertTimestampInItemToTransform(itemToTransform, key,
context.tableSchema().converterForAttribute(key)));

itemToTransform.putAll(updatedItems);

return WriteModification.builder()
.transformedItem(Collections.unmodifiableMap(itemToTransform))
.build();
}

private static TableSchema<?> getOrCacheSchema(
Copy link

@amzn-erdemkemer amzn-erdemkemer Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Im not sure I understand how this caching helps. We are adding same object as key and value.

// First call with a schema instance
TableSchema<?> schema1 = getNestedSchema(context.tableSchema(), "address");
// schema1 = TableSchema@12345

// Call getOrCacheSchema
schemaInstanceCache.get(schema1);  // Returns null (first time)
schemaInstanceCache.put(schema1, schema1);  // Stores: {TableSchema@12345 -> TableSchema@12345}
return schema1;  // Returns TableSchema@12345

// Second call with THE SAME schema instance
TableSchema<?> schema2 = getNestedSchema(context.tableSchema(), "address");
// schema2 = TableSchema@12345 (SAME INSTANCE!)

// Call getOrCacheSchema again
schemaInstanceCache.get(schema2);  // Returns TableSchema@12345 (cache hit!)
return schema2;  // Returns TableSchema@12345

getNestedSchema() already returns the same instance for the same attribute path. The cache is just checking if we've seen this exact object reference before, but we're passing in the same reference we just got, am i missing something ?

Map<TableSchema<?>, TableSchema<?>> schemaInstanceCache, TableSchema<?> schema) {

TableSchema<?> cachedSchema = schemaInstanceCache.get(schema);
if (cachedSchema == null) {
schemaInstanceCache.put(schema, schema);
cachedSchema = schema;
}
return cachedSchema;
}

private Map<String, AttributeValue> processNestedObject(Map<String, AttributeValue> nestedMap, TableSchema<?> nestedSchema,
Instant currentInstant,
Map<TableSchema<?>, TableSchema<?>> schemaInstanceCache) {
Map<String, AttributeValue> updatedNestedMap = nestedMap;
boolean updated = false;

Collection<String> customMetadataObject = nestedSchema.tableMetadata()
.customMetadataObject(CUSTOM_METADATA_KEY, Collection.class)
.orElse(null);

if (customMetadataObject != null) {
for (String key : customMetadataObject) {
AttributeConverter<?> converter = nestedSchema.converterForAttribute(key);
if (converter != null) {
if (!updated) {
updatedNestedMap = new HashMap<>(nestedMap);
updated = true;
}
insertTimestampInItemToTransform(updatedNestedMap, String.valueOf(key),
converter, currentInstant);
}
}
}

for (Map.Entry<String, AttributeValue> entry : nestedMap.entrySet()) {
String nestedKey = entry.getKey();
AttributeValue nestedValue = entry.getValue();
if (nestedValue.hasM()) {
Optional<? extends TableSchema<?>> childSchemaOpt = getNestedSchema(nestedSchema, nestedKey);
if (childSchemaOpt.isPresent()) {
TableSchema<?> childSchema = childSchemaOpt.get();
TableSchema<?> cachedSchema = getOrCacheSchema(schemaInstanceCache, childSchema);
Map<String, AttributeValue> processed = processNestedObject(
nestedValue.m(), cachedSchema, currentInstant, schemaInstanceCache);

if (!Objects.equals(processed, nestedValue.m())) {
if (!updated) {
updatedNestedMap = new HashMap<>(nestedMap);
updated = true;
}
updatedNestedMap.put(nestedKey, AttributeValue.builder().m(processed).build());
}
}
} else if (nestedValue.hasL() && !nestedValue.l().isEmpty()) {
// Check first non-null element to determine if this is a list of maps
AttributeValue firstElement = nestedValue.l().stream()
.filter(Objects::nonNull)
.findFirst()
.orElse(null);
if (hasMap(firstElement)) {
TableSchema<?> listElementSchema = getTableSchemaForListElement(nestedSchema, nestedKey);
if (listElementSchema != null) {
TableSchema<?> cachedSchema = getOrCacheSchema(schemaInstanceCache, listElementSchema);
Collection<AttributeValue> updatedList = new ArrayList<>(nestedValue.l().size());
Copy link

@amzn-erdemkemer amzn-erdemkemer Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is more of a question than ask to change immediately.

So, we are doing eager arraylist creation which will create a new array list (with 8kb I guess) in memory. You can lazy init the array list to avoid that. It will for sure make the implementation a bit more complex but creating so many objects in memory also wouldn't be ideal.

What's your thoughts on this, leave it simple in favor of code readability but sacrifice memory or not ?

} else if (nestedValue.hasL() && !nestedValue.l().isEmpty()) {
    AttributeValue firstElement = nestedValue.l().stream()
                                       .filter(Objects::nonNull)
                                       .findFirst()
                                       .orElse(null);
    if (firstElement != null && firstElement.hasM()) {
        TableSchema<?> listElementSchema = getTableSchemaForListElement(nestedSchema, nestedKey);
        if (listElementSchema != null) {
            TableSchema<?> cachedSchema = getOrCacheSchema(schemaInstanceCache, listElementSchema);
            
            // LAZY: Start with null
            Collection<AttributeValue> updatedList = null;
            boolean listModified = false;
            
            List<AttributeValue> originalList = nestedValue.l();
            for (int i = 0; i < originalList.size(); i++) {
                AttributeValue listItem = originalList.get(i);
                
                if (listItem != null && listItem.hasM()) {
                    Map<String, AttributeValue> processedMap = processNestedObject(
                        listItem.m(),
                        cachedSchema,
                        currentInstant,
                        schemaInstanceCache
                    );
                    
                    AttributeValue processedItem = AttributeValue.builder().m(processedMap).build();
                    
                    // Check if this item was modified
                    if (!Objects.equals(processedItem, listItem)) {
                        // First modification detected!
                        if (!listModified) {
                            // Create list NOW and copy all previous items
                            updatedList = new ArrayList<>(originalList.size());
                            for (int j = 0; j < i; j++) {
                                updatedList.add(originalList.get(j));
                            }
                            listModified = true;
                        }
                        // Add the modified item
                        updatedList.add(processedItem);
                    } else if (listModified) {
                        // Already modifying, add unchanged item
                        updatedList.add(listItem);
                    }
                    // else: No changes yet, don't create list
                    
                } else {
                    // Handle null or non-map items
                    if (listModified) {
                        updatedList.add(listItem);
                    }
                }
            }
            
            // Only update if something actually changed
            if (listModified) {
                if (!updated) {
                    updatedNestedMap = new HashMap<>(nestedMap);
                    updated = true;
                }
                updatedNestedMap.put(nestedKey, AttributeValue.builder().l(updatedList).build());
            }
        }
    }
}

boolean listModified = false;
for (AttributeValue listItem : nestedValue.l()) {
if (hasMap(listItem)) {
AttributeValue updatedItem = AttributeValue.builder()
.m(processNestedObject(
listItem.m(),
cachedSchema,
currentInstant,
schemaInstanceCache))
.build();
updatedList.add(updatedItem);
if (!Objects.equals(updatedItem, listItem)) {
listModified = true;
}
} else {
updatedList.add(listItem);
}
}
if (listModified) {
if (!updated) {
updatedNestedMap = new HashMap<>(nestedMap);
updated = true;
}
updatedNestedMap.put(nestedKey, AttributeValue.builder().l(updatedList).build());
}
}
}
}
}
return updatedNestedMap;
}

private void insertTimestampInItemToTransform(Map<String, AttributeValue> itemToTransform,
String key,
AttributeConverter converter) {
itemToTransform.put(key, converter.transformFrom(clock.instant()));
AttributeConverter converter,
Instant instant) {
itemToTransform.put(key, converter.transformFrom(instant));
}

/**
Expand Down Expand Up @@ -190,6 +374,7 @@ public <R> void validateType(String attributeName, EnhancedType<R> type,
Validate.notNull(type, "type is null");
Validate.notNull(type.rawClass(), "rawClass is null");
Validate.notNull(attributeValueType, "attributeValueType is null");
validateAttributeName(attributeName);

if (!type.rawClass().equals(Instant.class)) {
throw new IllegalArgumentException(String.format(
Expand All @@ -204,5 +389,15 @@ public Consumer<StaticTableMetadata.Builder> modifyMetadata(String attributeName
return metadata -> metadata.addCustomMetadataObject(CUSTOM_METADATA_KEY, Collections.singleton(attributeName))
.markAttributeAsKey(attributeName, attributeValueType);
}

/**
 * Rejects attribute names that embed the reserved nested-path marker, since the marker
 * would make the name indistinguishable from a flattened nested path.
 *
 * @throws IllegalArgumentException if {@code attributeName} contains the reserved marker
 */
private static void validateAttributeName(String attributeName) {
    if (!attributeName.contains(NESTED_OBJECT_UPDATE)) {
        return;
    }
    throw new IllegalArgumentException(
        String.format(
            "Attribute name '%s' contains reserved marker '%s' and is not allowed.",
            attributeName,
            NESTED_OBJECT_UPDATE));
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,9 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension;
import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;
import software.amazon.awssdk.enhanced.dynamodb.Key;
import software.amazon.awssdk.enhanced.dynamodb.OperationContext;
import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
Expand All @@ -37,6 +39,8 @@
import software.amazon.awssdk.enhanced.dynamodb.model.Page;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.ConsumedCapacity;
import software.amazon.awssdk.utils.CollectionUtils;
import software.amazon.awssdk.utils.StringUtils;

@SdkInternalApi
public final class EnhancedClientUtils {
Expand Down Expand Up @@ -146,7 +150,7 @@ public static <ResponseT, ItemT> Page<ItemT> readAndTransformPaginatedItems(
.scannedCount(scannedCount.apply(response))
.consumedCapacity(consumedCapacity.apply(response));

if (getLastEvaluatedKey.apply(response) != null && !getLastEvaluatedKey.apply(response).isEmpty()) {
if (CollectionUtils.isNotEmpty(getLastEvaluatedKey.apply(response))) {
pageBuilder.lastEvaluatedKey(getLastEvaluatedKey.apply(response));
}
return pageBuilder.build();
Expand Down Expand Up @@ -204,4 +208,42 @@ public static <T> List<T> getItemsFromSupplier(List<Supplier<T>> itemSupplierLis
/**
 * Returns true when the attribute value is the explicit DynamoDB NULL marker
 * (i.e. {@code nul()} is set and true).
 */
public static boolean isNullAttributeValue(AttributeValue attributeValue) {
    Boolean nullFlag = attributeValue.nul();
    return nullFlag != null && nullFlag;
}

/**
 * Null-safe check for whether the attribute value carries a map ({@code M}) value.
 */
public static boolean hasMap(AttributeValue attributeValue) {
    if (attributeValue == null) {
        return false;
    }
    return attributeValue.hasM();
}

/**
 * Resolves the nested {@link TableSchema} for the given attribute of the parent schema.
 * For parameterized attribute types (e.g. Set, List, Map) the schema of the first type
 * parameter is used instead of the container type itself.
 *
 * @param parentSchema  the parent schema; must not be null
 * @param attributeName the attribute name; must not be null or empty
 * @return the nested schema, or {@link Optional#empty()} when none is available
 * @throws IllegalArgumentException if {@code parentSchema} is null or
 *                                  {@code attributeName} is null/empty
 */
public static Optional<? extends TableSchema<?>> getNestedSchema(TableSchema<?> parentSchema, CharSequence attributeName) {
    if (parentSchema == null) {
        throw new IllegalArgumentException("Parent schema cannot be null.");
    }
    if (StringUtils.isEmpty(attributeName)) {
        throw new IllegalArgumentException("Attribute name cannot be null or empty.");
    }

    AttributeConverter<?> converter = parentSchema.converterForAttribute(attributeName);
    if (converter == null) {
        return Optional.empty();
    }

    EnhancedType<?> resolvedType = converter.type();
    if (resolvedType == null) {
        return Optional.empty();
    }

    // Containers such as List<T> expose their element type as the first type parameter.
    List<EnhancedType<?>> typeParameters = resolvedType.rawClassParameters();
    if (!CollectionUtils.isNullOrEmpty(typeParameters)) {
        resolvedType = typeParameters.get(0);
    }

    return resolvedType.tableSchema();
}
}
Loading