@@ -5,7 +5,7 @@ import Foundation
/// Maps to "seriesConfiguration" internally in the charting library.
///
/// Subset of https://echarts.apache.org/en/option.html#series-line
-public struct ChartAggregationConfiguration: Codable, Equatable {
+public struct ChartAggregationConfiguration: Codable, Equatable, Sendable {
public var startAngle: Int?
public var endAngle: Int?
public var radius: [String]?
@@ -3,7 +3,7 @@ import Foundation
/// Display configuration for charts. Overrides various default display options.
///
/// Not hashable, because we don't want to include these values in the cache, as cached calculation results won't change based on these values.
-public struct ChartConfiguration: Codable, Equatable {
+public struct ChartConfiguration: Codable, Equatable, Sendable {
/// The display mode for the chart.
public var displayMode: ChartDisplayMode?

@@ -33,7 +33,7 @@ public struct ChartConfiguration: Codable, Equatable {
}
}

-public enum ChartDisplayMode: String, Codable {
+public enum ChartDisplayMode: String, Codable, Sendable {
case raw
case barChart
case lineChart
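// Editor's sketch, not part of this diff: Sendable is what lets these value
// types cross concurrency boundaries under Swift's strict checking. A minimal
// illustration (the scheduleRender function is hypothetical):
func scheduleRender(_ configuration: ChartConfiguration) {
    Task.detached {
        // Without Sendable, Swift 6 (or -strict-concurrency=complete) would
        // reject capturing `configuration` in this @Sendable closure.
        let mode = configuration.displayMode ?? .raw
        print("Rendering chart as \(mode)")
    }
}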
@@ -3,7 +3,7 @@ import Foundation
/// Options for configuring a chart in our charting library
///
/// Subset of echart's options https://echarts.apache.org/en/option.html
-public struct ChartConfigurationOptions: Codable, Equatable {
+public struct ChartConfigurationOptions: Codable, Equatable, Sendable {
/// Whether to enable animation.
public var animation: Bool?

@@ -40,15 +40,15 @@ public struct ChartConfigurationOptions: Codable, Equatable {
}
}

-public struct ToolTipConfiguration: Codable, Equatable {
+public struct ToolTipConfiguration: Codable, Equatable, Sendable {
public var show: Bool?

public init(show: Bool? = nil) {
self.show = show
}
}

-public struct GridConfiguration: Codable, Equatable {
+public struct GridConfiguration: Codable, Equatable, Sendable {
public var top: Int?
public var bottom: Int?
public var left: Int?
@@ -70,7 +70,7 @@ public struct GridConfiguration: Codable, Equatable {
}
}

-public enum EasingFunction: String, Codable {
+public enum EasingFunction: String, Codable, Sendable {
case linear
case quadraticIn
case quadraticOut
@@ -104,7 +104,7 @@ public enum EasingFunction: String, Codable {
case bounceInOut
}

-public struct AxisOptions: Codable, Equatable {
+public struct AxisOptions: Codable, Equatable, Sendable {
/// Set this to false to prevent the axis from showing.
public var show: Bool?
public var position: Position?
@@ -127,12 +127,12 @@ public struct AxisOptions: Codable, Equatable {
self.inverse = inverse
}

-public enum Position: String, Codable, Equatable {
+public enum Position: String, Codable, Equatable, Sendable {
case top
case bottom
}

-public enum AxisType: String, Codable, Equatable {
+public enum AxisType: String, Codable, Equatable, Sendable {
/// Numerical axis, suitable for continuous data.
case value

@@ -1,5 +1,5 @@
/// Defines Automatic Compaction settings for a data source
-public struct AutoCompactionDynamicConfig: Codable, Hashable, Equatable {
+public struct AutoCompactionDynamicConfig: Codable, Hashable, Equatable, Sendable {
public init(
dataSource: String,
taskPriority: Int?,
@@ -26,7 +26,7 @@

/// Maximum number of total segment bytes processed per compaction task.
///
/// Since a time chunk must be processed in its entirety, if the segments for a particular time chunk have a total size in bytes greater than this parameter, compaction will not run for that time chunk. (default = 100,000,000,000,000 i.e. 100TB)

[GitHub Actions / Run Swiftlint, warning on line 29 in Sources/DataTransferObjects/Druid/configuration/AutomaticCompactionConfig/AutoCompactionDynamicConfiguration.swift: Line should be 200 characters or less: currently 249 characters (line_length)]
public let inputSegmentSizeBytes: Int?

/// The offset for searching segments to be compacted in ISO 8601 duration format. Strongly recommended to set for realtime datasources. See Data handling with compaction. (default = "P1D")
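// Editor's sketch, not part of this diff: decoding a minimal config,
// assuming compiler-synthesized Codable keys and that the fields hidden
// by this fold are optional. The data source name is invented.
import Foundation

let json = #"{"dataSource": "telemetry-signals"}"#.data(using: .utf8)!
let config = try JSONDecoder().decode(AutoCompactionDynamicConfig.self, from: json)
// Unset fields fall back to Druid's server-side defaults, e.g. 100TB for
// inputSegmentSizeBytes as documented above.
print(config.inputSegmentSizeBytes.map(String.init) ?? "Druid default (100TB)")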
@@ -1,4 +1,4 @@
-public struct IndexParallelTuningConfig: Codable, Hashable, Equatable {
+public struct IndexParallelTuningConfig: Codable, Hashable, Equatable, Sendable {
public init(
maxRowsInMemory: Int? = nil,
maxBytesInMemory: Int? = nil,
@@ -41,13 +41,13 @@
self.awaitSegmentAvailabilityTimeoutMillis = awaitSegmentAvailabilityTimeoutMillis
}

/// Used in determining when intermediate persists to disk should occur. Normally user does not need to set this, but depending on the nature of data, if rows are short in terms of bytes, user may not want to store a million rows in memory and this value should be set. (default = 1000000)

[GitHub Actions / Run Swiftlint, warning on line 44 in Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift: Line should be 200 characters or less: currently 293 characters (line_length)]
public let maxRowsInMemory: Int?

/// Used in determining when intermediate persists to disk should occur. Normally this is computed internally and user does not need to set it. This value represents number of bytes to aggregate in heap memory before persisting. This is based on a rough estimate of memory usage and not actual usage. The maximum heap memory usage for indexing is maxBytesInMemory * (2 + maxPendingPersists) (default = 1/6 of max JVM memory)

[GitHub Actions / Run Swiftlint, warning on line 47 in Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift: Line should be 200 characters or less: currently 428 characters (line_length)]
public let maxBytesInMemory: Int?

/// Limit of the number of segments to merge in a single phase when merging segments for publishing. This limit affects the total number of columns present in a set of segments to merge. If the limit is exceeded, segment merging occurs in multiple phases. Druid merges at least 2 segments per phase, regardless of this setting. (Default = -1 i.e. no limit)

[GitHub Actions / Run Swiftlint, warning on line 50 in Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift: Line should be 200 characters or less: currently 360 characters (line_length)]
public let maxColumnsToMerge: Int?

/// Hint to control the amount of data that each first phase task reads. Druid may ignore the hint depending on the implementation of the input source. (default: size-based split hint spec)
@@ -59,10 +59,10 @@
/// Defines segment storage format options to use at indexing time
public let indexSpec: IndexSpec?

/// Defines segment storage format options to use at indexing time for intermediate persisted temporary segments. You can use indexSpecForIntermediatePersists to disable dimension/metric compression on intermediate segments to reduce memory required for final merging. However, disabling compression on intermediate segments might increase page cache use while they are used before getting merged into final segment published.

[GitHub Actions / Run Swiftlint, warning on line 62 in Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift: Line should be 200 characters or less: currently 430 characters (line_length)]
public let indexSpecForIntermediatePersists: IndexSpec?

/// Maximum number of persists that can be pending but not started. If a new intermediate persist exceeds this limit, Druid blocks ingestion until the currently running persist finishes. One persist can be running concurrently with ingestion, and none can be queued up. The maximum heap memory usage for indexing scales with maxRowsInMemory * (2 + maxPendingPersists).

[GitHub Actions / Run Swiftlint, warning on line 65 in Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift: Line should be 200 characters or less: currently 370 characters (line_length)]
public let maxPendingPersists: Int?

/// Forces perfect rollup. The perfect rollup optimizes the total size of generated segments and querying time but increases indexing time. If true, specify intervals in the granularitySpec and use either hashed or single_dim for the partitionsSpec. You cannot use this flag in conjunction with appendToExisting of IOConfig. (default = false)
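// Editor's sketch, not part of this diff: a worked example of the heap
// bound quoted above, maxBytesInMemory * (2 + maxPendingPersists).
// The numbers are invented for illustration.
let maxBytesInMemory = 500_000_000   // 500 MB
let maxPendingPersists = 1
let worstCaseHeapBytes = maxBytesInMemory * (2 + maxPendingPersists)
print(worstCaseHeapBytes)            // 1500000000, i.e. roughly 1.5 GB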
@@ -1,6 +1,6 @@
/// https://druid.apache.org/docs/latest/ingestion/supervisor/#tuning-configuration
/// https://druid.apache.org/docs/latest/ingestion/kinesis-ingestion#tuning-configuration
-public struct KinesisTuningConfig: Codable, Hashable, Equatable {
+public struct KinesisTuningConfig: Codable, Hashable, Equatable, Sendable {
public init(
skipSequenceNumberAvailabilityCheck: Bool? = nil,
recordBufferSizeBytes: Int? = nil,
@@ -165,7 +165,7 @@ public struct KinesisTuningConfig: Codable, Hashable, Equatable {
public let appendableIndexSpec: AppendableIndexSpec?
}

-public struct AppendableIndexSpec: Codable, Hashable, Equatable {
+public struct AppendableIndexSpec: Codable, Hashable, Equatable, Sendable {
public init(type: String, preserveExistingMetrics: Bool? = nil) {
self.type = type
self.preserveExistingMetrics = preserveExistingMetrics
@@ -1,4 +1,4 @@
-public indirect enum TuningConfig: Codable, Hashable, Equatable {
+public indirect enum TuningConfig: Codable, Hashable, Equatable, Sendable {
case kinesis(KinesisTuningConfig)
case indexParallel(IndexParallelTuningConfig)
// case kafka not implemented
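// Editor's sketch, not part of this diff: a caller picks one concrete
// tuning config and wraps it in the enum. This assumes the initializer
// parameters truncated in this diff are defaulted like the visible ones.
let tuning = TuningConfig.indexParallel(IndexParallelTuningConfig())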
@@ -1,13 +1,13 @@
/// https://druid.apache.org/docs/latest/ingestion/data-formats/#input-format
///
/// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/InputFormat.java
-public struct InputFormat: Codable, Hashable, Equatable {
+public struct InputFormat: Codable, Hashable, Equatable, Sendable {
public init(type: InputFormat.InputFormatType, keepNullColumns: Bool? = nil) {
self.type = type
self.keepNullColumns = keepNullColumns
}

-    public enum InputFormatType: String, Codable, CaseIterable {
+    public enum InputFormatType: String, Codable, CaseIterable, Sendable {
case json
}
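// Editor's sketch, not part of this diff: the wire format this type models,
// assuming the compiler-synthesized Codable keys.
import Foundation

let raw = #"{"type": "json", "keepNullColumns": true}"#.data(using: .utf8)!
let format = try JSONDecoder().decode(InputFormat.self, from: raw)
print(format.type) // json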

@@ -1,5 +1,5 @@
/// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/InputSource.java#L61
-public indirect enum InputSource: Codable, Hashable, Equatable {
+public indirect enum InputSource: Codable, Hashable, Equatable, Sendable {
case druid(DruidInputSource)

enum CodingKeys: String, CodingKey {
@@ -1,6 +1,6 @@
/// https://druid.apache.org/docs/latest/ingestion/ingestion-spec/#dimensionsspec
/// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/impl/DimensionsSpec.java
-public struct DimensionsSpec: Codable, Hashable, Equatable {
+public struct DimensionsSpec: Codable, Hashable, Equatable, Sendable {
public init(
dimensions: [IngestionDimensionSpecDimension]? = nil,
dimensionExclusions: [String]? = nil,
@@ -60,7 +60,7 @@ public struct DimensionsSpec: Codable, Hashable, Equatable {
public let forceSegmentSortByTime: Bool?
}

-public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable {
+public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable, Sendable {
public init(
type: IngestionDimensionSpecDimension.DimensionType? = nil,
name: String,
@@ -73,7 +73,7 @@ public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable {
self.multiValueHandling = multiValueHandling
}

-    public enum DimensionType: String, Codable, Hashable, Equatable {
+    public enum DimensionType: String, Codable, Hashable, Equatable, Sendable {
case auto
case string
case long
@@ -82,7 +82,7 @@ public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable {
case json
}

-    public enum MultiValueHandlingOption: String, Codable, Hashable, Equatable {
+    public enum MultiValueHandlingOption: String, Codable, Hashable, Equatable, Sendable {
case array
case sorted_array
case sorted_set
@@ -108,7 +108,7 @@ public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable {
public let multiValueHandling: MultiValueHandlingOption?
}

-public struct IngestionDimensionSpecSpatialDimension: Codable, Hashable, Equatable {
+public struct IngestionDimensionSpecSpatialDimension: Codable, Hashable, Equatable, Sendable {
public init(dimName: String, dims: [String]? = nil) {
self.dimName = dimName
self.dims = dims
@@ -9,7 +9,7 @@
/// Treat __time as a millisecond timestamp: the number of milliseconds since Jan 1, 1970 at midnight UTC.
///
/// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/impl/TimestampSpec.java
-public struct TimestampSpec: Codable, Hashable, Equatable {
+public struct TimestampSpec: Codable, Hashable, Equatable, Sendable {
public init(
column: String? = nil,
format: TimestampSpec.TimestampSpecFormat? = nil,
@@ -20,7 +20,7 @@ public struct TimestampSpec: Codable, Hashable, Equatable {
self.missingValue = missingValue
}

-    public enum TimestampSpecFormat: String, Codable, CaseIterable {
+    public enum TimestampSpecFormat: String, Codable, CaseIterable, Sendable {
case iso
case posix
case millis
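// Editor's sketch, not part of this diff: a spec reading ISO 8601 strings
// from a "timestamp" column, assuming synthesized Codable keys. missingValue
// is omitted because its type is not visible in this hunk.
import Foundation

let raw = #"{"column": "timestamp", "format": "iso"}"#.data(using: .utf8)!
let spec = try JSONDecoder().decode(TimestampSpec.self, from: raw)
print(spec.format == .iso) // true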
@@ -1,6 +1,6 @@
/// https://druid.apache.org/docs/latest/ingestion/ingestion-spec/#granularityspec
/// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/indexer/granularity/GranularitySpec.java
-public struct GranularitySpec: Codable, Hashable, Equatable {
+public struct GranularitySpec: Codable, Hashable, Equatable, Sendable {
public init(
type: GranularitySpec.GranularitySpecType? = nil,
segmentGranularity: QueryGranularity? = nil,
@@ -15,7 +15,7 @@ public struct GranularitySpec: Codable, Hashable, Equatable {
self.intervals = intervals
}

-    public enum GranularitySpecType: String, Codable, CaseIterable {
+    public enum GranularitySpecType: String, Codable, CaseIterable, Sendable {
case uniform
}

@@ -1,6 +1,6 @@
/// https://druid.apache.org/docs/latest/ingestion/native-batch#ioconfig
/// https://github.com/apache/druid/blob/master/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexIOConfig.java
-public struct ParallelIndexIOConfig: Codable, Hashable, Equatable {
+public struct ParallelIndexIOConfig: Codable, Hashable, Equatable, Sendable {
public init(inputFormat: InputFormat?, inputSource: InputSource? = nil, appendToExisting: Bool? = nil, dropExisting: Bool? = nil) {
self.inputFormat = inputFormat
self.inputSource = inputSource
@@ -1,5 +1,5 @@
/// The container object for the supervisor configuration.
-public struct ParallelIndexIngestionSpec: Codable, Hashable, Equatable {
+public struct ParallelIndexIngestionSpec: Codable, Hashable, Equatable, Sendable {
public init(ioConfig: IoConfig? = nil, tuningConfig: TuningConfig? = nil, dataSchema: DataSchema? = nil) {
self.ioConfig = ioConfig
self.tuningConfig = tuningConfig
@@ -4,7 +4,7 @@
/// field needs to be specified in the ingestion spec when using this input source.
/// https://github.com/apache/druid/blob/master/indexing-service/src/main/java/org/apache/druid/indexing/input/DruidInputSource.java
///
-public struct DruidInputSource: Codable, Hashable, Equatable {
+public struct DruidInputSource: Codable, Hashable, Equatable, Sendable {
public init(dataSource: String, interval: QueryTimeInterval, filter: Filter? = nil) {
self.dataSource = dataSource
self.interval = interval
@@ -1,7 +1,7 @@
/// https://druid.apache.org/docs/latest/ingestion/supervisor/#io-configuration
/// https://druid.apache.org/docs/latest/ingestion/kinesis-ingestion#io-configuration
/// https://github.com/apache/druid/blob/master/extensions-core/kinesis-indexing-service/src/main/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskIOConfig.java
-public struct KinesisIndexTaskIOConfig: Codable, Hashable, Equatable {
+public struct KinesisIndexTaskIOConfig: Codable, Hashable, Equatable, Sendable {
public init(
stream: String,
inputFormat: InputFormat,
@@ -1,7 +1,7 @@
/// Defines segment storage format options to use at indexing time
///
/// https://druid.apache.org/docs/latest/ingestion/ingestion-spec/#indexspec
-public struct IndexSpec: Codable, Hashable, Equatable {
+public struct IndexSpec: Codable, Hashable, Equatable, Sendable {
public init(
bitmap: IndexSpecBitmap? = nil,
dimensionCompression: IndexSpec.CompressionType? = nil,
@@ -20,15 +20,15 @@ public struct IndexSpec: Codable, Hashable, Equatable {
self.jsonCompression = jsonCompression
}

-    public enum CompressionType: String, Codable, CaseIterable {
+    public enum CompressionType: String, Codable, CaseIterable, Sendable {
case lz4
case lzf
case zstd
case uncompressed
case none
}

-    public enum LongEncodingType: String, Codable, CaseIterable {
+    public enum LongEncodingType: String, Codable, CaseIterable, Sendable {
case longs
case auto
}
@@ -42,20 +42,20 @@ public struct IndexSpec: Codable, Hashable, Equatable {
public let jsonCompression: CompressionType?
}

-public struct IndexSpecBitmap: Codable, Hashable, Equatable {
+public struct IndexSpecBitmap: Codable, Hashable, Equatable, Sendable {
public init(type: IndexSpecBitmap.IndexSpecBitmapType) {
self.type = type
}

-    public enum IndexSpecBitmapType: String, Codable, CaseIterable {
+    public enum IndexSpecBitmapType: String, Codable, CaseIterable, Sendable {
case roaring
case concise
}

public let type: IndexSpecBitmapType
}

-public struct StringDictionaryEncoding: Codable, Hashable, Equatable {
+public struct StringDictionaryEncoding: Codable, Hashable, Equatable, Sendable {
public init(
type: StringDictionaryEncoding.StringDictionaryEncodingType,
bucketSize: Int? = nil,
Expand All @@ -66,7 +66,7 @@ public struct StringDictionaryEncoding: Codable, Hashable, Equatable {
self.formatVersion = formatVersion
}

-    public enum StringDictionaryEncodingType: String, Codable, CaseIterable {
+    public enum StringDictionaryEncodingType: String, Codable, CaseIterable, Sendable {
case utf8
case frontCoded
}
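// Editor's sketch, not part of this diff: combining the pieces above with
// the memberwise initializers shown in this file. Parameters not listed
// stay at their nil defaults (assuming the truncated ones are defaulted too).
let indexSpec = IndexSpec(
    bitmap: IndexSpecBitmap(type: .roaring),
    dimensionCompression: .zstd
)
let encoding = StringDictionaryEncoding(type: .frontCoded, bucketSize: 4)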
@@ -9,7 +9,7 @@
/// For best-effort rollup, use dynamic.
///
/// https://druid.apache.org/docs/latest/ingestion/native-batch/#partitionsspec
-public indirect enum PartitionsSpec: Codable, Hashable, Equatable {
+public indirect enum PartitionsSpec: Codable, Hashable, Equatable, Sendable {
case dynamic(DynamicPartitionSpec)
case hashed(HashedPartitionSpec)
case singleDimension(SingleDimensionPartitionSpec)
@@ -64,7 +64,7 @@ public indirect enum PartitionsSpec: Codable, Hashable, Equatable {
///
/// Whenever the number of rows in the current segment exceeds maxRowsPerSegment.
/// When the total number of rows in all segments across all time chunks reaches maxTotalRows. At this point the task pushes all segments created so far to the deep storage and creates new ones.
-public struct DynamicPartitionSpec: Codable, Hashable, Equatable {
+public struct DynamicPartitionSpec: Codable, Hashable, Equatable, Sendable {
public init(maxRowsPerSegment: Int? = nil, maxTotalRows: Int? = nil) {
self.maxRowsPerSegment = maxRowsPerSegment
self.maxTotalRows = maxTotalRows
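// Editor's sketch, not part of this diff: the two thresholds described
// above, with invented numbers. Push a segment every 5M rows; push all
// segments once 20M rows accumulate across time chunks.
let spec = PartitionsSpec.dynamic(
    DynamicPartitionSpec(maxRowsPerSegment: 5_000_000, maxTotalRows: 20_000_000)
)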
@@ -84,7 +84,7 @@ public struct DynamicPartitionSpec: Codable, Hashable, Equatable {
/// In the partial segment generation phase, just like the Map phase in MapReduce, the Parallel task splits the input data based on the split hint spec and assigns each split to a worker task. Each worker task (type partial_index_generate) reads the assigned split, and partitions rows by the time chunk from segmentGranularity (primary partition key) in the granularitySpec and then by the hash value of partitionDimensions (secondary partition key) in the partitionsSpec. The partitioned data is stored in local storage of the Middle Manager or the Indexer.
///
/// The partial segment merge phase is similar to the Reduce phase in MapReduce. The Parallel task spawns a new set of worker tasks (type partial_index_generic_merge) to merge the partitioned data created in the previous phase. Here, the partitioned data is shuffled based on the time chunk and the hash value of partitionDimensions to be merged; each worker task reads the data falling in the same time chunk and the same hash value from multiple Middle Manager/Indexer processes and merges them to create the final segments. Finally, they push the final segments to the deep storage at once.
-public struct HashedPartitionSpec: Codable, Hashable, Equatable {
+public struct HashedPartitionSpec: Codable, Hashable, Equatable, Sendable {
public init(
numShards: Int? = nil,
targetRowsPerSegment: Int? = nil,
@@ -114,7 +114,7 @@ public struct HashedPartitionSpec: Codable, Hashable, Equatable {
/// When you use this technique to partition your data, segment sizes may be unequally distributed if the data in your partitionDimension is also unequally distributed. Therefore, to avoid imbalance in data layout, review the distribution of values in your source data before deciding on a partitioning strategy.
///
/// Range partitioning is not possible on multi-value dimensions. If the provided partitionDimension is multi-value, your ingestion job will report an error.
-public struct SingleDimensionPartitionSpec: Codable, Hashable, Equatable {
+public struct SingleDimensionPartitionSpec: Codable, Hashable, Equatable, Sendable {
public init(
partitionDimension: String,
targetRowsPerSegment: Int? = nil,
@@ -145,7 +145,7 @@ public struct SingleDimensionPartitionSpec: Codable, Hashable, Equatable {
/// Range partitioning has several benefits related to storage footprint and query performance. Multi-dimension range partitioning improves over single-dimension range partitioning by allowing Druid to distribute segment sizes more evenly, and to prune on more dimensions.
///
/// Range partitioning is not possible on multi-value dimensions. If one of the provided partitionDimensions is multi-value, your ingestion job will report an error.
-public struct RangePartitionSpec: Codable, Hashable, Equatable {
+public struct RangePartitionSpec: Codable, Hashable, Equatable, Sendable {
public init(
partitionDimensions: [String],
targetRowsPerSegment: Int? = nil,
@@ -1,6 +1,6 @@
/// The split hint spec is used to help the supervisor task divide input sources. Each worker task processes a single input division. You can control the amount of data each worker task reads during the first phase.

[GitHub Actions / Run Swiftlint, warning on line 1 in Sources/DataTransferObjects/Druid/ingestion/native-batch/SplitHintSpec.swift: Line should be 200 characters or less: currently 215 characters (line_length)]
///
/// https://druid.apache.org/docs/latest/ingestion/native-batch/#split-hint-spec
-public indirect enum SplitHintSpec: Codable, Hashable, Equatable {
+public indirect enum SplitHintSpec: Codable, Hashable, Equatable, Sendable {
case notImplemented
}
@@ -14,7 +14,7 @@
/// The detailed behavior of the parallel task is different depending on the partitionsSpec. See partitionsSpec for more details.
///
/// https://druid.apache.org/docs/latest/ingestion/native-batch
-public struct IndexParallelTaskSpec: Codable, Hashable, Equatable {
+public struct IndexParallelTaskSpec: Codable, Hashable, Equatable, Sendable {
public init(id: String? = nil, spec: ParallelIndexIngestionSpec) {
self.id = id
self.spec = spec
@@ -4,7 +4,7 @@
/// tasks are generally submitted for you by a supervisor.
///
/// https://druid.apache.org/docs/latest/ingestion/tasks
-public indirect enum TaskSpec: Codable, Hashable, Equatable {
+public indirect enum TaskSpec: Codable, Hashable, Equatable, Sendable {
case indexParallel(IndexParallelTaskSpec)

enum CodingKeys: String, CodingKey {
@@ -1,7 +1,7 @@
/// https://druid.apache.org/docs/latest/ingestion/ingestion-spec#dataschema
///
/// https://github.com/apache/druid/blob/master/server/src/main/java/org/apache/druid/segment/indexing/DataSchema.java
-public struct DataSchema: Codable, Hashable, Equatable {
+public struct DataSchema: Codable, Hashable, Equatable, Sendable {
public init(
dataSource: String,
timestampSpec: TimestampSpec? = nil,