From bf763f42eaa492ed3cdd75dfb09e735bcaf290f0 Mon Sep 17 00:00:00 2001 From: Daniel Jilg Date: Sun, 10 Aug 2025 17:47:16 +0200 Subject: [PATCH] Make everything sendable --- .../ChartAggregationConfiguration.swift | 2 +- .../ChartConfiguration.swift | 4 +- .../ChartConfigurationOptions.swift | 14 +++---- .../AutoCompactionDynamicConfiguration.swift | 2 +- .../IndexParallelTuningConfig.swift | 2 +- .../TuningConfig/KinesisTuningConfig.swift | 4 +- .../TuningConfig/TuningConfig.swift | 2 +- .../Druid/data/input/InputFormat.swift | 4 +- .../Druid/data/input/InputSource.swift | 2 +- .../data/input/impl/DimensionsSpec.swift | 10 ++--- .../Druid/data/input/impl/TimestampSpec.swift | 4 +- .../indexer/granularity/GranularitySpec.swift | 4 +- .../parallel/ParallelIndexIOConfig.swift | 2 +- .../parallel/ParallelIndexIngestionSpec.swift | 2 +- .../indexing/input/DruidInputSource.swift | 2 +- .../kinesis/KinesisIndexTaskIOConfig.swift | 2 +- .../ingestion/ingestion-spec/IndexSpec.swift | 14 +++---- .../native-batch/PartitionsSpec.swift | 10 ++--- .../native-batch/SplitHintSpec.swift | 2 +- .../tasks/IndexParallelTaskSpec.swift | 2 +- .../Druid/ingestion/tasks/TaskSpec.swift | 2 +- .../Druid/segment/indexing/DataSchema.swift | 2 +- .../Druid/segment/indexing/IoConfig.swift | 2 +- .../segment/transform/TransformSpec.swift | 4 +- .../Query/Aggregator.swift | 24 +++++------ .../Query/BaseFilters.swift | 2 +- .../Query/CustomQuery.swift | 8 ++-- .../Query/CustomSQLQuery.swift | 2 +- .../Query/Datasource.swift | 4 +- .../Query/DimensionSpec.swift | 8 ++-- .../Query/ExtractionFunction.swift | 10 ++--- .../DataTransferObjects/Query/Filter.swift | 18 ++++---- .../Query/HavingSpec.swift | 18 ++++---- .../Query/NamedFilter.swift | 2 +- .../Query/PostAggregator.swift | 42 +++++++++---------- .../Query/QueryContext.swift | 2 +- .../Query/QueryGranularity.swift | 2 +- .../Query/QueryTimeInterval.swift | 6 +-- .../Query/RelativeTimeInterval.swift | 8 ++-- .../Query/StringComparators.swift | 2 
+- .../Query/TopNMetricSpec.swift | 8 ++-- .../Query/ValueFormatter.swift | 26 ++++++------ .../ExpressionVirtualColumn.swift | 2 +- .../ListFilteredVirtualColumn.swift | 2 +- .../Query/Virtual Column/VirtualColumn.swift | 2 +- .../QueryResult/QueryResult.swift | 32 +++++++------- .../QueryResult/QueryResultWrapper.swift | 2 +- .../Supervisor/IdleConfig.swift | 2 +- .../Supervisor/Supervisor.swift | 4 +- 49 files changed, 169 insertions(+), 169 deletions(-) diff --git a/Sources/DataTransferObjects/Chart Configuration/ChartAggregationConfiguration.swift b/Sources/DataTransferObjects/Chart Configuration/ChartAggregationConfiguration.swift index 1b66e99..2128594 100644 --- a/Sources/DataTransferObjects/Chart Configuration/ChartAggregationConfiguration.swift +++ b/Sources/DataTransferObjects/Chart Configuration/ChartAggregationConfiguration.swift @@ -5,7 +5,7 @@ import Foundation /// Maps to "seriesConfiguration" internally in the charting library. /// /// Subset of https://echarts.apache.org/en/option.html#series-line -public struct ChartAggregationConfiguration: Codable, Equatable { +public struct ChartAggregationConfiguration: Codable, Equatable, Sendable { public var startAngle: Int? public var endAngle: Int? public var radius: [String]? diff --git a/Sources/DataTransferObjects/Chart Configuration/ChartConfiguration.swift b/Sources/DataTransferObjects/Chart Configuration/ChartConfiguration.swift index 4e3091e..efa6902 100644 --- a/Sources/DataTransferObjects/Chart Configuration/ChartConfiguration.swift +++ b/Sources/DataTransferObjects/Chart Configuration/ChartConfiguration.swift @@ -3,7 +3,7 @@ import Foundation /// Display configuration for charts. Overrides various default display options. /// /// Not hashable, because we don't want to include these values in the cache, as cached calculation results won't change based on these values. 
-public struct ChartConfiguration: Codable, Equatable { +public struct ChartConfiguration: Codable, Equatable, Sendable { /// The display mode for the chart. public var displayMode: ChartDisplayMode? @@ -33,7 +33,7 @@ public struct ChartConfiguration: Codable, Equatable { } } -public enum ChartDisplayMode: String, Codable { +public enum ChartDisplayMode: String, Codable, Sendable { case raw case barChart case lineChart diff --git a/Sources/DataTransferObjects/Chart Configuration/ChartConfigurationOptions.swift b/Sources/DataTransferObjects/Chart Configuration/ChartConfigurationOptions.swift index 81fc15c..2cddd28 100644 --- a/Sources/DataTransferObjects/Chart Configuration/ChartConfigurationOptions.swift +++ b/Sources/DataTransferObjects/Chart Configuration/ChartConfigurationOptions.swift @@ -3,7 +3,7 @@ import Foundation /// Options for configuring a chart in our charting library /// /// Subset of echart's options https://echarts.apache.org/en/option.html -public struct ChartConfigurationOptions: Codable, Equatable { +public struct ChartConfigurationOptions: Codable, Equatable, Sendable { /// Whether to enable animation. public var animation: Bool? @@ -40,7 +40,7 @@ public struct ChartConfigurationOptions: Codable, Equatable { } } -public struct ToolTipConfiguration: Codable, Equatable { +public struct ToolTipConfiguration: Codable, Equatable, Sendable { public var show: Bool? public init(show: Bool? = nil) { @@ -48,7 +48,7 @@ public struct ToolTipConfiguration: Codable, Equatable { } } -public struct GridConfiguration: Codable, Equatable { +public struct GridConfiguration: Codable, Equatable, Sendable { public var top: Int? public var bottom: Int? public var left: Int? 
@@ -70,7 +70,7 @@ public struct GridConfiguration: Codable, Equatable { } } -public enum EasingFunction: String, Codable { +public enum EasingFunction: String, Codable, Sendable { case linear case quadraticIn case quadraticOut @@ -104,7 +104,7 @@ public enum EasingFunction: String, Codable { case bounceInOut } -public struct AxisOptions: Codable, Equatable { +public struct AxisOptions: Codable, Equatable, Sendable { /// Set this to false to prevent the axis from showing. public var show: Bool? public var position: Position? @@ -127,12 +127,12 @@ public struct AxisOptions: Codable, Equatable { self.inverse = inverse } - public enum Position: String, Codable, Equatable { + public enum Position: String, Codable, Equatable, Sendable { case top case bottom } - public enum AxisType: String, Codable, Equatable { + public enum AxisType: String, Codable, Equatable, Sendable { /// Numerical axis, suitable for continuous data. case value diff --git a/Sources/DataTransferObjects/Druid/configuration/AutomaticCompactionConfig/AutoCompactionDynamicConfiguration.swift b/Sources/DataTransferObjects/Druid/configuration/AutomaticCompactionConfig/AutoCompactionDynamicConfiguration.swift index 924c255..08b2143 100644 --- a/Sources/DataTransferObjects/Druid/configuration/AutomaticCompactionConfig/AutoCompactionDynamicConfiguration.swift +++ b/Sources/DataTransferObjects/Druid/configuration/AutomaticCompactionConfig/AutoCompactionDynamicConfiguration.swift @@ -1,5 +1,5 @@ /// Defines Automatic Compaction settings for a data source -public struct AutoCompactionDynamicConfig: Codable, Hashable, Equatable { +public struct AutoCompactionDynamicConfig: Codable, Hashable, Equatable, Sendable { public init( dataSource: String, taskPriority: Int?, diff --git a/Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift b/Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift index 6f2a609..498fc9a 100644 --- 
a/Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift +++ b/Sources/DataTransferObjects/Druid/configuration/TuningConfig/IndexParallelTuningConfig.swift @@ -1,4 +1,4 @@ -public struct IndexParallelTuningConfig: Codable, Hashable, Equatable { +public struct IndexParallelTuningConfig: Codable, Hashable, Equatable, Sendable { public init( maxRowsInMemory: Int? = nil, maxBytesInMemory: Int? = nil, diff --git a/Sources/DataTransferObjects/Druid/configuration/TuningConfig/KinesisTuningConfig.swift b/Sources/DataTransferObjects/Druid/configuration/TuningConfig/KinesisTuningConfig.swift index 881671e..7aab4dd 100644 --- a/Sources/DataTransferObjects/Druid/configuration/TuningConfig/KinesisTuningConfig.swift +++ b/Sources/DataTransferObjects/Druid/configuration/TuningConfig/KinesisTuningConfig.swift @@ -1,6 +1,6 @@ /// https://druid.apache.org/docs/latest/ingestion/supervisor/#tuning-configuration /// https://druid.apache.org/docs/latest/ingestion/kinesis-ingestion#tuning-configuration -public struct KinesisTuningConfig: Codable, Hashable, Equatable { +public struct KinesisTuningConfig: Codable, Hashable, Equatable, Sendable { public init( skipSequenceNumberAvailabilityCheck: Bool? = nil, recordBufferSizeBytes: Int? = nil, @@ -165,7 +165,7 @@ public struct KinesisTuningConfig: Codable, Hashable, Equatable { public let appendableIndexSpec: AppendableIndexSpec? } -public struct AppendableIndexSpec: Codable, Hashable, Equatable { +public struct AppendableIndexSpec: Codable, Hashable, Equatable, Sendable { public init(type: String, preserveExistingMetrics: Bool? 
= nil) { self.type = type self.preserveExistingMetrics = preserveExistingMetrics diff --git a/Sources/DataTransferObjects/Druid/configuration/TuningConfig/TuningConfig.swift b/Sources/DataTransferObjects/Druid/configuration/TuningConfig/TuningConfig.swift index c89516b..3e221e8 100644 --- a/Sources/DataTransferObjects/Druid/configuration/TuningConfig/TuningConfig.swift +++ b/Sources/DataTransferObjects/Druid/configuration/TuningConfig/TuningConfig.swift @@ -1,4 +1,4 @@ -public indirect enum TuningConfig: Codable, Hashable, Equatable { +public indirect enum TuningConfig: Codable, Hashable, Equatable, Sendable { case kinesis(KinesisTuningConfig) case indexParallel(IndexParallelTuningConfig) // case kafka not implemented diff --git a/Sources/DataTransferObjects/Druid/data/input/InputFormat.swift b/Sources/DataTransferObjects/Druid/data/input/InputFormat.swift index 6fb3409..c2479ab 100644 --- a/Sources/DataTransferObjects/Druid/data/input/InputFormat.swift +++ b/Sources/DataTransferObjects/Druid/data/input/InputFormat.swift @@ -1,13 +1,13 @@ /// https://druid.apache.org/docs/latest/ingestion/data-formats/#input-format /// /// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/InputFormat.java -public struct InputFormat: Codable, Hashable, Equatable { +public struct InputFormat: Codable, Hashable, Equatable, Sendable { public init(type: InputFormat.InputFormatType, keepNullColumns: Bool? 
= nil) { self.type = type self.keepNullColumns = keepNullColumns } - public enum InputFormatType: String, Codable, CaseIterable { + public enum InputFormatType: String, Codable, CaseIterable, Sendable { case json } diff --git a/Sources/DataTransferObjects/Druid/data/input/InputSource.swift b/Sources/DataTransferObjects/Druid/data/input/InputSource.swift index b7f961e..7f2c666 100644 --- a/Sources/DataTransferObjects/Druid/data/input/InputSource.swift +++ b/Sources/DataTransferObjects/Druid/data/input/InputSource.swift @@ -1,5 +1,5 @@ /// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/InputSource.java#L61 -public indirect enum InputSource: Codable, Hashable, Equatable { +public indirect enum InputSource: Codable, Hashable, Equatable, Sendable { case druid(DruidInputSource) enum CodingKeys: String, CodingKey { diff --git a/Sources/DataTransferObjects/Druid/data/input/impl/DimensionsSpec.swift b/Sources/DataTransferObjects/Druid/data/input/impl/DimensionsSpec.swift index c3b88a3..7b183ee 100644 --- a/Sources/DataTransferObjects/Druid/data/input/impl/DimensionsSpec.swift +++ b/Sources/DataTransferObjects/Druid/data/input/impl/DimensionsSpec.swift @@ -1,6 +1,6 @@ /// https://druid.apache.org/docs/latest/ingestion/ingestion-spec/#dimensionsspec /// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/impl/DimensionsSpec.java -public struct DimensionsSpec: Codable, Hashable, Equatable { +public struct DimensionsSpec: Codable, Hashable, Equatable, Sendable { public init( dimensions: [IngestionDimensionSpecDimension]? = nil, dimensionExclusions: [String]? = nil, @@ -60,7 +60,7 @@ public struct DimensionsSpec: Codable, Hashable, Equatable { public let forceSegmentSortByTime: Bool? 
} -public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable { +public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable, Sendable { public init( type: IngestionDimensionSpecDimension.DimensionType? = nil, name: String, @@ -73,7 +73,7 @@ public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable { self.multiValueHandling = multiValueHandling } - public enum DimensionType: String, Codable, Hashable, Equatable { + public enum DimensionType: String, Codable, Hashable, Equatable, Sendable { case auto case string case long @@ -82,7 +82,7 @@ public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable { case json } - public enum MultiValueHandlingOption: String, Codable, Hashable, Equatable { + public enum MultiValueHandlingOption: String, Codable, Hashable, Equatable, Sendable { case array case sorted_array case sorted_set @@ -108,7 +108,7 @@ public struct IngestionDimensionSpecDimension: Codable, Hashable, Equatable { public let multiValueHandling: MultiValueHandlingOption? } -public struct IngestionDimensionSpecSpatialDimension: Codable, Hashable, Equatable { +public struct IngestionDimensionSpecSpatialDimension: Codable, Hashable, Equatable, Sendable { public init(dimName: String, dims: [String]? = nil) { self.dimName = dimName self.dims = dims diff --git a/Sources/DataTransferObjects/Druid/data/input/impl/TimestampSpec.swift b/Sources/DataTransferObjects/Druid/data/input/impl/TimestampSpec.swift index de18061..b03ed84 100644 --- a/Sources/DataTransferObjects/Druid/data/input/impl/TimestampSpec.swift +++ b/Sources/DataTransferObjects/Druid/data/input/impl/TimestampSpec.swift @@ -9,7 +9,7 @@ /// Treat __time as a millisecond timestamp: the number of milliseconds since Jan 1, 1970 at midnight UTC. 
/// /// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/data/input/impl/TimestampSpec.java -public struct TimestampSpec: Codable, Hashable, Equatable { +public struct TimestampSpec: Codable, Hashable, Equatable, Sendable { public init( column: String? = nil, format: TimestampSpec.TimestampSpecFormat? = nil, @@ -20,7 +20,7 @@ public struct TimestampSpec: Codable, Hashable, Equatable { self.missingValue = missingValue } - public enum TimestampSpecFormat: String, Codable, CaseIterable { + public enum TimestampSpecFormat: String, Codable, CaseIterable, Sendable { case iso case posix case millis diff --git a/Sources/DataTransferObjects/Druid/indexer/granularity/GranularitySpec.swift b/Sources/DataTransferObjects/Druid/indexer/granularity/GranularitySpec.swift index 80008cd..be20265 100644 --- a/Sources/DataTransferObjects/Druid/indexer/granularity/GranularitySpec.swift +++ b/Sources/DataTransferObjects/Druid/indexer/granularity/GranularitySpec.swift @@ -1,6 +1,6 @@ /// https://druid.apache.org/docs/latest/ingestion/ingestion-spec/#granularityspec /// https://github.com/apache/druid/blob/master/processing/src/main/java/org/apache/druid/indexer/granularity/GranularitySpec.java -public struct GranularitySpec: Codable, Hashable, Equatable { +public struct GranularitySpec: Codable, Hashable, Equatable, Sendable { public init( type: GranularitySpec.GranularitySpecType? = nil, segmentGranularity: QueryGranularity? 
= nil, @@ -15,7 +15,7 @@ public struct GranularitySpec: Codable, Hashable, Equatable { self.intervals = intervals } - public enum GranularitySpecType: String, Codable, CaseIterable { + public enum GranularitySpecType: String, Codable, CaseIterable, Sendable { case uniform } diff --git a/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIOConfig.swift b/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIOConfig.swift index ac7a34f..05f3b61 100644 --- a/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIOConfig.swift +++ b/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIOConfig.swift @@ -1,6 +1,6 @@ /// https://druid.apache.org/docs/latest/ingestion/native-batch#ioconfig /// https://github.com/apache/druid/blob/master/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexIOConfig.java -public struct ParallelIndexIOConfig: Codable, Hashable, Equatable { +public struct ParallelIndexIOConfig: Codable, Hashable, Equatable, Sendable { public init(inputFormat: InputFormat?, inputSource: InputSource? = nil, appendToExisting: Bool? = nil, dropExisting: Bool? = nil) { self.inputFormat = inputFormat self.inputSource = inputSource diff --git a/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIngestionSpec.swift b/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIngestionSpec.swift index 179a388..39d13c9 100644 --- a/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIngestionSpec.swift +++ b/Sources/DataTransferObjects/Druid/indexing/common/task/batch/parallel/ParallelIndexIngestionSpec.swift @@ -1,5 +1,5 @@ /// The container object for the supervisor configuration. 
-public struct ParallelIndexIngestionSpec: Codable, Hashable, Equatable { +public struct ParallelIndexIngestionSpec: Codable, Hashable, Equatable, Sendable { public init(ioConfig: IoConfig? = nil, tuningConfig: TuningConfig? = nil, dataSchema: DataSchema? = nil) { self.ioConfig = ioConfig self.tuningConfig = tuningConfig diff --git a/Sources/DataTransferObjects/Druid/indexing/input/DruidInputSource.swift b/Sources/DataTransferObjects/Druid/indexing/input/DruidInputSource.swift index 7948ddd..312acc1 100644 --- a/Sources/DataTransferObjects/Druid/indexing/input/DruidInputSource.swift +++ b/Sources/DataTransferObjects/Druid/indexing/input/DruidInputSource.swift @@ -4,7 +4,7 @@ /// field needs to be specified in the ingestion spec when using this input source. /// https://github.com/apache/druid/blob/master/indexing-service/src/main/java/org/apache/druid/indexing/input/DruidInputSource.java /// -public struct DruidInputSource: Codable, Hashable, Equatable { +public struct DruidInputSource: Codable, Hashable, Equatable, Sendable { public init(dataSource: String, interval: QueryTimeInterval, filter: Filter? 
= nil) { self.dataSource = dataSource self.interval = interval diff --git a/Sources/DataTransferObjects/Druid/indexing/kinesis/KinesisIndexTaskIOConfig.swift b/Sources/DataTransferObjects/Druid/indexing/kinesis/KinesisIndexTaskIOConfig.swift index c4f079d..c8b0d5b 100644 --- a/Sources/DataTransferObjects/Druid/indexing/kinesis/KinesisIndexTaskIOConfig.swift +++ b/Sources/DataTransferObjects/Druid/indexing/kinesis/KinesisIndexTaskIOConfig.swift @@ -1,7 +1,7 @@ /// https://druid.apache.org/docs/latest/ingestion/supervisor/#io-configuration /// https://druid.apache.org/docs/latest/ingestion/kinesis-ingestion#io-configuration /// https://github.com/apache/druid/blob/master/extensions-core/kinesis-indexing-service/src/main/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskIOConfig.java -public struct KinesisIndexTaskIOConfig: Codable, Hashable, Equatable { +public struct KinesisIndexTaskIOConfig: Codable, Hashable, Equatable, Sendable { public init( stream: String, inputFormat: InputFormat, diff --git a/Sources/DataTransferObjects/Druid/ingestion/ingestion-spec/IndexSpec.swift b/Sources/DataTransferObjects/Druid/ingestion/ingestion-spec/IndexSpec.swift index abc7d49..19b19d9 100644 --- a/Sources/DataTransferObjects/Druid/ingestion/ingestion-spec/IndexSpec.swift +++ b/Sources/DataTransferObjects/Druid/ingestion/ingestion-spec/IndexSpec.swift @@ -1,7 +1,7 @@ /// Defines segment storage format options to use at indexing time /// /// https://druid.apache.org/docs/latest/ingestion/ingestion-spec/#indexspec -public struct IndexSpec: Codable, Hashable, Equatable { +public struct IndexSpec: Codable, Hashable, Equatable, Sendable { public init( bitmap: IndexSpecBitmap? = nil, dimensionCompression: IndexSpec.CompressionType? 
= nil, @@ -20,7 +20,7 @@ public struct IndexSpec: Codable, Hashable, Equatable { self.jsonCompression = jsonCompression } - public enum CompressionType: String, Codable, CaseIterable { + public enum CompressionType: String, Codable, CaseIterable, Sendable { case lz4 case lzf case zstd @@ -28,7 +28,7 @@ public struct IndexSpec: Codable, Hashable, Equatable { case none } - public enum LongEncodingType: String, Codable, CaseIterable { + public enum LongEncodingType: String, Codable, CaseIterable, Sendable { case longs case auto } @@ -42,12 +42,12 @@ public struct IndexSpec: Codable, Hashable, Equatable { public let jsonCompression: CompressionType? } -public struct IndexSpecBitmap: Codable, Hashable, Equatable { +public struct IndexSpecBitmap: Codable, Hashable, Equatable, Sendable { public init(type: IndexSpecBitmap.IndexSpecBitmapType) { self.type = type } - public enum IndexSpecBitmapType: String, Codable, CaseIterable { + public enum IndexSpecBitmapType: String, Codable, CaseIterable, Sendable { case roaring case concise } @@ -55,7 +55,7 @@ public struct IndexSpecBitmap: Codable, Hashable, Equatable { public let type: IndexSpecBitmapType } -public struct StringDictionaryEncoding: Codable, Hashable, Equatable { +public struct StringDictionaryEncoding: Codable, Hashable, Equatable, Sendable { public init( type: StringDictionaryEncoding.StringDictionaryEncodingType, bucketSize: Int? 
= nil, @@ -66,7 +66,7 @@ public struct StringDictionaryEncoding: Codable, Hashable, Equatable { self.formatVersion = formatVersion } - public enum StringDictionaryEncodingType: String, Codable, CaseIterable { + public enum StringDictionaryEncodingType: String, Codable, CaseIterable, Sendable { case utf8 case frontCoded } diff --git a/Sources/DataTransferObjects/Druid/ingestion/native-batch/PartitionsSpec.swift b/Sources/DataTransferObjects/Druid/ingestion/native-batch/PartitionsSpec.swift index 5a5394b..3f7fb04 100644 --- a/Sources/DataTransferObjects/Druid/ingestion/native-batch/PartitionsSpec.swift +++ b/Sources/DataTransferObjects/Druid/ingestion/native-batch/PartitionsSpec.swift @@ -9,7 +9,7 @@ /// For best-effort rollup, use dynamic. /// /// https://druid.apache.org/docs/latest/ingestion/native-batch/#partitionsspec -public indirect enum PartitionsSpec: Codable, Hashable, Equatable { +public indirect enum PartitionsSpec: Codable, Hashable, Equatable, Sendable { case dynamic(DynamicPartitionSpec) case hashed(HashedPartitionSpec) case singleDimension(SingleDimensionPartitionSpec) @@ -64,7 +64,7 @@ public indirect enum PartitionsSpec: Codable, Hashable, Equatable { /// /// Whenever the number of rows in the current segment exceeds maxRowsPerSegment. /// When the total number of rows in all segments across all time chunks reaches to maxTotalRows. At this point the task pushes all segments created so far to the deep storage and creates new ones. -public struct DynamicPartitionSpec: Codable, Hashable, Equatable { +public struct DynamicPartitionSpec: Codable, Hashable, Equatable, Sendable { public init(maxRowsPerSegment: Int? = nil, maxTotalRows: Int? 
= nil) { self.maxRowsPerSegment = maxRowsPerSegment self.maxTotalRows = maxTotalRows @@ -84,7 +84,7 @@ public struct DynamicPartitionSpec: Codable, Hashable, Equatable { /// In the partial segment generation phase, just like the Map phase in MapReduce, the Parallel task splits the input data based on the split hint spec and assigns each split to a worker task. Each worker task (type partial_index_generate) reads the assigned split, and partitions rows by the time chunk from segmentGranularity (primary partition key) in the granularitySpec and then by the hash value of partitionDimensions (secondary partition key) in the partitionsSpec. The partitioned data is stored in local storage of the middle Manager or the indexer. /// /// The partial segment merge phase is similar to the Reduce phase in MapReduce. The Parallel task spawns a new set of worker tasks (type partial_index_generic_merge) to merge the partitioned data created in the previous phase. Here, the partitioned data is shuffled based on the time chunk and the hash value of partitionDimensions to be merged; each worker task reads the data falling in the same time chunk and the same hash value from multiple Middle Manager/Indexer processes and merges them to create the final segments. Finally, they push the final segments to the deep storage at once. -public struct HashedPartitionSpec: Codable, Hashable, Equatable { +public struct HashedPartitionSpec: Codable, Hashable, Equatable, Sendable { public init( numShards: Int? = nil, targetRowsPerSegment: Int? = nil, @@ -114,7 +114,7 @@ public struct HashedPartitionSpec: Codable, Hashable, Equatable { /// When you use this technique to partition your data, segment sizes may be unequally distributed if the data in your partitionDimension is also unequally distributed. Therefore, to avoid imbalance in data layout, review the distribution of values in your source data before deciding on a partitioning strategy. 
/// /// Range partitioning is not possible on multi-value dimensions. If the provided partitionDimension is multi-value, your ingestion job will report an error. -public struct SingleDimensionPartitionSpec: Codable, Hashable, Equatable { +public struct SingleDimensionPartitionSpec: Codable, Hashable, Equatable, Sendable { public init( partitionDimension: String, targetRowsPerSegment: Int? = nil, @@ -145,7 +145,7 @@ public struct SingleDimensionPartitionSpec: Codable, Hashable, Equatable { /// Range partitioning has several benefits related to storage footprint and query performance. Multi-dimension range partitioning improves over single-dimension range partitioning by allowing Druid to distribute segment sizes more evenly, and to prune on more dimensions. /// /// Range partitioning is not possible on multi-value dimensions. If one of the provided partitionDimensions is multi-value, your ingestion job will report an error. -public struct RangePartitionSpec: Codable, Hashable, Equatable { +public struct RangePartitionSpec: Codable, Hashable, Equatable, Sendable { public init( partitionDimensions: [String], targetRowsPerSegment: Int? = nil, diff --git a/Sources/DataTransferObjects/Druid/ingestion/native-batch/SplitHintSpec.swift b/Sources/DataTransferObjects/Druid/ingestion/native-batch/SplitHintSpec.swift index 3ec50b6..126cf10 100644 --- a/Sources/DataTransferObjects/Druid/ingestion/native-batch/SplitHintSpec.swift +++ b/Sources/DataTransferObjects/Druid/ingestion/native-batch/SplitHintSpec.swift @@ -1,6 +1,6 @@ /// The split hint spec is used to help the supervisor task divide input sources. Each worker task processes a single input division. You can control the amount of data each worker task reads during the first phase. 
/// /// https://druid.apache.org/docs/latest/ingestion/native-batch/#split-hint-spec -public indirect enum SplitHintSpec: Codable, Hashable, Equatable { +public indirect enum SplitHintSpec: Codable, Hashable, Equatable, Sendable { case notImplemented } diff --git a/Sources/DataTransferObjects/Druid/ingestion/tasks/IndexParallelTaskSpec.swift b/Sources/DataTransferObjects/Druid/ingestion/tasks/IndexParallelTaskSpec.swift index 2966a95..00ff6d1 100644 --- a/Sources/DataTransferObjects/Druid/ingestion/tasks/IndexParallelTaskSpec.swift +++ b/Sources/DataTransferObjects/Druid/ingestion/tasks/IndexParallelTaskSpec.swift @@ -14,7 +14,7 @@ /// The detailed behavior of the parallel task is different depending on the partitionsSpec. See partitionsSpec for more details. /// /// https://druid.apache.org/docs/latest/ingestion/native-batch -public struct IndexParallelTaskSpec: Codable, Hashable, Equatable { +public struct IndexParallelTaskSpec: Codable, Hashable, Equatable, Sendable { public init(id: String? = nil, spec: ParallelIndexIngestionSpec) { self.id = id self.spec = spec diff --git a/Sources/DataTransferObjects/Druid/ingestion/tasks/TaskSpec.swift b/Sources/DataTransferObjects/Druid/ingestion/tasks/TaskSpec.swift index f140a84..109bb35 100644 --- a/Sources/DataTransferObjects/Druid/ingestion/tasks/TaskSpec.swift +++ b/Sources/DataTransferObjects/Druid/ingestion/tasks/TaskSpec.swift @@ -4,7 +4,7 @@ /// tasks are generally submitted for you by a supervisor. 
/// /// https://druid.apache.org/docs/latest/ingestion/tasks -public indirect enum TaskSpec: Codable, Hashable, Equatable { +public indirect enum TaskSpec: Codable, Hashable, Equatable, Sendable { case indexParallel(IndexParallelTaskSpec) enum CodingKeys: String, CodingKey { diff --git a/Sources/DataTransferObjects/Druid/segment/indexing/DataSchema.swift b/Sources/DataTransferObjects/Druid/segment/indexing/DataSchema.swift index 4314eb1..520f000 100644 --- a/Sources/DataTransferObjects/Druid/segment/indexing/DataSchema.swift +++ b/Sources/DataTransferObjects/Druid/segment/indexing/DataSchema.swift @@ -1,7 +1,7 @@ /// https://druid.apache.org/docs/latest/ingestion/ingestion-spec#dataschema /// /// https://github.com/apache/druid/blob/master/server/src/main/java/org/apache/druid/segment/indexing/DataSchema.java -public struct DataSchema: Codable, Hashable, Equatable { +public struct DataSchema: Codable, Hashable, Equatable, Sendable { public init( dataSource: String, timestampSpec: TimestampSpec? 
= nil, diff --git a/Sources/DataTransferObjects/Druid/segment/indexing/IoConfig.swift b/Sources/DataTransferObjects/Druid/segment/indexing/IoConfig.swift index 74451c3..bb74f4f 100644 --- a/Sources/DataTransferObjects/Druid/segment/indexing/IoConfig.swift +++ b/Sources/DataTransferObjects/Druid/segment/indexing/IoConfig.swift @@ -1,5 +1,5 @@ /// https://github.com/apache/druid/blob/master/server/src/main/java/org/apache/druid/segment/indexing/IOConfig.java -public indirect enum IoConfig: Codable, Hashable, Equatable { +public indirect enum IoConfig: Codable, Hashable, Equatable, Sendable { case kinesis(KinesisIndexTaskIOConfig) case indexParallel(ParallelIndexIOConfig) diff --git a/Sources/DataTransferObjects/Druid/segment/transform/TransformSpec.swift b/Sources/DataTransferObjects/Druid/segment/transform/TransformSpec.swift index d14893e..995e4f5 100644 --- a/Sources/DataTransferObjects/Druid/segment/transform/TransformSpec.swift +++ b/Sources/DataTransferObjects/Druid/segment/transform/TransformSpec.swift @@ -2,7 +2,7 @@ /// ingestion time. It is optional. /// /// https://druid.apache.org/docs/latest/ingestion/ingestion-spec/#transformspec -public struct TransformSpec: Codable, Hashable, Equatable { +public struct TransformSpec: Codable, Hashable, Equatable, Sendable { public init(transforms: [TransformSpecTransform]? = nil, filter: Filter? = nil) { self.transforms = transforms self.filter = filter @@ -12,7 +12,7 @@ public struct TransformSpec: Codable, Hashable, Equatable { public let filter: Filter? } -public struct TransformSpecTransform: Codable, Hashable, Equatable { +public struct TransformSpecTransform: Codable, Hashable, Equatable, Sendable { public init(type: String, name: String? = nil, expression: String? 
= nil) { self.type = type self.name = name diff --git a/Sources/DataTransferObjects/Query/Aggregator.swift b/Sources/DataTransferObjects/Query/Aggregator.swift index 2189164..487ad0e 100644 --- a/Sources/DataTransferObjects/Query/Aggregator.swift +++ b/Sources/DataTransferObjects/Query/Aggregator.swift @@ -5,7 +5,7 @@ import Foundation /// You can use aggregations at query time to summarize result data. /// /// https://druid.apache.org/docs/latest/querying/aggregations.html -public indirect enum Aggregator: Codable, Hashable, Equatable { +public indirect enum Aggregator: Codable, Hashable, Equatable, Sendable { // Convenience Aggregators /// Counts the number of unique users in a query. @@ -322,7 +322,7 @@ public indirect enum Aggregator: Codable, Hashable, Equatable { } } -public struct CountAggregator: Codable, Hashable { +public struct CountAggregator: Codable, Hashable, Sendable { public init(name: String) { type = .count self.name = name @@ -335,7 +335,7 @@ public struct CountAggregator: Codable, Hashable { } /// Calculate the cardinality of a dimension (deprecated) -public struct CardinalityAggregator: Codable, Hashable { +public struct CardinalityAggregator: Codable, Hashable, Sendable { public init(name: String, fields: [String], byRow: Bool = false, round: Bool = true) { type = .cardinality self.name = name @@ -353,7 +353,7 @@ public struct CardinalityAggregator: Codable, Hashable { public let round: Bool } -public struct GenericAggregator: Codable, Hashable { +public struct GenericAggregator: Codable, Hashable, Sendable { public init(type: AggregatorType, name: String, fieldName: String) { self.type = type self.name = name @@ -369,7 +369,7 @@ public struct GenericAggregator: Codable, Hashable { public var fieldName: String } -public struct GenericTimeColumnAggregator: Codable, Hashable { +public struct GenericTimeColumnAggregator: Codable, Hashable, Sendable { public init(type: AggregatorType, name: String, fieldName: String, timeColumn: String?
= nil) { self.type = type self.name = name @@ -389,7 +389,7 @@ public struct GenericTimeColumnAggregator: Codable, Hashable { public let timeColumn: String? } -public enum AggregatorType: String, Codable, Hashable { +public enum AggregatorType: String, Codable, Hashable, Sendable { // Convenience Aggregators case userCount case eventCount @@ -433,7 +433,7 @@ public enum AggregatorType: String, Codable, Hashable { /// queries, and use both results as part of post-aggregations. /// /// Note: If only the filtered results are required, consider putting the filter on the query itself, which will be much faster since it does not require scanning all the data. -public struct FilteredAggregator: Codable, Hashable { +public struct FilteredAggregator: Codable, Hashable, Sendable { public init(filter: Filter, aggregator: Aggregator, name: String? = nil) { type = .filtered self.filter = filter @@ -453,7 +453,7 @@ public struct FilteredAggregator: Codable, Hashable { /// DataSketches Theta Sketch Aggregator /// /// https://druid.apache.org/docs/latest/development/extensions-core/datasketches-theta/ -public struct ThetaSketchAggregator: Codable, Hashable { +public struct ThetaSketchAggregator: Codable, Hashable, Sendable { public init(name: String, fieldName: String, size: Int? = nil, shouldFinalize: Bool? = nil) { type = .thetaSketch self.name = name @@ -485,7 +485,7 @@ public struct ThetaSketchAggregator: Codable, Hashable { public let shouldFinalize: Bool? } -public struct QuantilesDoublesSketchAggregator: Codable, Hashable { +public struct QuantilesDoublesSketchAggregator: Codable, Hashable, Sendable { public init( name: String, fieldName: String, @@ -536,7 +536,7 @@ public struct QuantilesDoublesSketchAggregator: Codable, Hashable { /// Convenience Aggregator that counts the number of unique users in a query. /// /// Compiles to a theta sketch aggregator. 
-public struct UserCountAggregator: Codable, Hashable, PrecompilableAggregator { +public struct UserCountAggregator: Codable, Hashable, PrecompilableAggregator, Sendable { public init(name: String? = nil) { self.name = name } @@ -553,7 +553,7 @@ public struct UserCountAggregator: Codable, Hashable, PrecompilableAggregator { /// Convenience Aggregator that counts the number of unique events in a query. /// /// Compiles to a longSum aggregator. -public struct EventCountAggregator: Codable, Hashable, PrecompilableAggregator { +public struct EventCountAggregator: Codable, Hashable, PrecompilableAggregator, Sendable { public init(name: String? = nil) { self.name = name } @@ -568,7 +568,7 @@ public struct EventCountAggregator: Codable, Hashable, PrecompilableAggregator { } /// Convenience Aggregator that implements a histogram over floatValue using DataSketches Quantiles -public struct HistogramAggregator: Codable, Hashable, PrecompilableAggregator { +public struct HistogramAggregator: Codable, Hashable, PrecompilableAggregator, Sendable { public init(name: String? = nil, fieldName: String? = nil, splitPoints: [Double]? = nil, numBins: Int? = nil, k: Int? = nil) { self.name = name self.fieldName = fieldName diff --git a/Sources/DataTransferObjects/Query/BaseFilters.swift b/Sources/DataTransferObjects/Query/BaseFilters.swift index 84d4a4a..56f08a9 100644 --- a/Sources/DataTransferObjects/Query/BaseFilters.swift +++ b/Sources/DataTransferObjects/Query/BaseFilters.swift @@ -4,7 +4,7 @@ import Foundation /// /// The filters generated by this rule are combined with a query's `filter` property /// using an `and` filter. 
-public enum BaseFilters: String, Codable, Hashable, Equatable { +public enum BaseFilters: String, Codable, Hashable, Equatable, Sendable { /// Attach test mode filter and filter for all apps of the executing user's organization case thisOrganization diff --git a/Sources/DataTransferObjects/Query/CustomQuery.swift b/Sources/DataTransferObjects/Query/CustomQuery.swift index 8899f07..387c8d5 100644 --- a/Sources/DataTransferObjects/Query/CustomQuery.swift +++ b/Sources/DataTransferObjects/Query/CustomQuery.swift @@ -2,7 +2,7 @@ import Crypto import Foundation /// Custom JSON based query -public struct CustomQuery: Codable, Hashable, Equatable { +public struct CustomQuery: Codable, Hashable, Equatable, Sendable { public init( queryType: CustomQuery.QueryType, compilationStatus: CompilationStatus? = nil, @@ -135,7 +135,7 @@ public struct CustomQuery: Codable, Hashable, Equatable { self.successCriterion = successCriterion } - public enum QueryType: String, Codable, CaseIterable, Identifiable { + public enum QueryType: String, Codable, CaseIterable, Identifiable, Sendable { public var id: String { rawValue } case timeseries @@ -150,12 +150,12 @@ public struct CustomQuery: Codable, Hashable, Equatable { // case retention } - public enum Order: String, Codable, CaseIterable { + public enum Order: String, Codable, CaseIterable, Sendable { case ascending case descending } - public enum CompilationStatus: String, Codable, CaseIterable, Identifiable { + public enum CompilationStatus: String, Codable, CaseIterable, Identifiable, Sendable { public var id: String { rawValue } case notCompiled diff --git a/Sources/DataTransferObjects/Query/CustomSQLQuery.swift b/Sources/DataTransferObjects/Query/CustomSQLQuery.swift index cadbd50..183fdff 100644 --- a/Sources/DataTransferObjects/Query/CustomSQLQuery.swift +++ b/Sources/DataTransferObjects/Query/CustomSQLQuery.swift @@ -1,6 +1,6 @@ import Foundation -public struct CustomSQLQuery: Codable, Hashable { +public struct CustomSQLQuery: 
Codable, Hashable, Sendable { let query: String let context: QueryContext diff --git a/Sources/DataTransferObjects/Query/Datasource.swift b/Sources/DataTransferObjects/Query/Datasource.swift index 1581ce9..21fa442 100644 --- a/Sources/DataTransferObjects/Query/Datasource.swift +++ b/Sources/DataTransferObjects/Query/Datasource.swift @@ -1,6 +1,6 @@ import Foundation -public struct DataSource: Codable, Hashable, Equatable { +public struct DataSource: Codable, Hashable, Equatable, Sendable { public init(type: DataSource.DataSourceType, name: String) { self.type = type self.name = name @@ -11,7 +11,7 @@ public struct DataSource: Codable, Hashable, Equatable { self.name = name } - public enum DataSourceType: String, Codable, Hashable, Equatable { + public enum DataSourceType: String, Codable, Hashable, Equatable, Sendable { case table } diff --git a/Sources/DataTransferObjects/Query/DimensionSpec.swift b/Sources/DataTransferObjects/Query/DimensionSpec.swift index c5c9dbe..bb003f2 100644 --- a/Sources/DataTransferObjects/Query/DimensionSpec.swift +++ b/Sources/DataTransferObjects/Query/DimensionSpec.swift @@ -6,7 +6,7 @@ import Foundation /// /// If an extraction function is set, it returns dimension values transformed using the given /// extraction function.
-public indirect enum DimensionSpec: Codable, Equatable, Hashable { +public indirect enum DimensionSpec: Codable, Equatable, Hashable, Sendable { case `default`(DefaultDimensionSpec) case extraction(ExtractionDimensionSpec) @@ -42,13 +42,13 @@ public indirect enum DimensionSpec: Codable, Equatable, Hashable { } } -public enum OutputType: String, Codable, Equatable, Hashable { +public enum OutputType: String, Codable, Equatable, Hashable, Sendable { case string = "STRING" case long = "LONG" case float = "FLOAT" } -public struct DefaultDimensionSpec: Codable, Equatable, Hashable { +public struct DefaultDimensionSpec: Codable, Equatable, Hashable, Sendable { public init(dimension: String, outputName: String, outputType: OutputType? = nil) { self.dimension = dimension self.outputName = outputName @@ -60,7 +60,7 @@ public struct DefaultDimensionSpec: Codable, Equatable, Hashable { public let outputType: OutputType? } -public struct ExtractionDimensionSpec: Codable, Equatable, Hashable { +public struct ExtractionDimensionSpec: Codable, Equatable, Hashable, Sendable { public init(dimension: String, outputName: String, outputType: OutputType? = nil, extractionFn: ExtractionFunction) { self.dimension = dimension self.outputName = outputName diff --git a/Sources/DataTransferObjects/Query/ExtractionFunction.swift b/Sources/DataTransferObjects/Query/ExtractionFunction.swift index c9dc634..2b28e4d 100644 --- a/Sources/DataTransferObjects/Query/ExtractionFunction.swift +++ b/Sources/DataTransferObjects/Query/ExtractionFunction.swift @@ -1,7 +1,7 @@ import Foundation /// Extraction functions define the transformation applied to each dimension value. 
-public indirect enum ExtractionFunction: Codable, Equatable, Hashable { +public indirect enum ExtractionFunction: Codable, Equatable, Hashable, Sendable { case regex(RegularExpressionExtractionFunction) case inlineLookup(InlineLookupExtractionFunction) case registeredLookup(RegisteredLookupExtractionFunction) @@ -45,7 +45,7 @@ public indirect enum ExtractionFunction: Codable, Equatable, Hashable { /// Returns the first matching group for the given regular expression. If there is no match, /// it returns the dimension value as is. -public struct RegularExpressionExtractionFunction: Codable, Equatable, Hashable { +public struct RegularExpressionExtractionFunction: Codable, Equatable, Hashable, Sendable { public init(expr: String, index: Int = 1, replaceMissingValue: Bool = false, replaceMissingValueWith: String? = nil) { self.expr = expr self.index = index @@ -81,7 +81,7 @@ public struct RegularExpressionExtractionFunction: Codable, Equatable, Hashable /// retainMissingValue = false which causes missing values to be treated as missing. /// /// It is illegal to set retainMissingValue = true and also specify a replaceMissingValueWith. -public struct InlineLookupExtractionFunction: Codable, Equatable, Hashable { +public struct InlineLookupExtractionFunction: Codable, Equatable, Hashable, Sendable { public init(lookupMap: [String: String], retainMissingValue: Bool = true, injective: Bool = true, replaceMissingValueWith: String? 
= nil) { lookup = Lookup(map: lookupMap) self.retainMissingValue = retainMissingValue @@ -89,7 +89,7 @@ public struct InlineLookupExtractionFunction: Codable, Equatable, Hashable { self.replaceMissingValueWith = replaceMissingValueWith } - public struct Lookup: Codable, Equatable, Hashable { + public struct Lookup: Codable, Equatable, Hashable, Sendable { public init(type: String = "map", map: [String: String]) { self.type = type self.map = map @@ -106,7 +106,7 @@ public struct InlineLookupExtractionFunction: Codable, Equatable, Hashable { } /// The "registeredLookup" extraction function lets you refer to a lookup that has been registered in the cluster-wide configuration. -public struct RegisteredLookupExtractionFunction: Codable, Equatable, Hashable { +public struct RegisteredLookupExtractionFunction: Codable, Equatable, Hashable, Sendable { public init(lookup: String, retainMissingValue: Bool) { self.lookup = lookup self.retainMissingValue = retainMissingValue diff --git a/Sources/DataTransferObjects/Query/Filter.swift b/Sources/DataTransferObjects/Query/Filter.swift index 7657c19..151646f 100644 --- a/Sources/DataTransferObjects/Query/Filter.swift +++ b/Sources/DataTransferObjects/Query/Filter.swift @@ -3,7 +3,7 @@ import Foundation /// The selector filter will match a specific dimension with a specific value. /// Selector filters can be used as the base filters for more complex Boolean /// expressions of filters. -public struct FilterSelector: Codable, Hashable, Equatable { +public struct FilterSelector: Codable, Hashable, Equatable, Sendable { public init(dimension: String, value: String) { self.dimension = dimension self.value = value @@ -15,7 +15,7 @@ public struct FilterSelector: Codable, Hashable, Equatable { /// The column comparison filter is similar to the selector filter, but instead /// compares dimensions to each other. 
-public struct FilterColumnComparison: Codable, Hashable, Equatable { +public struct FilterColumnComparison: Codable, Hashable, Equatable, Sendable { public init(dimensions: [String]) { self.dimensions = dimensions } @@ -32,7 +32,7 @@ public struct FilterColumnComparison: Codable, Hashable, Equatable { /// ranges and translates to an OR of Bound filters on those millisecond ranges, /// with numeric comparison. The Bound filters will have left-closed and /// right-open matching (i.e., start <= time < end). -public struct FilterInterval: Codable, Hashable, Equatable { +public struct FilterInterval: Codable, Hashable, Equatable, Sendable { public init(dimension: String, intervals: [QueryTimeInterval]? = nil, relativeIntervals: [RelativeTimeInterval]? = nil) { self.dimension = dimension self.intervals = intervals @@ -49,7 +49,7 @@ public struct FilterInterval: Codable, Hashable, Equatable { /// pattern can be any standard Java regular expression. /// /// @see http://docs.oracle.com/javase/6/docs/api/java/util/regex/Pattern.html -public struct FilterRegex: Codable, Hashable, Equatable { +public struct FilterRegex: Codable, Hashable, Equatable, Sendable { public init(dimension: String, pattern: String) { self.dimension = dimension self.pattern = pattern @@ -62,7 +62,7 @@ public struct FilterRegex: Codable, Hashable, Equatable { // The Range Filter can be used to filter on ranges of dimension values. 
It can be // used for comparison filtering like greater than, less than, greater than or equal // to, less than or equal to, and "between" -public struct FilterRange: Codable, Hashable, Equatable { +public struct FilterRange: Codable, Hashable, Equatable, Sendable { public init( column: String, matchValueType: FilterRange.MatchValueType, @@ -79,7 +79,7 @@ public struct FilterRange: Codable, Hashable, Equatable { self.upperOpen = upperOpen } - public enum MatchValueType: String, Codable, Hashable, Equatable { + public enum MatchValueType: String, Codable, Hashable, Equatable, Sendable { case String = "STRING" case Double = "DOUBLE" } @@ -113,7 +113,7 @@ public struct FilterRange: Codable, Hashable, Equatable { } // logical expression filters -public struct FilterExpression: Codable, Hashable, Equatable { +public struct FilterExpression: Codable, Hashable, Equatable, Sendable { public init(fields: [Filter]) { self.fields = fields } @@ -121,7 +121,7 @@ public struct FilterExpression: Codable, Hashable, Equatable { public let fields: [Filter] } -public struct FilterNot: Codable, Hashable, Equatable { +public struct FilterNot: Codable, Hashable, Equatable, Sendable { public init(field: Filter) { self.field = field } @@ -131,7 +131,7 @@ public struct FilterNot: Codable, Hashable, Equatable { /// A filter is a JSON object indicating which rows of data should be included in the computation /// for a query. It’s essentially the equivalent of the WHERE clause in SQL. -public indirect enum Filter: Codable, Hashable, Equatable { +public indirect enum Filter: Codable, Hashable, Equatable, Sendable { /// The selector filter will match a specific dimension with a specific value. /// Selector filters can be used as the base filters for more complex Boolean /// expressions of filters. 
diff --git a/Sources/DataTransferObjects/Query/HavingSpec.swift b/Sources/DataTransferObjects/Query/HavingSpec.swift index fab8dcc..cd0e0ed 100644 --- a/Sources/DataTransferObjects/Query/HavingSpec.swift +++ b/Sources/DataTransferObjects/Query/HavingSpec.swift @@ -5,7 +5,7 @@ import Foundation /// It is essentially the equivalent of the HAVING clause in SQL. /// /// The simplest having clause is a numeric filter. Numeric filters can be used as the base filters for more complex boolean expressions of filters. -public indirect enum HavingSpec: Codable, Hashable, Equatable { +public indirect enum HavingSpec: Codable, Hashable, Equatable, Sendable { /// Query filter HavingSpecs allow all query filters to be used in the Having part of the query. case filter(HavingFilter) @@ -93,7 +93,7 @@ public indirect enum HavingSpec: Codable, Hashable, Equatable { } } -public struct HavingFilter: Codable, Hashable, Equatable { +public struct HavingFilter: Codable, Hashable, Equatable, Sendable { public init(filter: Filter) { self.filter = filter } @@ -101,7 +101,7 @@ public struct HavingFilter: Codable, Hashable, Equatable { public let filter: Filter } -public struct HavingEqualTo: Codable, Hashable, Equatable { +public struct HavingEqualTo: Codable, Hashable, Equatable, Sendable { public init(aggregation: String, value: Double) { self.aggregation = aggregation self.value = value @@ -111,7 +111,7 @@ public struct HavingEqualTo: Codable, Hashable, Equatable { public let value: Double } -public struct HavingGreaterThan: Codable, Hashable, Equatable { +public struct HavingGreaterThan: Codable, Hashable, Equatable, Sendable { public init(aggregation: String, value: Double) { self.aggregation = aggregation self.value = value @@ -121,7 +121,7 @@ public struct HavingGreaterThan: Codable, Hashable, Equatable { public let value: Double } -public struct HavingLessThan: Codable, Hashable, Equatable { +public struct HavingLessThan: Codable, Hashable, Equatable, Sendable { public 
init(aggregation: String, value: Double) { self.aggregation = aggregation self.value = value @@ -131,7 +131,7 @@ public struct HavingLessThan: Codable, Hashable, Equatable { public let value: Double } -public struct HavingDimensionSelector: Codable, Hashable, Equatable { +public struct HavingDimensionSelector: Codable, Hashable, Equatable, Sendable { public init(dimension: String, value: String) { self.dimension = dimension self.value = value @@ -141,7 +141,7 @@ public struct HavingDimensionSelector: Codable, Hashable, Equatable { public let value: String } -public struct HavingAnd: Codable, Hashable, Equatable { +public struct HavingAnd: Codable, Hashable, Equatable, Sendable { public init(havingSpecs: [HavingSpec]) { self.havingSpecs = havingSpecs } @@ -149,7 +149,7 @@ public struct HavingAnd: Codable, Hashable, Equatable { public let havingSpecs: [HavingSpec] } -public struct HavingOr: Codable, Hashable, Equatable { +public struct HavingOr: Codable, Hashable, Equatable, Sendable { public init(havingSpecs: [HavingSpec]) { self.havingSpecs = havingSpecs } @@ -157,7 +157,7 @@ public struct HavingOr: Codable, Hashable, Equatable { public let havingSpecs: [HavingSpec] } -public struct HavingNot: Codable, Hashable, Equatable { +public struct HavingNot: Codable, Hashable, Equatable, Sendable { public init(havingSpec: HavingSpec) { self.havingSpec = havingSpec } diff --git a/Sources/DataTransferObjects/Query/NamedFilter.swift b/Sources/DataTransferObjects/Query/NamedFilter.swift index 377884c..8d7f6df 100644 --- a/Sources/DataTransferObjects/Query/NamedFilter.swift +++ b/Sources/DataTransferObjects/Query/NamedFilter.swift @@ -4,7 +4,7 @@ import Foundation /// /// Used in e.g. funnel steps and A/B test experiments or in other /// places where a subset of a query needs to be named. -public struct NamedFilter: Codable, Hashable, Equatable { +public struct NamedFilter: Codable, Hashable, Equatable, Sendable { public init(filter: Filter? 
= nil, name: String) { self.filter = filter self.name = name diff --git a/Sources/DataTransferObjects/Query/PostAggregator.swift b/Sources/DataTransferObjects/Query/PostAggregator.swift index 32b1fcf..b6ce0b5 100644 --- a/Sources/DataTransferObjects/Query/PostAggregator.swift +++ b/Sources/DataTransferObjects/Query/PostAggregator.swift @@ -6,7 +6,7 @@ import Foundation /// If you include a post aggregation as part of a query, make sure to include all aggregators the post-aggregator requires. /// /// https://druid.apache.org/docs/latest/querying/post-aggregations.html -public indirect enum PostAggregator: Codable, Hashable, Equatable { +public indirect enum PostAggregator: Codable, Hashable, Equatable, Sendable { // Included case arithmetic(ArithmetricPostAggregator) case fieldAccess(FieldAccessPostAggregator) @@ -172,7 +172,7 @@ public indirect enum PostAggregator: Codable, Hashable, Equatable { } } -public enum PostAggregatorType: String, Codable, Hashable { +public enum PostAggregatorType: String, Codable, Hashable, Sendable { case arithmetic case fieldAccess case finalizingFieldAccess @@ -196,7 +196,7 @@ public enum PostAggregatorType: String, Codable, Hashable { case pvalue2tailedZtest } -public enum PostAggregatorOrdering: String, Codable, Hashable { +public enum PostAggregatorOrdering: String, Codable, Hashable, Sendable { case numericFirst } @@ -217,7 +217,7 @@ public enum PostAggregatorOrdering: String, Codable, Hashable { /// /// - If no ordering (or null) is specified, the default floating point ordering is used. /// - numericFirst ordering always returns finite values first, followed by NaN, and infinite values last. -public struct ArithmetricPostAggregator: Codable, Hashable { +public struct ArithmetricPostAggregator: Codable, Hashable, Sendable { public init(name: String, function: MathematicalFunction, fields: [PostAggregator], ordering: PostAggregatorOrdering? 
= nil) { type = .arithmetic self.name = name @@ -226,7 +226,7 @@ public struct ArithmetricPostAggregator: Codable, Hashable { self.ordering = ordering } - public enum MathematicalFunction: String, Codable, Hashable { + public enum MathematicalFunction: String, Codable, Hashable, Sendable { case addition = "+" case subtraction = "-" case multiplication = "*" @@ -253,7 +253,7 @@ public struct ArithmetricPostAggregator: Codable, Hashable { /// fieldName refers to the output name of the aggregator given in the aggregations portion of the query. For complex aggregators, like "cardinality" and /// "hyperUnique", the type of the post-aggregator determines what the post-aggregator will return. Use type "fieldAccess" to return the raw aggregation /// object, or use type "finalizingFieldAccess" to return a finalized value, such as an estimated cardinality. -public struct FieldAccessPostAggregator: Codable, Hashable { +public struct FieldAccessPostAggregator: Codable, Hashable, Sendable { public init(type: PostAggregatorType, name: String? = nil, fieldName: String) { self.type = type self.name = name @@ -270,7 +270,7 @@ public struct FieldAccessPostAggregator: Codable, Hashable { } /// The constant post-aggregator always returns the specified value. -public struct ConstantPostAggregator: Codable, Hashable { +public struct ConstantPostAggregator: Codable, Hashable, Sendable { public init(name: String, value: Double) { type = .constant self.name = name @@ -286,7 +286,7 @@ public struct ConstantPostAggregator: Codable, Hashable { public let value: Double } -public struct GreatestLeastPostAggregator: Codable, Hashable { +public struct GreatestLeastPostAggregator: Codable, Hashable, Sendable { public init(type: PostAggregatorType, name: String, fields: [PostAggregator]) { self.type = type self.name = name @@ -303,7 +303,7 @@ public struct GreatestLeastPostAggregator: Codable, Hashable { /// The expression post-aggregator is defined using a Druid expression. 
/// see https://druid.apache.org/docs/latest/misc/math-expr.html -public struct ExpressionPostAggregator: Codable, Hashable { +public struct ExpressionPostAggregator: Codable, Hashable, Sendable { public init(name: String, expression: String, ordering: PostAggregatorOrdering? = nil) { type = .expression self.name = name @@ -322,7 +322,7 @@ public struct ExpressionPostAggregator: Codable, Hashable { } /// The hyperUniqueCardinality post aggregator is used to wrap a hyperUnique object such that it can be used in post aggregations. -public struct HyperUniqueCardinalityPostAggregator: Codable, Hashable { +public struct HyperUniqueCardinalityPostAggregator: Codable, Hashable, Sendable { public init(name: String? = nil, fieldName: String) { type = .hyperUniqueCardinality self.name = name @@ -335,7 +335,7 @@ public struct HyperUniqueCardinalityPostAggregator: Codable, Hashable { } /// "field" : -public struct ThetaSketchEstimatePostAggregator: Codable, Hashable { +public struct ThetaSketchEstimatePostAggregator: Codable, Hashable, Sendable { public init(name: String? = nil, field: PostAggregator) { type = .thetaSketchEstimate self.name = name @@ -347,7 +347,7 @@ public struct ThetaSketchEstimatePostAggregator: Codable, Hashable { public let field: PostAggregator } -public struct ThetaSketchSetOpPostAggregator: Codable, Hashable { +public struct ThetaSketchSetOpPostAggregator: Codable, Hashable, Sendable { public init(name: String? 
= nil, func: ThetaSketchSetOpPostAggregator.SketchOperation, fields: [PostAggregator]) { type = .thetaSketchSetOp self.name = name @@ -355,7 +355,7 @@ public struct ThetaSketchSetOpPostAggregator: Codable, Hashable { self.fields = fields } - public enum SketchOperation: String, Codable, Hashable { + public enum SketchOperation: String, Codable, Hashable, Sendable { case union = "UNION" case intersect = "INTERSECT" case not = "NOT" @@ -376,7 +376,7 @@ public struct ThetaSketchSetOpPostAggregator: Codable, Hashable { /// Highly useful for A/B Tests and similar experiments. /// /// @see https://medium.com/paypal-tech/democratizing-experimentation-data-for-product-innovations-8b6e1cf40c27#DemocratizingExperimentationScience-Druid -public struct ZScore2SamplePostAggregator: Codable, Hashable { +public struct ZScore2SamplePostAggregator: Codable, Hashable, Sendable { public init(name: String, sample1Size: PostAggregator, successCount1: PostAggregator, sample2Size: PostAggregator, successCount2: PostAggregator) { type = .zscore2sample self.name = name @@ -400,7 +400,7 @@ public struct ZScore2SamplePostAggregator: Codable, Hashable { /// calculated using the zscore2sample post aggregator /// /// @see https://medium.com/paypal-tech/democratizing-experimentation-data-for-product-innovations-8b6e1cf40c27#DemocratizingExperimentationScience-Druid -public struct PValue2TailedZTestPostAggregator: Codable, Hashable { +public struct PValue2TailedZTestPostAggregator: Codable, Hashable, Sendable { public init(name: String, zScore: PostAggregator) { type = .pvalue2tailedZtest self.name = name @@ -416,7 +416,7 @@ public struct PValue2TailedZTestPostAggregator: Codable, Hashable { /// /// This returns an approximation to the value that would be preceded by a given fraction of a hypothetical sorted version of the input /// stream. 
-public struct QuantilesDoublesSketchToQuantilePostAggregator: Codable, Hashable { +public struct QuantilesDoublesSketchToQuantilePostAggregator: Codable, Hashable, Sendable { public init( name: String, field: PostAggregator, @@ -443,7 +443,7 @@ public struct QuantilesDoublesSketchToQuantilePostAggregator: Codable, Hashable /// Quantiles /// /// This returns an array of quantiles corresponding to a given array of fractions -public struct QuantilesDoublesSketchToQuantilesPostAggregator: Codable, Hashable { +public struct QuantilesDoublesSketchToQuantilesPostAggregator: Codable, Hashable, Sendable { public init( name: String, field: PostAggregator, @@ -474,7 +474,7 @@ public struct QuantilesDoublesSketchToQuantilesPostAggregator: Codable, Hashable /// intervals. The definition of an interval is inclusive of the left split point and exclusive of the right split point. If the /// number of bins is specified instead of split points, the interval between the minimum and maximum values is divided into the /// given number of equally-spaced bins. -public struct QuantilesDoublesSketchToHistogramPostAggregator: Codable, Hashable { +public struct QuantilesDoublesSketchToHistogramPostAggregator: Codable, Hashable, Sendable { public init( name: String, field: PostAggregator, @@ -506,7 +506,7 @@ public struct QuantilesDoublesSketchToHistogramPostAggregator: Codable, Hashable /// Rank /// /// This returns an approximation to the rank of a given value that is the fraction of the distribution less than that value. -public struct QuantilesDoublesSketchToRankPostAggregator: Codable, Hashable { +public struct QuantilesDoublesSketchToRankPostAggregator: Codable, Hashable, Sendable { public init( name: String, field: PostAggregator, @@ -536,7 +536,7 @@ public struct QuantilesDoublesSketchToRankPostAggregator: Codable, Hashable { /// bins. An array of m unique, monotonically increasing split points divide the real number line into m+1 consecutive disjoint /// intervals. 
The definition of an interval is inclusive of the left split point and exclusive of the right split point. The resulting /// array of fractions can be viewed as ranks of each split point with one additional rank that is always 1. -public struct QuantilesDoublesSketchToCDFPostAggregator: Codable, Hashable { +public struct QuantilesDoublesSketchToCDFPostAggregator: Codable, Hashable, Sendable { public init( name: String, field: PostAggregator, @@ -563,7 +563,7 @@ public struct QuantilesDoublesSketchToCDFPostAggregator: Codable, Hashable { /// Sketch Summary /// /// This returns a summary of the sketch that can be used for debugging. This is the result of calling toString() method. -public struct QuantilesDoublesSketchToStringPostAggregator: Codable, Hashable { +public struct QuantilesDoublesSketchToStringPostAggregator: Codable, Hashable, Sendable { public init( name: String, field: PostAggregator diff --git a/Sources/DataTransferObjects/Query/QueryContext.swift b/Sources/DataTransferObjects/Query/QueryContext.swift index c836083..1f203fd 100644 --- a/Sources/DataTransferObjects/Query/QueryContext.swift +++ b/Sources/DataTransferObjects/Query/QueryContext.swift @@ -1,6 +1,6 @@ import Foundation -public struct QueryContext: Codable, Hashable { +public struct QueryContext: Codable, Hashable, Sendable { /// Query timeout in millis, beyond which unfinished queries will be cancelled. 0 timeout means no timeout. public var timeout: String? 
diff --git a/Sources/DataTransferObjects/Query/QueryGranularity.swift b/Sources/DataTransferObjects/Query/QueryGranularity.swift index cb571d8..bf8d709 100644 --- a/Sources/DataTransferObjects/Query/QueryGranularity.swift +++ b/Sources/DataTransferObjects/Query/QueryGranularity.swift @@ -1,4 +1,4 @@ -public enum QueryGranularity: String, Codable, Hashable, CaseIterable { +public enum QueryGranularity: String, Codable, Hashable, CaseIterable, Sendable { case all case none case second diff --git a/Sources/DataTransferObjects/Query/QueryTimeInterval.swift b/Sources/DataTransferObjects/Query/QueryTimeInterval.swift index 96fef87..60ffccd 100644 --- a/Sources/DataTransferObjects/Query/QueryTimeInterval.swift +++ b/Sources/DataTransferObjects/Query/QueryTimeInterval.swift @@ -1,8 +1,8 @@ import DateOperations import Foundation -public struct QueryTimeIntervalsContainer: Codable, Hashable, Equatable { - public enum ContainerType: String, Codable, Hashable, Equatable { +public struct QueryTimeIntervalsContainer: Codable, Hashable, Equatable, Sendable { + public enum ContainerType: String, Codable, Hashable, Equatable, Sendable { case intervals } @@ -10,7 +10,7 @@ public struct QueryTimeIntervalsContainer: Codable, Hashable, Equatable { public let intervals: [QueryTimeInterval] } -public struct QueryTimeInterval: Codable, Hashable, Equatable, Comparable { +public struct QueryTimeInterval: Codable, Hashable, Equatable, Comparable, Sendable { public let beginningDate: Date public let endDate: Date diff --git a/Sources/DataTransferObjects/Query/RelativeTimeInterval.swift b/Sources/DataTransferObjects/Query/RelativeTimeInterval.swift index 0a6403a..c3fd83c 100644 --- a/Sources/DataTransferObjects/Query/RelativeTimeInterval.swift +++ b/Sources/DataTransferObjects/Query/RelativeTimeInterval.swift @@ -1,7 +1,7 @@ import DateOperations import Foundation -public struct RelativeTimeInterval: Codable, Hashable, Equatable { +public struct RelativeTimeInterval: Codable, Hashable, 
Equatable, Sendable { public init(beginningDate: RelativeDate, endDate: RelativeDate) { self.beginningDate = beginningDate self.endDate = endDate @@ -11,8 +11,8 @@ public struct RelativeTimeInterval: Codable, Hashable, Equatable { public let endDate: RelativeDate } -public struct RelativeDate: Codable, Hashable, Equatable { - public enum RelativeDateComponent: String, Codable, Hashable, Equatable { +public struct RelativeDate: Codable, Hashable, Equatable, Sendable { + public enum RelativeDateComponent: String, Codable, Hashable, Equatable, Sendable { case hour case day case week @@ -38,7 +38,7 @@ public struct RelativeDate: Codable, Hashable, Equatable { } } - public enum ComponentPosition: String, Codable, Hashable, Equatable { + public enum ComponentPosition: String, Codable, Hashable, Equatable, Sendable { /// Generate a date at the first possible moment in the specified component case beginning diff --git a/Sources/DataTransferObjects/Query/StringComparators.swift b/Sources/DataTransferObjects/Query/StringComparators.swift index 6bbbe42..9d3f3df 100644 --- a/Sources/DataTransferObjects/Query/StringComparators.swift +++ b/Sources/DataTransferObjects/Query/StringComparators.swift @@ -1,6 +1,6 @@ import Foundation -public enum StringComparators: String, Codable, Equatable, Hashable { +public enum StringComparators: String, Codable, Equatable, Hashable, Sendable { /// Sorts values by converting Strings to their UTF-8 byte array representations and comparing lexicographically, byte-by-byte. case lexicographic diff --git a/Sources/DataTransferObjects/Query/TopNMetricSpec.swift b/Sources/DataTransferObjects/Query/TopNMetricSpec.swift index e38613c..bfce882 100644 --- a/Sources/DataTransferObjects/Query/TopNMetricSpec.swift +++ b/Sources/DataTransferObjects/Query/TopNMetricSpec.swift @@ -1,7 +1,7 @@ import Foundation /// Specifies how topN values should be sorted. 
-public indirect enum TopNMetricSpec: Codable, Equatable, Hashable { +public indirect enum TopNMetricSpec: Codable, Equatable, Hashable, Sendable { case numeric(NumericTopNMetricSpec) case dimension(DimensionTopNMetricSpec) case inverted(InvertedTopNMetricSpec) @@ -44,7 +44,7 @@ public indirect enum TopNMetricSpec: Codable, Equatable, Hashable { } /// The simplest metric specification is a String value indicating the metric to sort topN results by -public struct NumericTopNMetricSpec: Codable, Equatable, Hashable { +public struct NumericTopNMetricSpec: Codable, Equatable, Hashable, Sendable { public init(metric: String) { self.metric = metric } @@ -54,7 +54,7 @@ public struct NumericTopNMetricSpec: Codable, Equatable, Hashable { } /// This metric specification sorts TopN results by dimension value -public struct DimensionTopNMetricSpec: Codable, Equatable, Hashable { +public struct DimensionTopNMetricSpec: Codable, Equatable, Hashable, Sendable { public init(ordering: StringComparators, previousStop: String? = nil) { self.ordering = ordering self.previousStop = previousStop @@ -67,7 +67,7 @@ public struct DimensionTopNMetricSpec: Codable, Equatable, Hashable { } /// Sort dimension values in inverted order, i.e inverts the order of the delegate metric spec. It can be used to sort the values in ascending order. -public struct InvertedTopNMetricSpec: Codable, Equatable, Hashable { +public struct InvertedTopNMetricSpec: Codable, Equatable, Hashable, Sendable { public init(metric: TopNMetricSpec) { self.metric = metric } diff --git a/Sources/DataTransferObjects/Query/ValueFormatter.swift b/Sources/DataTransferObjects/Query/ValueFormatter.swift index 44de761..51ec42e 100644 --- a/Sources/DataTransferObjects/Query/ValueFormatter.swift +++ b/Sources/DataTransferObjects/Query/ValueFormatter.swift @@ -1,10 +1,10 @@ -public struct ValueFormatter: Codable, Hashable, Equatable { +public struct ValueFormatter: Codable, Hashable, Equatable, Sendable { public let locale: String? 
public let options: ValueFormatterOptions? } /// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/NumberFormat/NumberFormat#currencydisplay -public struct ValueFormatterOptions: Codable, Hashable, Equatable { +public struct ValueFormatterOptions: Codable, Hashable, Equatable, Sendable { public let style: ValueFormatterStyle? public let currency: String? @@ -30,38 +30,38 @@ public struct ValueFormatterOptions: Codable, Hashable, Equatable { public let signDisplay: ValueFormatterSignDisplay? } -public enum ValueFormatterStyle: String, Codable, Hashable, Equatable { +public enum ValueFormatterStyle: String, Codable, Hashable, Equatable, Sendable { case decimal case currency case percent case unit } -public enum ValueFormatterCurrencyDisplay: String, Codable, Hashable, Equatable { +public enum ValueFormatterCurrencyDisplay: String, Codable, Hashable, Equatable, Sendable { case code case symbol case narrowSymbol case name } -public enum ValueFormatterCurrencySign: String, Codable, Hashable, Equatable { +public enum ValueFormatterCurrencySign: String, Codable, Hashable, Equatable, Sendable { case standard case accounting } -public enum ValueFormatterUnitDisplay: String, Codable, Hashable, Equatable { +public enum ValueFormatterUnitDisplay: String, Codable, Hashable, Equatable, Sendable { case short case narrow case long } -public enum ValueFormatterRoundingPriority: String, Codable, Hashable, Equatable { +public enum ValueFormatterRoundingPriority: String, Codable, Hashable, Equatable, Sendable { case auto case morePrecision case lessPrecision } -public enum ValueFormatterRoundingMode: Codable, Hashable, Equatable { +public enum ValueFormatterRoundingMode: Codable, Hashable, Equatable, Sendable { case ceil case floor case expand @@ -73,24 +73,24 @@ public enum ValueFormatterRoundingMode: Codable, Hashable, Equatable { case halfEven } -public enum ValueFormatterTrailingZeroDisplay: String, Codable, Hashable, Equatable { +public enum 
ValueFormatterTrailingZeroDisplay: String, Codable, Hashable, Equatable, Sendable { case auto case stripIfInteger } -public enum ValueFormatterNotation: String, Codable, Hashable, Equatable { +public enum ValueFormatterNotation: String, Codable, Hashable, Equatable, Sendable { case standard case scientific case engineering case compact } -public enum ValueFormatterCompactDisplay: String, Codable, Hashable, Equatable { +public enum ValueFormatterCompactDisplay: String, Codable, Hashable, Equatable, Sendable { case short case long } -public enum ValueFormatterUseGrouping: String, Codable, Hashable, Equatable { +public enum ValueFormatterUseGrouping: String, Codable, Hashable, Equatable, Sendable { case always case auto case min2 @@ -98,7 +98,7 @@ public enum ValueFormatterUseGrouping: String, Codable, Hashable, Equatable { case `false` } -public enum ValueFormatterSignDisplay: String, Codable, Hashable, Equatable { +public enum ValueFormatterSignDisplay: String, Codable, Hashable, Equatable, Sendable { case auto case always case exceptZero diff --git a/Sources/DataTransferObjects/Query/Virtual Column/ExpressionVirtualColumn.swift b/Sources/DataTransferObjects/Query/Virtual Column/ExpressionVirtualColumn.swift index 179b76d..c071da5 100644 --- a/Sources/DataTransferObjects/Query/Virtual Column/ExpressionVirtualColumn.swift +++ b/Sources/DataTransferObjects/Query/Virtual Column/ExpressionVirtualColumn.swift @@ -1,7 +1,7 @@ /// Expression virtual columns use Druid's native expression system to allow defining query time transforms of inputs from one or more columns. /// /// https://druid.apache.org/docs/latest/querying/math-expr -public struct ExpressionVirtualColumn: Codable, Hashable, Equatable { +public struct ExpressionVirtualColumn: Codable, Hashable, Equatable, Sendable { public init(name: String, expression: String, outputType: String? 
= nil) { self.name = name self.expression = expression diff --git a/Sources/DataTransferObjects/Query/Virtual Column/ListFilteredVirtualColumn.swift b/Sources/DataTransferObjects/Query/Virtual Column/ListFilteredVirtualColumn.swift index c2602ef..b06a13f 100644 --- a/Sources/DataTransferObjects/Query/Virtual Column/ListFilteredVirtualColumn.swift +++ b/Sources/DataTransferObjects/Query/Virtual Column/ListFilteredVirtualColumn.swift @@ -1,6 +1,6 @@ /// This virtual column provides an alternative way to use 'list filtered' dimension spec as a virtual column. It has optimized access /// to the underlying column value indexes that can provide a small performance improvement in some cases. -public struct ListFilteredVirtualColumn: Codable, Hashable, Equatable { +public struct ListFilteredVirtualColumn: Codable, Hashable, Equatable, Sendable { public init(name: String, delegate: String, values: [String], isAllowList: Bool? = nil) { self.name = name self.delegate = delegate diff --git a/Sources/DataTransferObjects/Query/Virtual Column/VirtualColumn.swift b/Sources/DataTransferObjects/Query/Virtual Column/VirtualColumn.swift index 99cdb1b..7d1a086 100644 --- a/Sources/DataTransferObjects/Query/Virtual Column/VirtualColumn.swift +++ b/Sources/DataTransferObjects/Query/Virtual Column/VirtualColumn.swift @@ -1,4 +1,4 @@ -public indirect enum VirtualColumn: Codable, Hashable, Equatable { +public indirect enum VirtualColumn: Codable, Hashable, Equatable, Sendable { case expression(ExpressionVirtualColumn) case listFiltered(ListFilteredVirtualColumn) diff --git a/Sources/DataTransferObjects/QueryResult/QueryResult.swift b/Sources/DataTransferObjects/QueryResult/QueryResult.swift index b5b06e8..0f677e5 100644 --- a/Sources/DataTransferObjects/QueryResult/QueryResult.swift +++ b/Sources/DataTransferObjects/QueryResult/QueryResult.swift @@ -1,6 +1,6 @@ import Foundation -public enum QueryResult: Codable, Hashable, Equatable { +public enum QueryResult: Codable, Hashable, Equatable, 
Sendable { case timeSeries(TimeSeriesQueryResult) case topN(TopNQueryResult) case groupBy(GroupByQueryResult) @@ -54,7 +54,7 @@ public enum QueryResult: Codable, Hashable, Equatable { } } -public struct TimeSeriesQueryResult: Codable, Hashable, Equatable { +public struct TimeSeriesQueryResult: Codable, Hashable, Equatable, Sendable { public init(rows: [TimeSeriesQueryResultRow], restrictions: [QueryTimeInterval]? = nil) { self.rows = rows self.restrictions = restrictions @@ -65,7 +65,7 @@ public struct TimeSeriesQueryResult: Codable, Hashable, Equatable { } /// Wrapper that can resolve either into a String or an Array of Strings -public enum StringWrapper: Codable, Hashable, Equatable { +public enum StringWrapper: Codable, Hashable, Equatable, Sendable { case single(String) case array([String]) @@ -110,7 +110,7 @@ public enum StringWrapper: Codable, Hashable, Equatable { } /// Wrapper that can resolve either into a Double or an Array of Doubles -public enum DoubleWrapper: Codable, Hashable, Equatable { +public enum DoubleWrapper: Codable, Hashable, Equatable, Sendable { case single(DoublePlusInfinity) case array([DoublePlusInfinity]) @@ -155,7 +155,7 @@ public enum DoubleWrapper: Codable, Hashable, Equatable { } /// Wrapper around the Double type that also accepts encoding and decoding as "Infinity" and "-Infinity" -public struct DoublePlusInfinity: Codable, Hashable, Equatable { +public struct DoublePlusInfinity: Codable, Hashable, Equatable, Sendable { public let value: Double public init(_ doubleValue: Double) { @@ -207,7 +207,7 @@ public struct DoublePlusInfinity: Codable, Hashable, Equatable { /// Time series queries return an array of JSON objects, where each object represents a value as described in the time-series query. /// For instance, the daily average of a dimension for the last one month. 
-public struct TimeSeriesQueryResultRow: Codable, Hashable, Equatable { +public struct TimeSeriesQueryResultRow: Codable, Hashable, Equatable, Sendable { public init(timestamp: Date, result: [String: DoubleWrapper]) { self.timestamp = timestamp self.result = result @@ -219,7 +219,7 @@ public struct TimeSeriesQueryResultRow: Codable, Hashable, Equatable { /// GroupBy queries return an array of JSON objects, where each object represents a grouping as described in the group-by query. /// For example, we can query for the daily average of a dimension for the past month grouped by another dimension. -public struct GroupByQueryResult: Codable, Hashable, Equatable { +public struct GroupByQueryResult: Codable, Hashable, Equatable, Sendable { public init(rows: [GroupByQueryResultRow], restrictions: [QueryTimeInterval]? = nil) { self.restrictions = restrictions self.rows = rows @@ -229,7 +229,7 @@ public struct GroupByQueryResult: Codable, Hashable, Equatable { public let rows: [GroupByQueryResultRow] } -public struct GroupByQueryResultRow: Codable, Hashable, Equatable { +public struct GroupByQueryResultRow: Codable, Hashable, Equatable, Sendable { public init(timestamp: Date, event: AdaptableQueryResultItem) { version = "v1" self.timestamp = timestamp @@ -246,7 +246,7 @@ public struct GroupByQueryResultRow: Codable, Hashable, Equatable { /// Conceptually, they can be thought of as an approximate GroupByQuery over a single dimension with an Ordering spec. /// TopNs are much faster and resource efficient than GroupBys for this use case. These types of queries take a topN query /// object and return an array of JSON objects where each object represents a value asked for by the topN query. -public struct TopNQueryResult: Codable, Hashable, Equatable { +public struct TopNQueryResult: Codable, Hashable, Equatable, Sendable { public init(rows: [TopNQueryResultRow], restrictions: [QueryTimeInterval]? 
= nil) { self.rows = rows self.restrictions = restrictions @@ -256,7 +256,7 @@ public struct TopNQueryResult: Codable, Hashable, Equatable { public let rows: [TopNQueryResultRow] } -public struct TopNQueryResultRow: Codable, Hashable, Equatable { +public struct TopNQueryResultRow: Codable, Hashable, Equatable, Sendable { public init(timestamp: Date, result: [AdaptableQueryResultItem]) { self.timestamp = timestamp self.result = result @@ -266,7 +266,7 @@ public struct TopNQueryResultRow: Codable, Hashable, Equatable { public let result: [AdaptableQueryResultItem] } -public struct ScanQueryResult: Codable, Hashable, Equatable { +public struct ScanQueryResult: Codable, Hashable, Equatable, Sendable { public init(rows: [ScanQueryResultRow], restrictions: [QueryTimeInterval]? = nil) { self.restrictions = restrictions self.rows = rows @@ -276,7 +276,7 @@ public struct ScanQueryResult: Codable, Hashable, Equatable { public let rows: [ScanQueryResultRow] } -public struct ScanQueryResultRow: Codable, Hashable, Equatable { +public struct ScanQueryResultRow: Codable, Hashable, Equatable, Sendable { public init( segmentId: String? = nil, columns: [String], @@ -295,7 +295,7 @@ public struct ScanQueryResultRow: Codable, Hashable, Equatable { public let rowSignature: [ScanQueryRowSignatureRow] } -public struct ScanQueryRowSignatureRow: Codable, Hashable, Equatable { +public struct ScanQueryRowSignatureRow: Codable, Hashable, Equatable, Sendable { public init(name: String, type: String) { self.name = name self.type = type @@ -306,7 +306,7 @@ public struct ScanQueryRowSignatureRow: Codable, Hashable, Equatable { } /// Represents a JSON object that can contain string values (dimensions), double values (dimensions) and null values. 
-public struct AdaptableQueryResultItem: Codable, Hashable, Equatable { +public struct AdaptableQueryResultItem: Codable, Hashable, Equatable, Sendable { public init(metrics: [String: DoubleWrapper], dimensions: [String: StringWrapper], nullValues: [String] = []) { self.metrics = metrics self.dimensions = dimensions @@ -366,7 +366,7 @@ public struct AdaptableQueryResultItem: Codable, Hashable, Equatable { } } -public struct TimeBoundaryResult: Codable, Hashable, Equatable { +public struct TimeBoundaryResult: Codable, Hashable, Equatable, Sendable { public init(rows: [TimeBoundaryResultRow], restrictions: [QueryTimeInterval]? = nil) { self.restrictions = restrictions self.rows = rows @@ -376,7 +376,7 @@ public struct TimeBoundaryResult: Codable, Hashable, Equatable { public let rows: [TimeBoundaryResultRow] } -public struct TimeBoundaryResultRow: Codable, Hashable, Equatable { +public struct TimeBoundaryResultRow: Codable, Hashable, Equatable, Sendable { public init(timestamp: Date, result: [String: Date]) { self.timestamp = timestamp self.result = result diff --git a/Sources/DataTransferObjects/QueryResult/QueryResultWrapper.swift b/Sources/DataTransferObjects/QueryResult/QueryResultWrapper.swift index 9a2e4f1..3efa463 100644 --- a/Sources/DataTransferObjects/QueryResult/QueryResultWrapper.swift +++ b/Sources/DataTransferObjects/QueryResult/QueryResultWrapper.swift @@ -1,6 +1,6 @@ import Foundation -public struct QueryResultWrapper: Codable, Hashable, Equatable { +public struct QueryResultWrapper: Codable, Hashable, Equatable, Sendable { public let result: QueryResult? public let error: String? 
diff --git a/Sources/DataTransferObjects/Supervisor/IdleConfig.swift b/Sources/DataTransferObjects/Supervisor/IdleConfig.swift index 2931129..410a117 100644 --- a/Sources/DataTransferObjects/Supervisor/IdleConfig.swift +++ b/Sources/DataTransferObjects/Supervisor/IdleConfig.swift @@ -1,5 +1,5 @@ /// When the supervisor enters the idle state, no new tasks are launched subsequent to the completion of the currently executing tasks. This strategy may lead to reduced costs for cluster operators while using topics that get sporadic data. Idle state transitioning is currently designated as experimental. -public struct IdleConfig: Codable, Hashable, Equatable { +public struct IdleConfig: Codable, Hashable, Equatable, Sendable { public init(enabled: Bool? = nil, inactiveAfterMillis: Int? = nil) { self.enabled = enabled self.inactiveAfterMillis = inactiveAfterMillis diff --git a/Sources/DataTransferObjects/Supervisor/Supervisor.swift b/Sources/DataTransferObjects/Supervisor/Supervisor.swift index dd1292e..0e8a421 100644 --- a/Sources/DataTransferObjects/Supervisor/Supervisor.swift +++ b/Sources/DataTransferObjects/Supervisor/Supervisor.swift @@ -1,12 +1,12 @@ /// Datasource / Namespace Supervisor definition -public struct Supervisor: Codable, Hashable, Equatable { +public struct Supervisor: Codable, Hashable, Equatable, Sendable { public init(type: Supervisor.SupervisorType, spec: ParallelIndexIngestionSpec? = nil, suspended: Bool? = nil) { self.type = type self.spec = spec self.suspended = suspended } - public enum SupervisorType: String, Codable, CaseIterable { + public enum SupervisorType: String, Codable, CaseIterable, Sendable { case kafka case kinesis case rabbit