diff --git a/deploy/helm/opensearch-operator/crds/crds.yaml b/deploy/helm/opensearch-operator/crds/crds.yaml index 76ed611..9937558 100644 --- a/deploy/helm/opensearch-operator/crds/crds.yaml +++ b/deploy/helm/opensearch-operator/crds/crds.yaml @@ -246,6 +246,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: object + discoveryServiceExposed: + description: Determines whether this role group is exposed in the discovery service. + nullable: true + type: boolean gracefulShutdownTimeout: description: |- Time period Pods have to gracefully shut down, e.g. `30m`, `1h` or `2d`. Consult the @@ -517,11 +521,19 @@ spec: x-kubernetes-preserve-unknown-fields: true roleConfig: default: + discoveryServiceListenerClass: cluster-internal podDisruptionBudget: enabled: true maxUnavailable: null description: This is a product-agnostic RoleConfig, which is sufficient for most of the products. properties: + discoveryServiceListenerClass: + default: cluster-internal + description: The [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) that is used for the discovery service. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string podDisruptionBudget: default: enabled: true @@ -600,6 +612,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: object + discoveryServiceExposed: + description: Determines whether this role group is exposed in the discovery service. + nullable: true + type: boolean gracefulShutdownTimeout: description: |- Time period Pods have to gracefully shut down, e.g. `30m`, `1h` or `2d`. Consult the diff --git a/rust/operator-binary/src/controller.rs b/rust/operator-binary/src/controller.rs index 8f631da..a5ee996 100644 --- a/rust/operator-binary/src/controller.rs +++ b/rust/operator-binary/src/controller.rs @@ -7,11 +7,15 @@ use std::{collections::BTreeMap, marker::PhantomData, str::FromStr, sync::Arc}; use apply::Applier; use build::build; +use dereference::dereference; use snafu::{ResultExt, Snafu}; use stackable_operator::{ cluster_resources::ClusterResourceApplyStrategy, - commons::{affinity::StackableAffinity, product_image_selection::ResolvedProductImage}, - crd::listener::v1alpha1::Listener, + commons::{ + affinity::StackableAffinity, networking::DomainName, + product_image_selection::ResolvedProductImage, + }, + crd::listener, k8s_openapi::api::{ apps::v1::StatefulSet, core::v1::{ConfigMap, Service, ServiceAccount}, @@ -20,7 +24,6 @@ use stackable_operator::{ }, kube::{Resource, api::ObjectMeta, core::DeserializeGuard, runtime::controller::Action}, logging::controller::ReconcilerError, - role_utils::GenericRoleConfig, shared::time::Duration, }; use strum::{EnumDiscriminants, IntoStaticStr}; @@ -34,7 +37,8 @@ use crate::{ product_logging::framework::{ValidatedContainerLogConfigChoice, VectorContainerLogConfig}, role_utils::{GenericProductSpecificCommonConfig, RoleGroupConfig}, types::{ - kubernetes::{ListenerClassName, NamespaceName, Uid}, + common::Port, + kubernetes::{Hostname, ListenerClassName, NamespaceName, Uid}, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, RoleName, @@ -45,6 +49,7 @@ use crate::{ mod apply; mod build; +mod dereference; mod update_status; mod validate; @@ -56,6 +61,7 @@ pub struct ContextNames { pub product_name: ProductName, pub operator_name: OperatorName, pub controller_name: ControllerName, + pub cluster_domain_name: DomainName, } /// The controller 
context @@ -66,19 +72,22 @@ pub struct Context { impl Context { pub fn new(client: stackable_operator::client::Client, operator_name: OperatorName) -> Self { + let cluster_domain_name = client.kubernetes_cluster_info.cluster_domain.clone(); + Context { client, - names: Self::context_names(operator_name), + names: Self::context_names(operator_name, cluster_domain_name), } } - fn context_names(operator_name: OperatorName) -> ContextNames { + fn context_names(operator_name: OperatorName, cluster_domain_name: DomainName) -> ContextNames { ContextNames { product_name: ProductName::from_str("opensearch") .expect("should be a valid product name"), operator_name, controller_name: ControllerName::from_str("opensearchcluster") .expect("should be a valid controller name"), + cluster_domain_name, } } @@ -103,6 +112,9 @@ pub enum Error { source: Box<stackable_operator::kube::core::error_boundary::InvalidObject>, }, + #[snafu(display("failed to dereference resources"))] + Dereference { source: dereference::Error }, + #[snafu(display("failed to validate cluster"))] ValidateCluster { source: validate::Error }, @@ -127,10 +139,16 @@ type OpenSearchRoleGroupConfig = type OpenSearchNodeResources = stackable_operator::commons::resources::Resources; +/// Additional objects required for building the cluster +pub struct DereferencedObjects { + pub maybe_discovery_service_listener: Option<listener::v1alpha1::Listener>, +} + /// Validated [`v1alpha1::OpenSearchConfig`] #[derive(Clone, Debug, PartialEq)] pub struct ValidatedOpenSearchConfig { pub affinity: StackableAffinity, + pub discovery_service_exposed: bool, pub listener_class: ListenerClassName, pub logging: ValidatedLogging, pub node_roles: NodeRoles, @@ -152,6 +170,12 @@ impl ValidatedLogging { } } +#[derive(Clone, Debug, PartialEq)] +pub struct ValidatedDiscoveryEndpoint { + pub hostname: Hostname, + pub port: Port, +} + /// The validated [`v1alpha1::OpenSearchCluster`] /// /// Validated means that there should be no reason for Kubernetes to reject resources generated /// @@ -168,10 +192,11 @@ pub struct ValidatedCluster { pub name: ClusterName, pub namespace: NamespaceName, pub uid: Uid, - pub role_config: GenericRoleConfig, + pub role_config: v1alpha1::OpenSearchRoleConfig, pub role_group_configs: BTreeMap<RoleGroupName, OpenSearchRoleGroupConfig>, pub tls_config: v1alpha1::OpenSearchTls, pub keystores: Vec<v1alpha1::OpenSearchKeystore>, + pub discovery_endpoint: Option<ValidatedDiscoveryEndpoint>, } impl ValidatedCluster { @@ -182,13 +207,14 @@ name: ClusterName, namespace: NamespaceName, uid: impl Into<Uid>, - role_config: GenericRoleConfig, + role_config: v1alpha1::OpenSearchRoleConfig, role_group_configs: BTreeMap<RoleGroupName, OpenSearchRoleGroupConfig>, tls_config: v1alpha1::OpenSearchTls, keystores: Vec<v1alpha1::OpenSearchKeystore>, + discovery_endpoint: Option<ValidatedDiscoveryEndpoint>, ) -> Self { let uid = uid.into(); - ValidatedCluster { + Self { metadata: ObjectMeta { name: Some(name.to_string()), namespace: Some(namespace.to_string()), @@ -204,6 +230,7 @@ role_group_configs, tls_config, keystores, + discovery_endpoint, } } @@ -286,6 +313,27 @@ impl Resource for ValidatedCluster { } } +/// Marker for prepared Kubernetes resources which are not applied yet +struct Prepared; +/// Marker for applied Kubernetes resources +struct Applied; + +/// List of all Kubernetes resources produced by this controller +/// +/// `T` is a marker that indicates if these resources are only [`Prepared`] or already [`Applied`]. +/// The marker is useful e.g. to ensure that the cluster status is updated based on the applied +/// resources.
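+/// +/// For example, an apply step with a signature like `fn apply(resources: KubernetesResources<Prepared>) -> KubernetesResources<Applied>` (an illustration of the pattern, not necessarily the exact API in this controller) lets the compiler reject status updates computed from resources that were never applied.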
+struct KubernetesResources<T> { + stateful_sets: Vec<StatefulSet>, + services: Vec<Service>, + listeners: Vec<listener::v1alpha1::Listener>, + config_maps: Vec<ConfigMap>, + service_accounts: Vec<ServiceAccount>, + role_bindings: Vec<RoleBinding>, + pod_disruption_budgets: Vec<PodDisruptionBudget>, + status: PhantomData<T>, +} pub fn error_policy( _object: Arc<DeserializeGuard<v1alpha1::OpenSearchCluster>>, error: &Error, @@ -317,10 +365,14 @@ .map_err(stackable_operator::kube::core::error_boundary::InvalidObject::clone) .context(DeserializeClusterDefinitionSnafu)?; - // not necessary in this controller: dereference (client required) + // dereference (client required) + let dereferenced_objects = dereference(&context.client, cluster) + .await + .context(DereferenceSnafu)?; // validate (no client required) - let validated_cluster = validate(&context.names, cluster).context(ValidateClusterSnafu)?; + let validated_cluster = + validate(&context.names, cluster, &dereferenced_objects).context(ValidateClusterSnafu)?; // build (no client required; infallible) let prepared_resources = build(&context.names, validated_cluster.clone()); @@ -350,27 +402,6 @@ Ok(Action::await_change()) } -/// Marker for prepared Kubernetes resources which are not applied yet -struct Prepared; -/// Marker for applied Kubernetes resources -struct Applied; - -/// List of all Kubernetes resources produced by this controller -/// -/// `T` is a marker that indicates if these resources are only [`Prepared`] or already [`Applied`]. -/// The marker is useful e.g. to ensure that the cluster status is updated based on the applied -/// resources. -struct KubernetesResources<T> { - stateful_sets: Vec<StatefulSet>, - services: Vec<Service>, - listeners: Vec<Listener>, - config_maps: Vec<ConfigMap>, - service_accounts: Vec<ServiceAccount>, - role_bindings: Vec<RoleBinding>, - pod_disruption_budgets: Vec<PodDisruptionBudget>, - status: PhantomData<T>, -} - #[cfg(test)] mod tests { use std::{ @@ -379,11 +410,13 @@ }; use stackable_operator::{ - commons::{affinity::StackableAffinity, product_image_selection::ResolvedProductImage}, + commons::{ + affinity::StackableAffinity, networking::DomainName, + product_image_selection::ResolvedProductImage, + }, k8s_openapi::api::core::v1::PodTemplateSpec, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -406,7 +439,10 @@ #[test] fn test_context_names() { // Test that the function does not panic - Context::context_names(OperatorName::from_str_unsafe("my-operator")); + Context::context_names( + OperatorName::from_str_unsafe("my-operator"), + DomainName::from_str("cluster.local").expect("should be a valid domain name"), + ); } #[test] @@ -476,7 +512,7 @@ ClusterName::from_str_unsafe("my-opensearch"), NamespaceName::from_str_unsafe("default"), uuid!("e6ac237d-a6d4-43a1-8135-f36506110912"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [ ( RoleGroupName::from_str_unsafe("coordinating"), @@ -512,6 +548,7 @@ .into(), v1alpha1::OpenSearchTls::default(), vec![], + None, ) } @@ -523,6 +560,7 @@ replicas, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("external-stable"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( diff --git a/rust/operator-binary/src/controller/build.rs index b0f84c5..841eabd 100644 --- a/rust/operator-binary/src/controller/build.rs +++ b/rust/operator-binary/src/controller/build.rs @@ -33,8 +33,11 @@
pub fn build(names: &ContextNames, cluster: ValidatedCluster) -> KubernetesResou listeners.push(role_group_builder.build_listener()); } - let cluster_manager_service = role_builder.build_cluster_manager_service(); - services.push(cluster_manager_service); + if let Some(discovery_config_map) = role_builder.build_discovery_config_map() { + config_maps.push(discovery_config_map); + } + services.push(role_builder.build_seed_nodes_service()); + listeners.push(role_builder.build_discovery_service_listener()); let service_accounts = vec![role_builder.build_service_account()]; @@ -62,12 +65,14 @@ mod tests { }; use stackable_operator::{ - commons::{affinity::StackableAffinity, product_image_selection::ResolvedProductImage}, + commons::{ + affinity::StackableAffinity, networking::DomainName, + product_image_selection::ResolvedProductImage, + }, k8s_openapi::api::core::v1::PodTemplateSpec, kube::Resource, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -76,14 +81,16 @@ mod tests { use crate::{ controller::{ ContextNames, OpenSearchNodeResources, OpenSearchRoleGroupConfig, ValidatedCluster, - ValidatedContainerLogConfigChoice, ValidatedLogging, ValidatedOpenSearchConfig, + ValidatedContainerLogConfigChoice, ValidatedDiscoveryEndpoint, ValidatedLogging, + ValidatedOpenSearchConfig, }, crd::{NodeRoles, v1alpha1}, framework::{ builder::pod::container::EnvVarSet, role_utils::GenericProductSpecificCommonConfig, types::{ - kubernetes::{ListenerClassName, NamespaceName}, + common::Port, + kubernetes::{Hostname, ListenerClassName, NamespaceName}, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, @@ -106,15 +113,16 @@ mod tests { ); assert_eq!( vec![ - "my-opensearch", "my-opensearch-nodes-cluster-manager-headless", "my-opensearch-nodes-coordinating-headless", - "my-opensearch-nodes-data-headless" + "my-opensearch-nodes-data-headless", + "my-opensearch-seed-nodes" ], extract_resource_names(&resources.services) ); assert_eq!( vec![ + "my-opensearch", "my-opensearch-nodes-cluster-manager", "my-opensearch-nodes-coordinating", "my-opensearch-nodes-data" @@ -123,6 +131,7 @@ mod tests { ); assert_eq!( vec![ + "my-opensearch", "my-opensearch-nodes-cluster-manager", "my-opensearch-nodes-coordinating", "my-opensearch-nodes-data" @@ -158,6 +167,8 @@ mod tests { product_name: ProductName::from_str_unsafe("opensearch"), operator_name: OperatorName::from_str_unsafe("opensearch.stackable.tech"), controller_name: ControllerName::from_str_unsafe("opensearchcluster"), + cluster_domain_name: DomainName::from_str("cluster.local") + .expect("should be a valid domain name"), } } @@ -175,7 +186,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch"), NamespaceName::from_str_unsafe("default"), uuid!("e6ac237d-a6d4-43a1-8135-f36506110912"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [ ( RoleGroupName::from_str_unsafe("coordinating"), @@ -200,6 +211,10 @@ mod tests { .into(), v1alpha1::OpenSearchTls::default(), vec![], + Some(ValidatedDiscoveryEndpoint { + hostname: Hostname::from_str_unsafe("1.2.3.4"), + port: Port(12345), + }), ) } @@ -211,6 +226,7 @@ mod tests { replicas, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("external-stable"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( 
diff --git a/rust/operator-binary/src/controller/build/node_config.rs b/rust/operator-binary/src/controller/build/node_config.rs index b58dcd6..77a20fd 100644 --- a/rust/operator-binary/src/controller/build/node_config.rs +++ b/rust/operator-binary/src/controller/build/node_config.rs @@ -3,7 +3,9 @@ use std::str::FromStr; use serde_json::{Value, json}; -use stackable_operator::builder::pod::container::FieldPathEnvVar; +use stackable_operator::{ + builder::pod::container::FieldPathEnvVar, commons::networking::DomainName, +}; use super::ValidatedCluster; use crate::{ @@ -32,6 +34,11 @@ pub const CONFIG_OPTION_DISCOVERY_SEED_HOSTS: &str = "discovery.seed_hosts"; /// Type: string pub const CONFIG_OPTION_DISCOVERY_TYPE: &str = "discovery.type"; +/// Specifies an address or addresses that an OpenSearch node publishes to other nodes for HTTP +/// communication. +/// Type: (comma-separated) list of strings +pub const CONFIG_OPTION_HTTP_PUBLISH_HOST: &str = "http.publish_host"; + /// A list of cluster-manager-eligible nodes used to bootstrap the cluster. /// Type: (comma-separated) list of strings pub const CONFIG_OPTION_INITIAL_CLUSTER_MANAGER_NODES: &str = @@ -41,6 +48,11 @@ pub const CONFIG_OPTION_INITIAL_CLUSTER_MANAGER_NODES: &str = /// Type: string pub const CONFIG_OPTION_NETWORK_HOST: &str = "network.host"; +/// Specifies an address or addresses that an OpenSearch node publishes to other nodes in the +/// cluster so that they can connect to it. +/// Type: (comma-separated) list of strings +pub const CONFIG_OPTION_NETWORK_PUBLISH_HOST: &str = "network.publish_host"; + /// The custom node attribute "role-group" /// Type: string pub const CONFIG_OPTION_NODE_ATTR_ROLE_GROUP: &str = "node.attr.role-group"; @@ -97,6 +109,11 @@ pub const CONFIG_OPTION_PLUGINS_SECURITY_SSL_TRANSPORT_PEMKEY_FILEPATH: &str = pub const CONFIG_OPTION_PLUGINS_SECURITY_SSL_TRANSPORT_PEMTRUSTEDCAS_FILEPATH: &str = "plugins.security.ssl.transport.pemtrustedcas_filepath"; +/// Specifies an address or addresses that an OpenSearch node publishes to other nodes for +/// transport communication. +/// Type: (comma-separated) list of strings +pub const CONFIG_OPTION_TRANSPORT_PUBLISH_HOST: &str = "transport.publish_host"; + const DEFAULT_OPENSEARCH_HOME: &str = "/stackable/opensearch"; /// Configuration of an OpenSearch node based on the cluster and role-group configuration @@ -104,7 +121,9 @@ pub struct NodeConfig { cluster: ValidatedCluster, role_group_name: RoleGroupName, role_group_config: OpenSearchRoleGroupConfig, - pub discovery_service_name: ServiceName, + pub seed_nodes_service_name: ServiceName, + cluster_domain_name: DomainName, + headless_service_name: ServiceName, } // Most functions are public because their configuration values could also be used in environment @@ -114,13 +133,17 @@ impl NodeConfig { cluster: ValidatedCluster, role_group_name: RoleGroupName, role_group_config: OpenSearchRoleGroupConfig, - discovery_service_name: ServiceName, + seed_nodes_service_name: ServiceName, + cluster_domain_name: DomainName, + headless_service_name: ServiceName, ) -> Self { Self { cluster, role_group_name, role_group_config, - discovery_service_name, + seed_nodes_service_name, + cluster_domain_name, + headless_service_name, } } @@ -258,20 +281,42 @@ impl NodeConfig { /// The environment variables should only contain node-specific configuration options. /// Cluster-wide options should be added to the configuration file. 
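+    /// + /// For example (using the values from the tests below), with the headless Service + /// `my-opensearch-cluster-default-headless` in the namespace `default` and the cluster domain + /// `cluster.local`, the publish-host options expand to + /// `$(_POD_NAME).my-opensearch-cluster-default-headless.default.svc.cluster.local`, where + /// `_POD_NAME` is resolved by Kubernetes from the Pod's `metadata.name`.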
pub fn environment_variables(&self) -> EnvVarSet { - EnvVarSet::new() + let fqdn = format!( + "$(_POD_NAME).{}.{}.svc.{}", + self.headless_service_name, self.cluster.namespace, self.cluster_domain_name + ); + + let mut env_vars = EnvVarSet::new() // Set the OpenSearch node name to the Pod name. // The node name is used e.g. for INITIAL_CLUSTER_MANAGER_NODES. + .with_field_path( + // Prefix with an underscore, so that it occurs before the other environment + // variables which depend on it. + &EnvVarName::from_str_unsafe("_POD_NAME"), + FieldPathEnvVar::Name, + ) .with_field_path( &EnvVarName::from_str_unsafe(CONFIG_OPTION_NODE_NAME), FieldPathEnvVar::Name, ) .with_value( - &EnvVarName::from_str_unsafe(CONFIG_OPTION_DISCOVERY_SEED_HOSTS), - &self.discovery_service_name, + &EnvVarName::from_str_unsafe(CONFIG_OPTION_NETWORK_PUBLISH_HOST), + &fqdn, ) .with_value( - &EnvVarName::from_str_unsafe(CONFIG_OPTION_INITIAL_CLUSTER_MANAGER_NODES), - self.initial_cluster_manager_nodes(), + &EnvVarName::from_str_unsafe(CONFIG_OPTION_TRANSPORT_PUBLISH_HOST), + &fqdn, + ) + .with_value( + &EnvVarName::from_str_unsafe(CONFIG_OPTION_HTTP_PUBLISH_HOST), + &fqdn, + ) + .with_value( + &EnvVarName::from_str_unsafe(CONFIG_OPTION_DISCOVERY_SEED_HOSTS), + format!( + "{}.{}.svc.{}", + self.seed_nodes_service_name, self.cluster.namespace, self.cluster_domain_name + ), ) .with_value( &EnvVarName::from_str_unsafe(CONFIG_OPTION_NODE_ROLES), @@ -284,8 +329,16 @@ // Node roles cannot contain commas, therefore creating a comma-separated list // is safe. .join(","), - ) - .merge(self.role_group_config.env_overrides.clone()) + ); + + if let Some(initial_cluster_manager_nodes) = self.initial_cluster_manager_nodes() { + env_vars = env_vars.with_value( + &EnvVarName::from_str_unsafe(CONFIG_OPTION_INITIAL_CLUSTER_MANAGER_NODES), + initial_cluster_manager_nodes, + ); + } + + env_vars.merge(self.role_group_config.env_overrides.clone()) } fn to_yaml(kv: serde_json::Map<String, Value>) -> String { @@ -311,33 +364,70 @@ } } - /// Configuration for `cluster.initial_cluster_manager_nodes` which replaces - /// `cluster.initial_master_nodes`, see - /// . + /// Configuration for `cluster.initial_cluster_manager_nodes` /// - /// According to - /// , - /// it contains "a list of cluster-manager-eligible nodes used to bootstrap the cluster." + /// Returns the node names of the initial cluster-manager nodes if + /// * this is a multi-node cluster and + /// * this node has the cluster-manager node role. /// - /// However, the documentation for Elasticsearch is more detailed and contains the following - /// notes (see ): + /// Please read the following sections for an explanation of these restrictions. + /// + /// This configuration setting replaces the setting `cluster.initial_master_nodes`, see + /// . + /// + /// This setting is required on nodes with the cluster-manager node role on a multi-node + /// cluster. Otherwise, the bootstrapping of the cluster fails and all pods report: + /// > Wait for cluster to be available ... + /// + /// This setting must not be set on a single-node cluster, because otherwise the following + /// error is thrown: + /// > setting [cluster.initial_cluster_manager_nodes] is not allowed when [discovery.type] is set to [single-node] + /// + /// see + /// + /// This setting does not seem to have an effect on nodes without the cluster-manager node + /// role. However, as it is recommended (see the Elasticsearch documentation below) not to set + /// it on master-ineligible nodes, it is not set.
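+    /// + /// For example (cf. the tests below), a role group `default` with three cluster-manager-eligible + /// replicas produces + /// `my-opensearch-cluster-nodes-default-0,my-opensearch-cluster-nodes-default-1,my-opensearch-cluster-nodes-default-2`.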
+ /// + /// This setting seems to be ignored when the cluster has already formed. It is recommended in + /// the Elasticsearch documentation to remove it once the cluster has formed, but as it is hard + /// to determine whether the bootstrapping completed successfully, this setting is still set. + /// Adding a new cluster-manager node and updating this setting also seems to be okay. + /// + /// # OpenSearch documentation + /// + /// > This setting is required when bootstrapping a cluster for the first time and should + /// > contain the node names (as defined by `node.name`) of the initial cluster-manager-eligible + /// > nodes. This list should be empty for nodes joining an existing cluster. + /// + /// see + /// + /// # Elasticsearch documentation + /// + /// The documentation for Elasticsearch is more detailed and contains the following + /// notes: /// * Remove this setting once the cluster has formed, and never set it again for this cluster. /// * Do not configure this setting on master-ineligible nodes. /// * Do not configure this setting on nodes joining an existing cluster. /// * Do not configure this setting on nodes which are restarting. /// * Do not configure this setting when performing a full-cluster restart. /// - /// The OpenSearch Helm chart only sets master nodes but does not handle the other cases (see - /// ), - /// so they are also ignored here for the moment. - fn initial_cluster_manager_nodes(&self) -> String { - if !self.cluster.is_single_node() - && self + /// see + /// + /// # Implementation in the OpenSearch Helm chart + /// + /// The OpenSearch Helm chart sets this setting on master nodes on multi-node clusters, + /// see . + fn initial_cluster_manager_nodes(&self) -> Option<String> { + if self.cluster.is_single_node() + || !self .role_group_config .config .node_roles .contains(&v1alpha1::NodeRole::ClusterManager) { + None + } else { let cluster_manager_configs = self .cluster .role_group_configs_filtered_by_node_role(&v1alpha1::NodeRole::ClusterManager); @@ -360,11 +450,7 @@ ); } // Pod names cannot contain commas, therefore creating a comma-separated list is safe.
- pod_names.join(",") - } else { - // This setting is not allowed on single node cluster, see - // - String::new() + Some(pod_names.join(",")) } } @@ -390,6 +476,7 @@ impl NodeConfig { mod tests { use std::collections::BTreeMap; + use pretty_assertions::assert_eq; use stackable_operator::{ commons::{ affinity::StackableAffinity, @@ -399,7 +486,6 @@ mod tests { k8s_openapi::api::core::v1::PodTemplateSpec, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -444,6 +530,7 @@ mod tests { replicas: test_config.replicas, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("cluster-internal"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( @@ -495,7 +582,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch-cluster"), NamespaceName::from_str_unsafe("default"), uuid!("0b1e30e6-326e-4c1a-868d-ad6598b49e8b"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [( RoleGroupName::from_str_unsafe("default"), role_group_config.clone(), @@ -503,13 +590,16 @@ mod tests { .into(), v1alpha1::OpenSearchTls::default(), vec![], + None, ); NodeConfig::new( cluster, role_group_name, role_group_config, - ServiceName::from_str_unsafe("my-opensearch-cluster-manager"), + ServiceName::from_str_unsafe("my-opensearch-seed-nodes"), + DomainName::from_str("cluster.local").expect("should be a valid domain name"), + ServiceName::from_str_unsafe("my-opensearch-cluster-default-headless"), ) } @@ -609,13 +699,25 @@ mod tests { assert_eq!( EnvVarSet::new() .with_value(&EnvVarName::from_str_unsafe("TEST"), "value") + .with_field_path( + &EnvVarName::from_str_unsafe("_POD_NAME"), + FieldPathEnvVar::Name + ) .with_value( &EnvVarName::from_str_unsafe("cluster.initial_cluster_manager_nodes"), "my-opensearch-cluster-nodes-default-0,my-opensearch-cluster-nodes-default-1", ) .with_value( &EnvVarName::from_str_unsafe("discovery.seed_hosts"), - "my-opensearch-cluster-manager", + "my-opensearch-seed-nodes.default.svc.cluster.local", + ) + .with_value( + &EnvVarName::from_str_unsafe("http.publish_host"), + "$(_POD_NAME).my-opensearch-cluster-default-headless.default.svc.cluster.local", + ) + .with_value( + &EnvVarName::from_str_unsafe("network.publish_host"), + "$(_POD_NAME).my-opensearch-cluster-default-headless.default.svc.cluster.local", ) .with_field_path( &EnvVarName::from_str_unsafe("node.name"), @@ -624,6 +726,10 @@ mod tests { .with_value( &EnvVarName::from_str_unsafe("node.roles"), "cluster_manager,data,ingest,remote_cluster_client" + ) + .with_value( + &EnvVarName::from_str_unsafe("transport.publish_host"), + "$(_POD_NAME).my-opensearch-cluster-default-headless.default.svc.cluster.local", ), node_config.environment_variables() ); @@ -664,11 +770,11 @@ mod tests { }); assert_eq!( - "".to_owned(), + None, node_config_single_node.initial_cluster_manager_nodes() ); assert_eq!( - "my-opensearch-cluster-nodes-default-0,my-opensearch-cluster-nodes-default-1,my-opensearch-cluster-nodes-default-2".to_owned(), + Some("my-opensearch-cluster-nodes-default-0,my-opensearch-cluster-nodes-default-1,my-opensearch-cluster-nodes-default-2".to_owned()), node_config_multiple_nodes.initial_cluster_manager_nodes() ); } diff --git a/rust/operator-binary/src/controller/build/role_builder.rs b/rust/operator-binary/src/controller/build/role_builder.rs index c2b8376..d48fdbe 
100644 --- a/rust/operator-binary/src/controller/build/role_builder.rs +++ b/rust/operator-binary/src/controller/build/role_builder.rs @@ -1,11 +1,14 @@ //! Builder for role resources +use std::str::FromStr; + use stackable_operator::{ builder::meta::ObjectMetaBuilder, + crd::listener, k8s_openapi::{ Resource, api::{ - core::v1::{Service, ServiceAccount, ServicePort, ServiceSpec}, + core::v1::{ConfigMap, Service, ServiceAccount, ServicePort, ServiceSpec}, policy::v1::PodDisruptionBudget, rbac::v1::{ClusterRole, RoleBinding, RoleRef, Subject}, }, @@ -17,17 +20,23 @@ use stackable_operator::{ }, }; -use super::role_group_builder::{ - HTTP_PORT, HTTP_PORT_NAME, RoleGroupBuilder, TRANSPORT_PORT, TRANSPORT_PORT_NAME, -}; use crate::{ - controller::{ContextNames, ValidatedCluster}, + controller::{ + ContextNames, ValidatedCluster, + build::role_group_builder::{ + HTTP_PORT, HTTP_PORT_NAME, RoleGroupBuilder, TRANSPORT_PORT, TRANSPORT_PORT_NAME, + }, + }, framework::{ NameIsValidLabelValue, builder::{ meta::ownerreference_from_resource, pdb::pod_disruption_budget_builder_with_role, }, role_utils::ResourceNames, + types::{ + kubernetes::{ConfigMapName, ListenerName, ServiceName}, + operator::ClusterName, + }, }, }; @@ -64,7 +73,8 @@ impl<'a> RoleBuilder<'a> { role_group_name.clone(), role_group_config.clone(), self.context_names, - self.resource_names.discovery_service_name(), + seed_nodes_service_name(&self.cluster.name), + discovery_service_listener_name(&self.cluster.name), ) }) .collect() @@ -101,28 +111,14 @@ impl<'a> RoleBuilder<'a> { } /// Builds a Service that references all nodes with the cluster_manager node role - /// - /// Initially, this service was meant to be used by - /// [`super::node_config::NodeConfig::initial_cluster_manager_nodes`], but the function uses now another approach. - /// Afterwards, it was meant to be used as an entry point to OpenSearch, but it could also make - /// sense to use coordinating only nodes as entry points and not cluster manager nodes. - /// Therefore, this service will bei either adapted or removed. There is already an according - /// task entry in . - pub fn build_cluster_manager_service(&self) -> Service { - let ports = vec![ - ServicePort { - name: Some(HTTP_PORT_NAME.to_owned()), - port: HTTP_PORT.into(), - ..ServicePort::default() - }, - ServicePort { - name: Some(TRANSPORT_PORT_NAME.to_owned()), - port: TRANSPORT_PORT.into(), - ..ServicePort::default() - }, - ]; + pub fn build_seed_nodes_service(&self) -> Service { + let ports = vec![ServicePort { + name: Some(TRANSPORT_PORT_NAME.to_owned()), + port: TRANSPORT_PORT.into(), + ..ServicePort::default() + }]; - let metadata = self.common_metadata(self.resource_names.discovery_service_name()); + let metadata = self.common_metadata(seed_nodes_service_name(&self.cluster.name)); let service_selector = RoleGroupBuilder::cluster_manager_labels(&self.cluster, self.context_names); @@ -144,9 +140,68 @@ impl<'a> RoleBuilder<'a> { } } + /// Builds a Listener whose status is used to populate the discovery ConfigMap. 
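+    /// + /// For example, once the Listener reports the address `1.2.3.4` and port `12345` (the values + /// used in the tests below), the dereference and validate steps turn its status into a + /// [`ValidatedDiscoveryEndpoint`], which [`Self::build_discovery_config_map`] publishes as + /// `OPENSEARCH_HOST` and `OPENSEARCH_PORT`.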
+ pub fn build_discovery_service_listener(&self) -> listener::v1alpha1::Listener { + let metadata = self.common_metadata(discovery_service_listener_name(&self.cluster.name)); + + let listener_class = &self.cluster.role_config.discovery_service_listener_class; + + let ports = vec![listener::v1alpha1::ListenerPort { + name: HTTP_PORT_NAME.to_owned(), + port: HTTP_PORT.into(), + protocol: Some("TCP".to_owned()), + }]; + + listener::v1alpha1::Listener { + metadata, + spec: listener::v1alpha1::ListenerSpec { + class_name: Some(listener_class.to_string()), + ports: Some(ports), + ..listener::v1alpha1::ListenerSpec::default() + }, + status: None, + } + } + + /// Builds the discovery ConfigMap if the discovery endpoint is already known. + /// + /// The discovery endpoint is derived from the status of the discovery service Listener. If the + /// status is not set yet, the reconciliation process will occur again once the Listener status + /// is updated, leading to the eventual creation of the discovery ConfigMap. + pub fn build_discovery_config_map(&self) -> Option<ConfigMap> { + let discovery_endpoint = self.cluster.discovery_endpoint.as_ref()?; + + let metadata = self.common_metadata(discovery_config_map_name(&self.cluster.name)); + + let data = [ + ( + "OPENSEARCH_PROTOCOL".to_owned(), + if self.cluster.tls_config.server_secret_class.is_some() { + "https".to_owned() + } else { + "http".to_owned() + }, + ), + ( + "OPENSEARCH_HOST".to_owned(), + discovery_endpoint.hostname.to_string(), + ), + ( + "OPENSEARCH_PORT".to_owned(), + discovery_endpoint.port.to_string(), + ), + ]; + + Some(ConfigMap { + metadata, + data: Some(data.into()), + ..ConfigMap::default() + }) + } + /// Builds a [`PodDisruptionBudget`] used by all role-groups pub fn build_pdb(&self) -> Option<PodDisruptionBudget> { - let pdb_config = &self.cluster.role_config.pod_disruption_budget; + let pdb_config = &self.cluster.role_config.common.pod_disruption_budget; if pdb_config.enabled { let max_unavailable = pdb_config @@ -211,6 +266,43 @@ } } +fn seed_nodes_service_name(cluster_name: &ClusterName) -> ServiceName { + const SUFFIX: &str = "-seed-nodes"; + + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH + SUFFIX.len() <= ServiceName::MAX_LENGTH, + "A cluster name plus the suffix `-seed-nodes` must not exceed the maximum length of a Service name." + ); + let _ = ClusterName::IS_RFC_1035_LABEL_NAME; + let _ = ClusterName::IS_VALID_LABEL_VALUE; + + ServiceName::from_str(&format!("{}{SUFFIX}", cluster_name.as_ref())) .expect("should be a valid Service name") +} + +fn discovery_config_map_name(cluster_name: &ClusterName) -> ConfigMapName { + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH <= ConfigMapName::MAX_LENGTH, + "A cluster name must not exceed the maximum length of a ConfigMap name." + ); + let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME; + + ConfigMapName::from_str(cluster_name.as_ref()).expect("should be a valid ConfigMap name") +} + +pub fn discovery_service_listener_name(cluster_name: &ClusterName) -> ListenerName { + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH <= ListenerName::MAX_LENGTH, + "A cluster name must not exceed the maximum length of a Listener name."
+ ); + let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME; + + ListenerName::from_str(cluster_name.as_ref()).expect("should be a valid Listener name") +} + #[cfg(test)] mod tests { use std::{ @@ -223,13 +315,13 @@ mod tests { use stackable_operator::{ commons::{ affinity::StackableAffinity, + networking::DomainName, product_image_selection::{ProductImage, ResolvedProductImage}, resources::Resources, }, k8s_openapi::api::core::v1::PodTemplateSpec, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -238,14 +330,22 @@ mod tests { use crate::{ controller::{ ContextNames, OpenSearchRoleGroupConfig, ValidatedCluster, - ValidatedContainerLogConfigChoice, ValidatedLogging, ValidatedOpenSearchConfig, + ValidatedContainerLogConfigChoice, ValidatedDiscoveryEndpoint, ValidatedLogging, + ValidatedOpenSearchConfig, + build::role_builder::{ + discovery_config_map_name, discovery_service_listener_name, seed_nodes_service_name, + }, }, crd::{NodeRoles, v1alpha1}, framework::{ builder::pod::container::EnvVarSet, role_utils::GenericProductSpecificCommonConfig, types::{ - kubernetes::{ListenerClassName, NamespaceName}, + common::Port, + kubernetes::{ + ConfigMapName, Hostname, ListenerClassName, ListenerName, NamespaceName, + ServiceName, + }, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, @@ -259,6 +359,8 @@ mod tests { product_name: ProductName::from_str_unsafe("opensearch"), operator_name: OperatorName::from_str_unsafe("opensearch.stackable.tech"), controller_name: ControllerName::from_str_unsafe("opensearchcluster"), + cluster_domain_name: DomainName::from_str("cluster.local") + .expect("should be a valid domain name"), } } @@ -270,6 +372,7 @@ mod tests { replicas: 1, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("cluster-internal"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( @@ -308,7 +411,12 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch-cluster"), NamespaceName::from_str_unsafe("default"), uuid!("0b1e30e6-326e-4c1a-868d-ad6598b49e8b"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig { + discovery_service_listener_class: ListenerClassName::from_str_unsafe( + "external-stable", + ), + ..v1alpha1::OpenSearchRoleConfig::default() + }, [( RoleGroupName::from_str_unsafe("default"), role_group_config.clone(), @@ -316,6 +424,10 @@ mod tests { .into(), v1alpha1::OpenSearchTls::default(), vec![], + Some(ValidatedDiscoveryEndpoint { + hostname: Hostname::from_str_unsafe("1.2.3.4"), + port: Port(12345), + }), ); RoleBuilder::new(cluster, context_names) @@ -411,13 +523,12 @@ mod tests { } #[test] - fn test_build_cluster_manager_service() { + fn test_build_seed_nodes_service() { let context_names = context_names(); let role_builder = role_builder(&context_names); - let cluster_manager_service = - serde_json::to_value(role_builder.build_cluster_manager_service()) - .expect("should be serializable"); + let seed_nodes_service = serde_json::to_value(role_builder.build_seed_nodes_service()) + .expect("should be serializable"); assert_eq!( json!({ @@ -432,7 +543,7 @@ mod tests { "app.kubernetes.io/version": "3.1.0", "stackable.tech/vendor": "Stackable" }, - "name": "my-opensearch-cluster", + "name": "my-opensearch-cluster-seed-nodes", "namespace": "default", "ownerReferences": [ { @@ 
-447,10 +558,6 @@ mod tests { "spec": { "clusterIP": "None", "ports": [ - { - "name": "http", - "port": 9200 - }, { "name": "transport", "port": 9300 @@ -466,7 +573,101 @@ mod tests { "type": "ClusterIP" } }), - cluster_manager_service + seed_nodes_service + ); + } + + #[test] + fn test_build_discovery_service_listener() { + let context_names = context_names(); + let role_builder = role_builder(&context_names); + + let discovery_service_listener = + serde_json::to_value(role_builder.build_discovery_service_listener()) + .expect("should be serializable"); + + assert_eq!( + json!({ + "apiVersion": "listeners.stackable.tech/v1alpha1", + "kind": "Listener", + "metadata": { + "labels": { + "app.kubernetes.io/component": "nodes", + "app.kubernetes.io/instance": "my-opensearch-cluster", + "app.kubernetes.io/managed-by": "opensearch.stackable.tech_opensearchcluster", + "app.kubernetes.io/name": "opensearch", + "app.kubernetes.io/version": "3.1.0", + "stackable.tech/vendor": "Stackable", + }, + "name": "my-opensearch-cluster", + "namespace": "default", + "ownerReferences": [ + { + "apiVersion": "opensearch.stackable.tech/v1alpha1", + "controller": true, + "kind": "OpenSearchCluster", + "name": "my-opensearch-cluster", + "uid": "0b1e30e6-326e-4c1a-868d-ad6598b49e8b", + }, + ], + }, + "spec": { + "className": "external-stable", + "extraPodSelectorLabels": {}, + "ports": [ + { + "name": "http", + "port": 9200, + "protocol": "TCP", + }, + ], + "publishNotReadyAddresses": null, + }, + }), + discovery_service_listener + ); + } + + #[test] + fn test_build_discovery_config_map() { + let context_names = context_names(); + let role_builder = role_builder(&context_names); + + let discovery_config_map = serde_json::to_value(role_builder.build_discovery_config_map()) + .expect("should be serializable"); + + assert_eq!( + json!({ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "labels": { + "app.kubernetes.io/component": "nodes", + "app.kubernetes.io/instance": "my-opensearch-cluster", + "app.kubernetes.io/managed-by": "opensearch.stackable.tech_opensearchcluster", + "app.kubernetes.io/name": "opensearch", + "app.kubernetes.io/version": "3.1.0", + "stackable.tech/vendor": "Stackable", + }, + "name": "my-opensearch-cluster", + "namespace": "default", + "ownerReferences": [ + { + "apiVersion": "opensearch.stackable.tech/v1alpha1", + "controller": true, + "kind": "OpenSearchCluster", + "name": "my-opensearch-cluster", + "uid": "0b1e30e6-326e-4c1a-868d-ad6598b49e8b", + }, + ], + }, + "data": { + "OPENSEARCH_HOST": "1.2.3.4", + "OPENSEARCH_PORT": "12345", + "OPENSEARCH_PROTOCOL": "https", + }, + }), + discovery_config_map ); } @@ -514,4 +715,34 @@ mod tests { pdb ); } + + #[test] + fn test_seed_nodes_service_name() { + let cluster_name = ClusterName::from_str_unsafe("test-cluster"); + + assert_eq!( + ServiceName::from_str_unsafe("test-cluster-seed-nodes"), + seed_nodes_service_name(&cluster_name) + ); + } + + #[test] + fn test_discovery_config_map_name() { + let cluster_name = ClusterName::from_str_unsafe("test-cluster"); + + assert_eq!( + ConfigMapName::from_str_unsafe("test-cluster"), + discovery_config_map_name(&cluster_name) + ); + } + + #[test] + fn test_discovery_service_listener_name() { + let cluster_name = ClusterName::from_str_unsafe("test-cluster"); + + assert_eq!( + ListenerName::from_str_unsafe("test-cluster"), + discovery_service_listener_name(&cluster_name) + ); + } } diff --git a/rust/operator-binary/src/controller/build/role_group_builder.rs 
b/rust/operator-binary/src/controller/build/role_group_builder.rs index 4bb8e1a..e19a8cf 100644 --- a/rust/operator-binary/src/controller/build/role_group_builder.rs +++ b/rust/operator-binary/src/controller/build/role_group_builder.rs @@ -14,9 +14,9 @@ use stackable_operator::{ apps::v1::{StatefulSet, StatefulSetSpec}, core::v1::{ Affinity, ConfigMap, ConfigMapVolumeSource, Container, ContainerPort, - EmptyDirVolumeSource, KeyToPath, PersistentVolumeClaim, PodSecurityContext, - PodSpec, PodTemplateSpec, Probe, SecretVolumeSource, Service, ServicePort, - ServiceSpec, TCPSocketAction, Volume, VolumeMount, + EmptyDirVolumeSource, KeyToPath, PodSecurityContext, PodSpec, PodTemplateSpec, + Probe, SecretVolumeSource, Service, ServicePort, ServiceSpec, TCPSocketAction, + Volume, VolumeMount, }, }, apimachinery::pkg::{ @@ -61,9 +61,10 @@ use crate::{ }, role_group_utils::ResourceNames, types::{ + common::Port, kubernetes::{ - PersistentVolumeClaimName, SecretClassName, ServiceAccountName, ServiceName, - VolumeName, + ListenerName, PersistentVolumeClaimName, SecretClassName, ServiceAccountName, + ServiceName, VolumeName, }, operator::RoleGroupName, }, @@ -71,17 +72,22 @@ use crate::{ }; pub const HTTP_PORT_NAME: &str = "http"; -pub const HTTP_PORT: u16 = 9200; +pub const HTTP_PORT: Port = Port(9200); pub const TRANSPORT_PORT_NAME: &str = "transport"; -pub const TRANSPORT_PORT: u16 = 9300; +pub const TRANSPORT_PORT: Port = Port(9300); constant!(CONFIG_VOLUME_NAME: VolumeName = "config"); constant!(LOG_CONFIG_VOLUME_NAME: VolumeName = "log-config"); constant!(DATA_VOLUME_NAME: VolumeName = "data"); -constant!(LISTENER_VOLUME_NAME: PersistentVolumeClaimName = "listener"); -const LISTENER_VOLUME_DIR: &str = "/stackable/listener"; +// This is the main listener which is sometimes referenced by users in podOverrides, so keep its +// name simple. 
+constant!(ROLE_GROUP_LISTENER_VOLUME_NAME: PersistentVolumeClaimName = "listener"); +const ROLE_GROUP_LISTENER_VOLUME_DIR: &str = "/stackable/listeners/role-group"; + +constant!(DISCOVERY_SERVICE_LISTENER_VOLUME_NAME: PersistentVolumeClaimName = "discovery-service-listener"); +const DISCOVERY_SERVICE_LISTENER_VOLUME_DIR: &str = "/stackable/listeners/discovery-service"; constant!(TLS_SERVER_VOLUME_NAME: VolumeName = "tls-server"); constant!(TLS_INTERNAL_VOLUME_NAME: VolumeName = "tls-internal"); @@ -104,6 +110,7 @@ pub struct RoleGroupBuilder<'a> { role_group_config: OpenSearchRoleGroupConfig, context_names: &'a ContextNames, resource_names: ResourceNames, + discovery_service_listener_name: ListenerName, } impl<'a> RoleGroupBuilder<'a> { @@ -113,8 +120,14 @@ impl<'a> RoleGroupBuilder<'a> { role_group_name: RoleGroupName, role_group_config: OpenSearchRoleGroupConfig, context_names: &'a ContextNames, - discovery_service_name: ServiceName, + seed_nodes_service_name: ServiceName, + discovery_service_listener_name: ListenerName, ) -> RoleGroupBuilder<'a> { + let resource_names = ResourceNames { + cluster_name: cluster.name.clone(), + role_name: ValidatedCluster::role_name(), + role_group_name: role_group_name.clone(), + }; RoleGroupBuilder { service_account_name, cluster: cluster.clone(), @@ -122,7 +135,9 @@ impl<'a> RoleGroupBuilder<'a> { cluster.clone(), role_group_name.clone(), role_group_config.clone(), - discovery_service_name, + seed_nodes_service_name, + context_names.cluster_domain_name.clone(), + resource_names.headless_service_name(), ), role_group_name: role_group_name.clone(), role_group_config, @@ -132,6 +147,7 @@ impl<'a> RoleGroupBuilder<'a> { role_name: ValidatedCluster::role_name(), role_group_name, }, + discovery_service_listener_name, } } @@ -190,23 +206,36 @@ impl<'a> RoleGroupBuilder<'a> { .data .build_pvc(DATA_VOLUME_NAME.as_ref(), Some(vec!["ReadWriteOnce"])); - let listener_group_name = self.resource_names.listener_name(); - - // Listener endpoints for the all rolegroups will use persistent - // volumes so that load balancers can hard-code the target - // addresses. This will be the case even when no class is set (and - // the value defaults to cluster-internal) as the address should - // still be consistent. - let listener_volume_claim_template = listener_operator_volume_source_builder_build_pvc( - &ListenerReference::Listener(listener_group_name), - &self.recommended_labels(), - &LISTENER_VOLUME_NAME, - ); + // Listener endpoints for all rolegroups will use persistent volumes so that load balancers + // can hard-code the target addresses. This will be the case even when no class is set (and + // the value defaults to cluster-internal) as the address should still be consistent. 
+ let role_group_listener_volume_claim_template = + listener_operator_volume_source_builder_build_pvc( + &ListenerReference::Listener(self.resource_names.listener_name()), + &self.recommended_labels(), + &ROLE_GROUP_LISTENER_VOLUME_NAME, + ); + + let maybe_discovery_service_listener_volume_claim_template = self + .role_group_config + .config + .discovery_service_exposed + .then(|| { + listener_operator_volume_source_builder_build_pvc( + &ListenerReference::Listener(self.discovery_service_listener_name.to_owned()), + &self.recommended_labels(), + &DISCOVERY_SERVICE_LISTENER_VOLUME_NAME, + ) + }); - let pvcs: Option<Vec<PersistentVolumeClaim>> = Some(vec![ - data_volume_claim_template, - listener_volume_claim_template, - ]); + let pvcs = vec![ + Some(data_volume_claim_template), + Some(role_group_listener_volume_claim_template), + maybe_discovery_service_listener_volume_claim_template, + ] + .into_iter() + .flatten() + .collect(); let spec = StatefulSetSpec { // Order does not matter for OpenSearch @@ -218,7 +247,7 @@ }, service_name: Some(self.resource_names.headless_service_name().to_string()), template, - volume_claim_templates: pvcs, + volume_claim_templates: Some(pvcs), ..StatefulSetSpec::default() }; @@ -232,7 +261,6 @@ /// Builds the [`PodTemplateSpec`] for the role-group [`StatefulSet`] fn build_pod_template(&self) -> PodTemplateSpec { let mut node_role_labels = Labels::new(); - let service_scopes = vec![self.node_config.discovery_service_name.clone()]; for node_role in self.role_group_config.config.node_roles.iter() { node_role_labels.insert(Self::build_node_role_label(node_role)); @@ -283,6 +311,25 @@ self.resource_names.role_group_config_map() }; + let mut internal_tls_volume_service_scopes = vec![]; + if self + .role_group_config + .config + .node_roles + .contains(&v1alpha1::NodeRole::ClusterManager) + { + internal_tls_volume_service_scopes + .push(self.node_config.seed_nodes_service_name.clone()); + } + let internal_tls_volume = self.build_tls_volume( + &TLS_INTERNAL_VOLUME_NAME, + &self.cluster.tls_config.internal_secret_class, + internal_tls_volume_service_scopes, + SecretFormat::TlsPem, + &self.role_group_config.config.requested_secret_lifetime, + vec![ROLE_GROUP_LISTENER_VOLUME_NAME.clone()], + ); + let mut volumes = vec![ Volume { name: CONFIG_VOLUME_NAME.to_string(), @@ -312,24 +359,22 @@ }), ..Volume::default() }, - self.build_tls_volume( - &TLS_INTERNAL_VOLUME_NAME, - &self.cluster.tls_config.internal_secret_class, - vec![], - SecretFormat::TlsPem, - &self.role_group_config.config.requested_secret_lifetime, - &LISTENER_VOLUME_NAME, - ), + internal_tls_volume, ]; if let Some(tls_http_secret_class_name) = &self.cluster.tls_config.server_secret_class { + let mut listener_scopes = vec![ROLE_GROUP_LISTENER_VOLUME_NAME.to_owned()]; + if self.role_group_config.config.discovery_service_exposed { + listener_scopes.push(DISCOVERY_SERVICE_LISTENER_VOLUME_NAME.to_owned()); + } + volumes.push(self.build_tls_volume( &TLS_SERVER_VOLUME_NAME, tls_http_secret_class_name, - service_scopes, + vec![], SecretFormat::TlsPem, &self.role_group_config.config.requested_secret_lifetime, - &LISTENER_VOLUME_NAME, + listener_scopes, )) }; @@ -412,7 +457,7 @@ /// Returns the labels of OpenSearch nodes with the `cluster_manager` role.
/// - /// As described in [`super::role_builder::RoleBuilder::build_cluster_manager_service`], this + /// As described in [`super::role_builder::RoleBuilder::build_seed_nodes_service`], this /// function will be changed or deleted. pub fn cluster_manager_labels( cluster: &ValidatedCluster, @@ -545,8 +590,8 @@ cp --archive config/opensearch.keystore {OPENSEARCH_INITIALIZED_KEYSTORE_DIRECTO ..VolumeMount::default() }, VolumeMount { - mount_path: LISTENER_VOLUME_DIR.to_owned(), - name: LISTENER_VOLUME_NAME.to_string(), + mount_path: ROLE_GROUP_LISTENER_VOLUME_DIR.to_owned(), + name: ROLE_GROUP_LISTENER_VOLUME_NAME.to_string(), ..VolumeMount::default() }, VolumeMount { @@ -561,12 +606,20 @@ }, ]; + if self.role_group_config.config.discovery_service_exposed { + volume_mounts.push(VolumeMount { + mount_path: DISCOVERY_SERVICE_LISTENER_VOLUME_DIR.to_owned(), + name: DISCOVERY_SERVICE_LISTENER_VOLUME_NAME.to_string(), + ..VolumeMount::default() + }); + } + if self.cluster.tls_config.server_secret_class.is_some() { volume_mounts.push(VolumeMount { mount_path: format!("{opensearch_path_conf}/tls/server"), name: TLS_SERVER_VOLUME_NAME.to_string(), ..VolumeMount::default() - }) + }); } if !self.cluster.keystores.is_empty() { @@ -708,17 +761,17 @@ let listener_class = self.role_group_config.config.listener_class.to_owned(); - let ports = [listener::v1alpha1::ListenerPort { - name: HTTP_PORT_NAME.to_string(), + let ports = vec![listener::v1alpha1::ListenerPort { + name: HTTP_PORT_NAME.to_owned(), port: HTTP_PORT.into(), - protocol: Some("TCP".to_string()), + protocol: Some("TCP".to_owned()), }]; listener::v1alpha1::Listener { metadata, spec: listener::v1alpha1::ListenerSpec { class_name: Some(listener_class.to_string()), - ports: Some(ports.to_vec()), + ports: Some(ports), ..listener::v1alpha1::ListenerSpec::default() }, status: None, @@ -774,7 +827,7 @@ service_scopes: Vec<ServiceName>, secret_format: SecretFormat, requested_secret_lifetime: &Duration, - listener_scope: &PersistentVolumeClaimName, + listener_volume_scopes: Vec<PersistentVolumeClaimName>, ) -> Volume { let mut secret_volume_source_builder = SecretOperatorVolumeSourceBuilder::new(tls_secret_class_name); @@ -782,11 +835,13 @@ for scope in service_scopes { secret_volume_source_builder.with_service_scope(scope); } + for scope in listener_volume_scopes { + secret_volume_source_builder.with_listener_volume_scope(scope); + } VolumeBuilder::new(volume_name.to_string()) .ephemeral( secret_volume_source_builder - .with_listener_volume_scope(listener_scope) .with_pod_scope() .with_format(secret_format) .with_auto_tls_cert_lifetime(*requested_secret_lifetime) @@ -808,26 +863,29 @@ use serde_json::json; use stackable_operator::{ commons::{ - affinity::StackableAffinity, product_image_selection::ResolvedProductImage, - resources::Resources, + affinity::StackableAffinity, networking::DomainName, + product_image_selection::ResolvedProductImage, resources::Resources, }, k8s_openapi::api::core::v1::PodTemplateSpec, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use strum::IntoEnumIterator; use uuid::uuid; use super::{ - CONFIG_VOLUME_NAME, DATA_VOLUME_NAME, LISTENER_VOLUME_NAME,
LOG_CONFIG_VOLUME_NAME, - LOG_VOLUME_NAME, RoleGroupBuilder, + CONFIG_VOLUME_NAME, DATA_VOLUME_NAME, LOG_CONFIG_VOLUME_NAME, LOG_VOLUME_NAME, + ROLE_GROUP_LISTENER_VOLUME_NAME, RoleGroupBuilder, }; use crate::{ controller::{ ContextNames, OpenSearchRoleGroupConfig, ValidatedCluster, ValidatedContainerLogConfigChoice, ValidatedLogging, ValidatedOpenSearchConfig, + build::role_group_builder::{ + DISCOVERY_SERVICE_LISTENER_VOLUME_NAME, OPENSEARCH_KEYSTORE_VOLUME_NAME, + TLS_INTERNAL_VOLUME_NAME, TLS_SERVER_VOLUME_NAME, + }, }, crd::{NodeRoles, OpenSearchKeystoreKey, v1alpha1}, framework::{ @@ -836,8 +894,8 @@ mod tests { role_utils::GenericProductSpecificCommonConfig, types::{ kubernetes::{ - ConfigMapName, ListenerClassName, NamespaceName, SecretKey, SecretName, - ServiceAccountName, ServiceName, + ConfigMapName, ListenerClassName, ListenerName, NamespaceName, SecretKey, + SecretName, ServiceAccountName, ServiceName, }, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, @@ -853,8 +911,12 @@ mod tests { let _ = CONFIG_VOLUME_NAME; let _ = LOG_CONFIG_VOLUME_NAME; let _ = DATA_VOLUME_NAME; - let _ = LISTENER_VOLUME_NAME; + let _ = ROLE_GROUP_LISTENER_VOLUME_NAME; + let _ = DISCOVERY_SERVICE_LISTENER_VOLUME_NAME; + let _ = TLS_SERVER_VOLUME_NAME; + let _ = TLS_INTERNAL_VOLUME_NAME; let _ = LOG_VOLUME_NAME; + let _ = OPENSEARCH_KEYSTORE_VOLUME_NAME; } fn context_names() -> ContextNames { @@ -862,6 +924,8 @@ mod tests { product_name: ProductName::from_str_unsafe("opensearch"), operator_name: OperatorName::from_str_unsafe("opensearch.stackable.tech"), controller_name: ControllerName::from_str_unsafe("opensearchcluster"), + cluster_domain_name: DomainName::from_str("cluster.local") + .expect("should be a valid domain name"), } } @@ -879,6 +943,7 @@ mod tests { replicas: 1, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("cluster-internal"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( @@ -917,7 +982,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch-cluster"), NamespaceName::from_str_unsafe("default"), uuid!("0b1e30e6-326e-4c1a-868d-ad6598b49e8b"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [( RoleGroupName::from_str_unsafe("default"), role_group_config.clone(), @@ -931,6 +996,7 @@ mod tests { key: SecretKey::from_str_unsafe("my-keystore-file"), }, }], + None, ) } @@ -951,7 +1017,8 @@ mod tests { role_group_name, role_group_config, context_names, - ServiceName::from_str_unsafe("my-opensearch-cluster"), + ServiceName::from_str_unsafe("my-opensearch-cluster-seed-nodes"), + ListenerName::from_str_unsafe("my-opensearch-cluster"), ) } @@ -1130,12 +1197,24 @@ mod tests { ], "env": [ { - "name": "cluster.initial_cluster_manager_nodes", - "value": "" + "name": "_POD_NAME", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.name" + } + } }, { "name": "discovery.seed_hosts", - "value": "my-opensearch-cluster" + "value": "my-opensearch-cluster-seed-nodes.default.svc.cluster.local" + }, + { + "name": "http.publish_host", + "value": "$(_POD_NAME).my-opensearch-cluster-nodes-default-headless.default.svc.cluster.local" + }, + { + "name": "network.publish_host", + "value": "$(_POD_NAME).my-opensearch-cluster-nodes-default-headless.default.svc.cluster.local" }, { "name": "node.name", @@ -1148,7 +1227,11 @@ mod tests { { "name": "node.roles", "value": 
"cluster_manager,data,ingest,remote_cluster_client" - } + }, + { + "name": "transport.publish_host", + "value": "$(_POD_NAME).my-opensearch-cluster-nodes-default-headless.default.svc.cluster.local" + }, ], "image": "oci.stackable.tech/sdp/opensearch:3.1.0-stackable0.0.0-dev", "imagePullPolicy": "Always", @@ -1199,17 +1282,21 @@ mod tests { "name": "data" }, { - "mountPath": "/stackable/listener", + "mountPath": "/stackable/listeners/role-group", "name": "listener" }, { "mountPath": "/stackable/log", "name": "log" }, - { + { "mountPath": "/stackable/opensearch/config/tls/internal", "name": "tls-internal" }, + { + "mountPath": "/stackable/listeners/discovery-service", + "name": "discovery-service-listener" + }, { "mountPath": "/stackable/opensearch/config/tls/server", "name": "tls-server", @@ -1391,7 +1478,7 @@ mod tests { "secrets.stackable.tech/backend.autotls.cert.lifetime": "1d", "secrets.stackable.tech/class": "tls", "secrets.stackable.tech/format": "tls-pem", - "secrets.stackable.tech/scope": "listener-volume=listener,pod" + "secrets.stackable.tech/scope": "service=my-opensearch-cluster-seed-nodes,listener-volume=listener,pod" } }, "spec": { @@ -1417,7 +1504,7 @@ mod tests { "secrets.stackable.tech/backend.autotls.cert.lifetime": "1d", "secrets.stackable.tech/class": "tls", "secrets.stackable.tech/format": "tls-pem", - "secrets.stackable.tech/scope": "service=my-opensearch-cluster,listener-volume=listener,pod" + "secrets.stackable.tech/scope": "listener-volume=listener,listener-volume=discovery-service-listener,pod" } }, "spec": { @@ -1491,6 +1578,36 @@ mod tests { }, "name": "listener" }, + "spec": { + "accessModes": [ + "ReadWriteMany", + ], + "resources": { + "requests": { + "storage": "1", + }, + }, + "storageClassName": "listeners.stackable.tech", + }, + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "annotations": { + "listeners.stackable.tech/listener-name": "my-opensearch-cluster", + }, + "labels": { + "app.kubernetes.io/component": "nodes", + "app.kubernetes.io/instance": "my-opensearch-cluster", + "app.kubernetes.io/managed-by": "opensearch.stackable.tech_opensearchcluster", + "app.kubernetes.io/name": "opensearch", + "app.kubernetes.io/role-group": "default", + "app.kubernetes.io/version": "3.1.0", + "stackable.tech/vendor": "Stackable", + }, + "name": "discovery-service-listener", + }, "spec": { "accessModes": [ "ReadWriteMany" diff --git a/rust/operator-binary/src/controller/dereference.rs b/rust/operator-binary/src/controller/dereference.rs new file mode 100644 index 0000000..24adf74 --- /dev/null +++ b/rust/operator-binary/src/controller/dereference.rs @@ -0,0 +1,65 @@ +//! 
The dereference step in the OpenSearchCluster controller + +use snafu::{ResultExt, Snafu}; +use stackable_operator::{client::Client, crd::listener}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::{ + controller::{DereferencedObjects, build::role_builder}, + crd::v1alpha1, + framework::{ + controller_utils::{get_cluster_name, get_namespace}, + types::{kubernetes::NamespaceName, operator::ClusterName}, + }, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to get the cluster name"))] + GetClusterName { + source: crate::framework::controller_utils::Error, + }, + + #[snafu(display("failed to get the cluster namespace"))] + GetClusterNamespace { + source: crate::framework::controller_utils::Error, + }, + + #[snafu(display("failed to fetch the discovery service listener"))] + FetchDiscoveryServiceListener { + source: stackable_operator::client::Error, + }, +} + +type Result = std::result::Result; + +/// Dereference additional objects that are required to build the cluster resources. +pub async fn dereference( + client: &Client, + cluster: &v1alpha1::OpenSearchCluster, +) -> Result { + let cluster_name = get_cluster_name(cluster).context(GetClusterNameSnafu)?; + let namespace = get_namespace(cluster).context(GetClusterNamespaceSnafu)?; + + let maybe_discovery_service_listener = + fetch_discovery_service_listener(client, &cluster_name, &namespace).await?; + + Ok(DereferencedObjects { + maybe_discovery_service_listener, + }) +} + +async fn fetch_discovery_service_listener( + client: &Client, + cluster_name: &ClusterName, + namespace: &NamespaceName, +) -> Result> { + let discovery_service_listener_name = + role_builder::discovery_service_listener_name(cluster_name); + + client + .get_opt(discovery_service_listener_name.as_ref(), namespace.as_ref()) + .await + .context(FetchDiscoveryServiceListenerSnafu) +} diff --git a/rust/operator-binary/src/controller/validate.rs b/rust/operator-binary/src/controller/validate.rs index 2079558..4365483 100644 --- a/rust/operator-binary/src/controller/validate.rs +++ b/rust/operator-binary/src/controller/validate.rs @@ -1,12 +1,10 @@ //! 
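The new dereference step fetches the discovery service Listener with `get_opt`, so a 404 is a normal outcome on the first reconciliation rather than an error. The same pattern with plain kube-rs, using a built-in resource type to keep the sketch self-contained:

```rust
use k8s_openapi::api::core::v1::ConfigMap;
use kube::{Api, Client};

/// Fetch an object that may legitimately not exist yet. `Api::get_opt`
/// maps HTTP 404 to `Ok(None)`, which lets the caller postpone dependent
/// work (here: the discovery ConfigMap) instead of failing the reconcile.
async fn fetch_optional(
    client: Client,
    namespace: &str,
    name: &str,
) -> kube::Result<Option<ConfigMap>> {
    let api: Api<ConfigMap> = Api::namespaced(client, namespace);
    api.get_opt(name).await
}
```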
The validate step in the OpenSearchCluster controller -use std::{collections::BTreeMap, num::TryFromIntError, str::FromStr}; +use std::{collections::BTreeMap, str::FromStr}; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ - kube::{Resource, ResourceExt}, - product_logging::spec::Logging, - role_utils::RoleGroup, + crd::listener, kube::ResourceExt, product_logging::spec::Logging, role_utils::RoleGroup, shared::time::Duration, }; use strum::{EnumDiscriminants, IntoStaticStr}; @@ -16,15 +14,18 @@ use super::{ ValidatedLogging, ValidatedOpenSearchConfig, }; use crate::{ + controller::{DereferencedObjects, ValidatedDiscoveryEndpoint}, crd::v1alpha1::{self}, framework::{ builder::pod::container::{EnvVarName, EnvVarSet}, + controller_utils::{get_cluster_name, get_namespace, get_uid}, product_logging::framework::{ VectorContainerLogConfig, validate_logging_configuration_for_container, }, role_utils::{GenericProductSpecificCommonConfig, RoleGroupConfig, with_validated_config}, types::{ - kubernetes::{ConfigMapName, NamespaceName, Uid}, + common::Port, + kubernetes::{ConfigMapName, Hostname}, operator::ClusterName, }, }, @@ -34,37 +35,41 @@ use crate::{ #[strum_discriminants(derive(IntoStaticStr))] pub enum Error { #[snafu(display("failed to get the cluster name"))] - GetClusterName {}, + GetClusterName { + source: crate::framework::controller_utils::Error, + }, #[snafu(display("failed to get the cluster namespace"))] - GetClusterNamespace {}, + GetClusterNamespace { + source: crate::framework::controller_utils::Error, + }, #[snafu(display("failed to get the cluster UID"))] - GetClusterUid {}, + GetClusterUid { + source: crate::framework::controller_utils::Error, + }, + + #[snafu(display("failed to get the port of the Listener status"))] + GetListenerStatusPort {}, #[snafu(display( "failed to get vectorAggregatorConfigMapName; It must be set if enableVectorAgent is true." 
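The reshaped error enum in this hunk drops the per-step `Parse*` variants in favour of thin wrappers around the shared `controller_utils` error. Sketched in isolation, with simplified stand-in names:

```rust
use snafu::{ResultExt, Snafu};

/// After the refactor, name/namespace/UID failures in a controller step
/// wrap the shared helper error instead of duplicating parse variants.
#[derive(Debug, Snafu)]
enum StepError {
    #[snafu(display("failed to get the cluster name"))]
    GetClusterName { source: UtilError },
}

#[derive(Debug, Snafu)]
enum UtilError {
    #[snafu(display("missing metadata.name"))]
    MissingName {},
}

/// One shared helper serves every controller step.
fn cluster_name(raw: Option<&str>) -> Result<String, StepError> {
    raw.map(str::to_owned)
        .ok_or(UtilError::MissingName {})
        .context(GetClusterNameSnafu)
}
```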
))] GetVectorAggregatorConfigMapName {}, - #[snafu(display("failed to set cluster name"))] - ParseClusterName { - source: crate::framework::macros::attributed_string_type::Error, - }, - - #[snafu(display("failed to set cluster namespace"))] - ParseClusterNamespace { - source: crate::framework::macros::attributed_string_type::Error, + #[snafu(display("failed to parse environment variable"))] + ParseEnvironmentVariable { + source: crate::framework::builder::pod::container::Error, }, - #[snafu(display("failed to set UID"))] - ParseClusterUid { + #[snafu(display("failed to parse the hostname of the Listener status"))] + ParseListenerStatusHostname { source: crate::framework::macros::attributed_string_type::Error, }, - #[snafu(display("failed to parse environment variable"))] - ParseEnvironmentVariable { - source: crate::framework::builder::pod::container::Error, + #[snafu(display("failed to parse the port of the Listener status"))] + ParseListenerStatusPort { + source: crate::framework::types::common::Error, }, #[snafu(display("failed to set product version"))] @@ -94,7 +99,7 @@ pub enum Error { #[snafu(display("termination grace period is too long (got {duration}, maximum allowed is {max})", max = Duration::from_secs(i64::MAX as u64)))] TerminationGracePeriodTooLong { - source: TryFromIntError, + source: std::num::TryFromIntError, duration: Duration, }, } @@ -113,15 +118,11 @@ const DEFAULT_IMAGE_BASE_NAME: &str = "opensearch"; pub fn validate( context_names: &ContextNames, cluster: &v1alpha1::OpenSearchCluster, + dereferenced_objects: &DereferencedObjects, ) -> Result { - let raw_cluster_name = cluster.meta().name.clone().context(GetClusterNameSnafu)?; - let cluster_name = ClusterName::from_str(&raw_cluster_name).context(ParseClusterNameSnafu)?; - - let raw_namespace = cluster.namespace().context(GetClusterNamespaceSnafu)?; - let namespace = NamespaceName::from_str(&raw_namespace).context(ParseClusterNamespaceSnafu)?; - - let raw_uid = cluster.uid().context(GetClusterUidSnafu)?; - let uid = Uid::from_str(&raw_uid).context(ParseClusterUidSnafu)?; + let cluster_name = get_cluster_name(cluster).context(GetClusterNameSnafu)?; + let namespace = get_namespace(cluster).context(GetClusterNamespaceSnafu)?; + let uid = get_uid(cluster).context(GetClusterUidSnafu)?; let product_image = cluster .spec @@ -145,6 +146,8 @@ pub fn validate( role_group_configs.insert(role_group_name, validated_role_group_config); } + let validated_discovery_endpoint = validate_discovery_endpoint(dereferenced_objects)?; + Ok(ValidatedCluster::new( product_image, product_version, @@ -155,6 +158,7 @@ pub fn validate( role_group_configs, cluster.spec.cluster_config.tls.clone(), cluster.spec.cluster_config.keystore.clone(), + validated_discovery_endpoint, )) } @@ -195,6 +199,7 @@ fn validate_role_group_config( let validated_config = ValidatedOpenSearchConfig { affinity: merged_role_group.config.config.affinity, + discovery_service_exposed: merged_role_group.config.config.discovery_service_exposed, listener_class: merged_role_group.config.config.listener_class, logging, node_roles: merged_role_group.config.config.node_roles, @@ -254,6 +259,69 @@ fn validate_logging_configuration( }) } +fn validate_discovery_endpoint( + dereferenced_objects: &DereferencedObjects, +) -> Result> { + let validated_discovery_endpoint = if let Some(discovery_service_listener) = + &dereferenced_objects.maybe_discovery_service_listener + { + if let Some((hostname, port)) = extract_listener_ingresses(discovery_service_listener)? 
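The branch that follows implements a three-way decision; flattened into a single type for clarity (the controller itself encodes this as `Option<ValidatedDiscoveryEndpoint>` plus log messages):

```rust
/// Illustrative summary of the discovery-endpoint validation outcomes.
enum DiscoveryEndpoint {
    /// No Listener object yet: typically the first reconciliation run.
    ListenerNotDeployed,
    /// The Listener exists, but its status has no ingress addresses yet.
    StatusPending,
    /// An ingress address was published; the discovery ConfigMap can be
    /// created or updated with this hostname and port.
    Ready { hostname: String, port: u16 },
}
```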
{ + tracing::info!( + "The status of the discovery service listener {} contains the discovery endpoint. \ + The discovery ConfigMap will be created or updated.", + discovery_service_listener.name_any() + ); + Some(ValidatedDiscoveryEndpoint { hostname, port }) + } else { + tracing::info!( + "The status of the discovery service listener {} does not yet contain the \ + discovery endpoint. The creation of the discovery ConfigMap will be postponed \ + until the status is updated.", + discovery_service_listener.name_any() + ); + None + } + } else { + tracing::info!( + "The discovery service listener is not yet deployed. The creation of the discovery \ + ConfigMap will be postponed until the discovery service listener is deployed and its \ + status is set." + ); + None + }; + + Ok(validated_discovery_endpoint) +} + +fn extract_listener_ingresses( + discovery_service_listener: &listener::v1alpha1::Listener, +) -> Result> { + let maybe_first_ingress_address = discovery_service_listener + .status + .as_ref() + .and_then(|status| status.ingress_addresses.as_ref()) + .into_iter() + .flatten() + .next(); + + // It is okay if the status is not set yet. But if it is set, then it must be valid. + if let Some(ingress_address) = maybe_first_ingress_address { + let hostname = Hostname::from_str(&ingress_address.address) + .context(ParseListenerStatusHostnameSnafu)?; + + let raw_port = *ingress_address + .ports + // TODO Use HTTP_PORT_NAME somehow + .get("http") + .context(GetListenerStatusPortSnafu)?; + let port = Port::try_from(raw_port).context(ParseListenerStatusPortSnafu)?; + + Ok(Some((hostname, port))) + } else { + Ok(None) + } +} + #[cfg(test)] mod tests { use std::{collections::BTreeMap, str::FromStr}; @@ -263,6 +331,7 @@ mod tests { commons::{ affinity::StackableAffinity, cluster_operation::ClusterOperation, + networking::DomainName, product_image_selection::ResolvedProductImage, resources::{CpuLimits, MemoryLimits, PvcConfig, Resources}, }, @@ -280,7 +349,7 @@ mod tests { ContainerLogConfigChoiceFragment, ContainerLogConfigFragment, CustomContainerLogConfigFragment, LogLevel, LoggerConfig, LoggingFragment, }, - role_utils::{CommonConfiguration, GenericRoleConfig, Role, RoleGroup}, + role_utils::{CommonConfiguration, Role, RoleGroup}, shared::time::Duration, }; use uuid::uuid; @@ -288,7 +357,10 @@ mod tests { use super::{ErrorDiscriminants, validate}; use crate::{ built_info, - controller::{ContextNames, ValidatedCluster, ValidatedLogging, ValidatedOpenSearchConfig}, + controller::{ + ContextNames, DereferencedObjects, ValidatedCluster, ValidatedLogging, + ValidatedOpenSearchConfig, + }, crd::{NodeRoles, OpenSearchKeystoreKey, v1alpha1}, framework::{ builder::pod::container::{EnvVarName, EnvVarSet}, @@ -311,7 +383,11 @@ mod tests { #[test] fn test_validate_ok() { - let result = validate(&context_names(), &cluster()); + let dereferenced_objects = DereferencedObjects { + maybe_discovery_service_listener: None, + }; + + let result = validate(&context_names(), &cluster(), &dereferenced_objects); assert_eq!( Some(ValidatedCluster::new( @@ -333,7 +409,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch"), NamespaceName::from_str_unsafe("default"), uuid!("e6ac237d-a6d4-43a1-8135-f36506110912"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [( RoleGroupName::from_str_unsafe("default"), RoleGroupConfig { @@ -377,6 +453,7 @@ mod tests { }), ..StackableAffinity::default() }, + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe( 
"listener-class-from-role-group-level" ), @@ -526,7 +603,8 @@ mod tests { name: SecretName::from_str_unsafe("my-keystore-secret"), key: SecretKey::from_str_unsafe("my-keystore-file") } - }] + }], + None )), result.ok() ); @@ -540,14 +618,6 @@ mod tests { ); } - #[test] - fn test_validate_err_parse_cluster_name() { - test_validate_err( - |cluster| cluster.metadata.name = Some("invalid cluster name".to_owned()), - ErrorDiscriminants::ParseClusterName, - ); - } - #[test] fn test_validate_err_get_cluster_namespace() { test_validate_err( @@ -556,14 +626,6 @@ mod tests { ); } - #[test] - fn test_validate_err_parse_cluster_namespace() { - test_validate_err( - |cluster| cluster.metadata.namespace = Some("invalid cluster namespace".to_owned()), - ErrorDiscriminants::ParseClusterNamespace, - ); - } - #[test] fn test_validate_err_get_cluster_uid() { test_validate_err( @@ -572,14 +634,6 @@ mod tests { ); } - #[test] - fn test_validate_err_parse_cluster_uid() { - test_validate_err( - |cluster| cluster.metadata.uid = Some("invalid cluster UID".to_owned()), - ErrorDiscriminants::ParseClusterUid, - ); - } - #[test] fn test_validate_err_resolve_product_image() { test_validate_err( @@ -679,7 +733,11 @@ mod tests { let mut cluster = cluster(); f(&mut cluster); - let result = validate(&context_names(), &cluster); + let dereferenced_objects = DereferencedObjects { + maybe_discovery_service_listener: None, + }; + + let result = validate(&context_names(), &cluster, &dereferenced_objects); assert_eq!(Err(expected_err), result.map_err(ErrorDiscriminants::from)); } @@ -689,6 +747,8 @@ mod tests { product_name: ProductName::from_str_unsafe("opensearch"), operator_name: OperatorName::from_str_unsafe("opensearch.stackable.tech"), controller_name: ControllerName::from_str_unsafe("opensearchcluster"), + cluster_domain_name: DomainName::from_str("cluster.local") + .expect("should be a valid domain name"), } } @@ -766,7 +826,7 @@ mod tests { product_specific_common_config: GenericProductSpecificCommonConfig::default( ), }, - role_config: GenericRoleConfig::default(), + role_config: v1alpha1::OpenSearchRoleConfig::default(), role_groups: [( "default".to_owned(), RoleGroup { diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index 826d92c..36de1f8 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -42,7 +42,8 @@ use crate::{ }, }; -constant!(DEFAULT_LISTENER_CLASS: ListenerClassName = "cluster-internal"); +constant!(DEFAULT_ROLE_GROUP_LISTENER_CLASS: ListenerClassName = "cluster-internal"); +constant!(DEFAULT_DISCOVERY_SERVICE_LISTENER_CLASS: ListenerClassName = "cluster-internal"); constant!(TLS_DEFAULT_SECRET_CLASS: SecretClassName = "tls"); #[versioned( @@ -86,8 +87,11 @@ pub mod versioned { pub object_overrides: ObjectOverrides, // no doc - docs in Role struct - pub nodes: - Role, + pub nodes: Role< + OpenSearchConfigFragment, + OpenSearchRoleConfig, + GenericProductSpecificCommonConfig, + >, } #[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] @@ -207,6 +211,9 @@ pub mod versioned { #[fragment_attrs(serde(default))] pub affinity: StackableAffinity, + /// Determines whether this role group is exposed in the discovery service. + pub discovery_service_exposed: bool, + /// Time period Pods have to gracefully shut down, e.g. `30m`, `1h` or `2d`. Consult the /// operator documentation for details. 
#[fragment_attrs(serde(default))] @@ -282,6 +289,17 @@ pub mod versioned { pub data: PvcConfig, } + #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] + #[serde(rename_all = "camelCase")] + pub struct OpenSearchRoleConfig { + #[serde(flatten)] + pub common: GenericRoleConfig, + + /// The [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) that is used for the discovery service. + #[serde(default = "discovery_service_listener_class_default")] + pub discovery_service_listener_class: ListenerClassName, + } + #[derive(Clone, Default, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct OpenSearchClusterStatus { @@ -325,12 +343,13 @@ impl v1alpha1::OpenSearchConfig { node_affinity: None, node_selector: None, }, + discovery_service_exposed: Some(true), // Default taken from the Helm chart, see // https://github.com/opensearch-project/helm-charts/blob/opensearch-3.0.0/charts/opensearch/values.yaml#L364 graceful_shutdown_timeout: Some( Duration::from_str("2m").expect("should be a valid duration"), ), - listener_class: Some(DEFAULT_LISTENER_CLASS.to_owned()), + listener_class: Some(DEFAULT_ROLE_GROUP_LISTENER_CLASS.to_owned()), logging: product_logging::spec::default_logging(), // Defaults taken from the Helm chart, see // https://github.com/opensearch-project/helm-charts/blob/opensearch-3.0.0/charts/opensearch/values.yaml#L16-L20 @@ -389,6 +408,19 @@ fn internal_secret_class_default() -> SecretClassName { TLS_DEFAULT_SECRET_CLASS.to_owned() } +impl Default for v1alpha1::OpenSearchRoleConfig { + fn default() -> Self { + v1alpha1::OpenSearchRoleConfig { + common: GenericRoleConfig::default(), + discovery_service_listener_class: discovery_service_listener_class_default(), + } + } +} + +fn discovery_service_listener_class_default() -> ListenerClassName { + DEFAULT_DISCOVERY_SERVICE_LISTENER_CLASS.to_owned() +} + #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct NodeRoles(pub Vec); diff --git a/rust/operator-binary/src/framework.rs b/rust/operator-binary/src/framework.rs index 2e42c3c..e6f58e9 100644 --- a/rust/operator-binary/src/framework.rs +++ b/rust/operator-binary/src/framework.rs @@ -24,6 +24,7 @@ use types::kubernetes::Uid; pub mod builder; pub mod cluster_resources; +pub mod controller_utils; pub mod kvp; pub mod macros; pub mod product_logging; diff --git a/rust/operator-binary/src/framework/controller_utils.rs b/rust/operator-binary/src/framework/controller_utils.rs new file mode 100644 index 0000000..4f34020 --- /dev/null +++ b/rust/operator-binary/src/framework/controller_utils.rs @@ -0,0 +1,211 @@ +//! 
Helper functions which are not tied to a specific controller step + +use std::str::FromStr; + +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::kube::runtime::reflector::Lookup; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::framework::types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to get the cluster name"))] + GetClusterName {}, + + #[snafu(display("failed to get the namespace"))] + GetNamespace {}, + + #[snafu(display("failed to get the UID"))] + GetUid {}, + + #[snafu(display("failed to set the cluster name"))] + ParseClusterName { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to set the namespace"))] + ParseNamespace { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to set the UID"))] + ParseUid { + source: crate::framework::macros::attributed_string_type::Error, + }, +} + +type Result = std::result::Result; + +/// Get the cluster name from the given resource +pub fn get_cluster_name(cluster: &impl Lookup) -> Result { + let raw_cluster_name = cluster.name().context(GetClusterNameSnafu)?; + let cluster_name = ClusterName::from_str(&raw_cluster_name).context(ParseClusterNameSnafu)?; + + Ok(cluster_name) +} + +/// Get the namespace from the given resource +pub fn get_namespace(resource: &impl Lookup) -> Result { + let raw_namespace = resource.namespace().context(GetNamespaceSnafu)?; + let namespace = NamespaceName::from_str(&raw_namespace).context(ParseNamespaceSnafu)?; + + Ok(namespace) +} + +/// Get the UID from the given resource +pub fn get_uid(resource: &impl Lookup) -> Result { + let raw_uid = resource.uid().context(GetUidSnafu)?; + let uid = Uid::from_str(&raw_uid).context(ParseUidSnafu)?; + + Ok(uid) +} + +#[cfg(test)] +mod tests { + use stackable_operator::kube::runtime::reflector::Lookup; + use uuid::uuid; + + use super::{ErrorDiscriminants, get_cluster_name, get_namespace, get_uid}; + use crate::framework::types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, + }; + + #[derive(Debug, Default)] + struct TestResource { + name: Option<&'static str>, + namespace: Option<&'static str>, + uid: Option<&'static str>, + } + + impl Lookup for TestResource { + type DynamicType = (); + + fn kind(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "".into() + } + + fn group(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + todo!() + } + + fn version(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + todo!() + } + + fn plural(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + todo!() + } + + fn name(&self) -> Option> { + self.name.map(std::borrow::Cow::Borrowed) + } + + fn namespace(&self) -> Option> { + self.namespace.map(std::borrow::Cow::Borrowed) + } + + fn resource_version(&self) -> Option> { + todo!() + } + + fn uid(&self) -> Option> { + self.uid.map(std::borrow::Cow::Borrowed) + } + } + + #[test] + fn test_get_cluster_name() { + assert_eq!( + ClusterName::from_str_unsafe("test-cluster"), + get_cluster_name(&TestResource { + name: Some("test-cluster"), + ..TestResource::default() + }) + .expect("should contain a valid cluster name") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetClusterName), + get_cluster_name(&TestResource { + name: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + 
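The tests below compare error kinds via the `EnumDiscriminants` twin enum rather than constructing full error values. The pattern in isolation, with stand-in names:

```rust
use snafu::Snafu;
use strum::EnumDiscriminants;

/// `EnumDiscriminants` derives a field-less twin enum (`ErrorDiscriminants`),
/// so tests can compare error *kinds* without building full source chains.
#[derive(Debug, Snafu, EnumDiscriminants)]
enum Error {
    #[snafu(display("failed to get the name"))]
    GetName {},
}

fn assert_error_kind(result: Result<(), Error>) {
    // strum generates `From<Error> for ErrorDiscriminants`.
    assert_eq!(
        Err(ErrorDiscriminants::GetName),
        result.map_err(ErrorDiscriminants::from)
    );
}
```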
assert_eq!( + Err(ErrorDiscriminants::ParseClusterName), + get_cluster_name(&TestResource { + name: Some("invalid cluster name"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } + + #[test] + fn test_get_namespace() { + assert_eq!( + NamespaceName::from_str_unsafe("test-namespace"), + get_namespace(&TestResource { + namespace: Some("test-namespace"), + ..TestResource::default() + }) + .expect("should contain a valid namespace") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetNamespace), + get_namespace(&TestResource { + namespace: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseNamespace), + get_namespace(&TestResource { + namespace: Some("invalid namespace"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } + + #[test] + fn test_get_uid() { + assert_eq!( + Uid::from(uuid!("e6ac237d-a6d4-43a1-8135-f36506110912")), + get_uid(&TestResource { + uid: Some("e6ac237d-a6d4-43a1-8135-f36506110912"), + ..TestResource::default() + }) + .expect("should contain a valid UID") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetUid), + get_uid(&TestResource { + uid: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseUid), + get_uid(&TestResource { + uid: Some("invalid UID"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } +} diff --git a/rust/operator-binary/src/framework/macros/attributed_string_type.rs b/rust/operator-binary/src/framework/macros/attributed_string_type.rs index f961712..6f72c01 100644 --- a/rust/operator-binary/src/framework/macros/attributed_string_type.rs +++ b/rust/operator-binary/src/framework/macros/attributed_string_type.rs @@ -3,7 +3,7 @@ use strum::{EnumDiscriminants, IntoStaticStr}; /// Maximum length of label values /// -/// Duplicates the private constant [`stackable-operator::kvp::label::value::LABEL_VALUE_MAX_LEN`] +/// Duplicates the private constant [`stackable_operator::kvp::LABEL_VALUE_MAX_LEN`] pub const MAX_LABEL_VALUE_LENGTH: usize = 63; #[derive(Debug, EnumDiscriminants, Snafu)] diff --git a/rust/operator-binary/src/framework/role_utils.rs b/rust/operator-binary/src/framework/role_utils.rs index d6facda..dd5b464 100644 --- a/rust/operator-binary/src/framework/role_utils.rs +++ b/rust/operator-binary/src/framework/role_utils.rs @@ -17,7 +17,7 @@ use stackable_operator::{ use super::{ builder::pod::container::EnvVarSet, types::{ - kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName, ServiceName}, + kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName}, operator::{ClusterName, ProductName}, }, }; @@ -220,18 +220,6 @@ impl ResourceNames { ClusterRoleName::from_str(&format!("{}{SUFFIX}", self.product_name)) .expect("should be a valid cluster role name") } - - pub fn discovery_service_name(&self) -> ServiceName { - // compile-time checks - const _: () = assert!( - ClusterName::MAX_LENGTH <= ServiceName::MAX_LENGTH, - "The string `` must not exceed the limit of Service names." 
- ); - let _ = ClusterName::IS_RFC_1035_LABEL_NAME; - let _ = ClusterName::IS_VALID_LABEL_VALUE; - - ServiceName::from_str(self.cluster_name.as_ref()).expect("should be a valid Service name") - } } #[cfg(test)] @@ -252,7 +240,7 @@ mod tests { use crate::framework::{ role_utils::with_validated_config, types::{ - kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName, ServiceName}, + kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName}, operator::{ClusterName, ProductName}, }, }; @@ -410,9 +398,5 @@ mod tests { ClusterRoleName::from_str_unsafe("my-product-clusterrole"), resource_names.cluster_role_name() ); - assert_eq!( - ServiceName::from_str_unsafe("my-cluster"), - resource_names.discovery_service_name() - ); } } diff --git a/rust/operator-binary/src/framework/types.rs b/rust/operator-binary/src/framework/types.rs index fb75cd3..65f6116 100644 --- a/rust/operator-binary/src/framework/types.rs +++ b/rust/operator-binary/src/framework/types.rs @@ -1,2 +1,3 @@ +pub mod common; pub mod kubernetes; pub mod operator; diff --git a/rust/operator-binary/src/framework/types/common.rs b/rust/operator-binary/src/framework/types/common.rs new file mode 100644 index 0000000..ef9783b --- /dev/null +++ b/rust/operator-binary/src/framework/types/common.rs @@ -0,0 +1,34 @@ +use snafu::{ResultExt, Snafu}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to convert to port number"))] + ConvertToPortNumber { source: std::num::TryFromIntError }, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Port(pub u16); + +impl std::fmt::Display for Port { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl TryFrom for Port { + type Error = Error; + + fn try_from(value: i32) -> Result { + Ok(Port( + u16::try_from(value).context(ConvertToPortNumberSnafu)?, + )) + } +} + +impl From for i32 { + fn from(value: Port) -> Self { + value.0 as i32 + } +} diff --git a/rust/operator-binary/src/framework/types/kubernetes.rs b/rust/operator-binary/src/framework/types/kubernetes.rs index f7b5282..07aaa33 100644 --- a/rust/operator-binary/src/framework/types/kubernetes.rs +++ b/rust/operator-binary/src/framework/types/kubernetes.rs @@ -40,6 +40,16 @@ attributed_string_type! { is_rfc_1123_dns_subdomain_name } +attributed_string_type! { + Hostname, + "A hostname", + "example.com", + (min_length = 1), + (max_length = 253), + // see https://en.wikipedia.org/wiki/Hostname#Syntax + (regex = "[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?(\\.[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?)*\\.?") +} + attributed_string_type! { ListenerName, "The name of a Listener", @@ -152,7 +162,7 @@ attributed_string_type! 
{ #[cfg(test)] mod tests { use super::{ - ClusterRoleName, ConfigMapKey, ConfigMapName, ContainerName, ListenerClassName, + ClusterRoleName, ConfigMapKey, ConfigMapName, ContainerName, Hostname, ListenerClassName, ListenerName, NamespaceName, PersistentVolumeClaimName, RoleBindingName, SecretClassName, SecretKey, SecretName, ServiceAccountName, ServiceName, StatefulSetName, Uid, VolumeName, }; @@ -163,6 +173,7 @@ mod tests { ConfigMapKey::test_example(); ContainerName::test_example(); ClusterRoleName::test_example(); + Hostname::test_example(); ListenerName::test_example(); ListenerClassName::test_example(); NamespaceName::test_example(); diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index e7df8f2..9a9b70e 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -8,8 +8,14 @@ use snafu::{ResultExt as _, Snafu}; use stackable_operator::{ YamlSchema as _, cli::{Command, RunArguments}, + crd::listener, eos::EndOfSupportChecker, - k8s_openapi::api::{apps::v1::StatefulSet, core::v1::Service}, + k8s_openapi::api::{ + apps::v1::StatefulSet, + core::v1::{ConfigMap, Service, ServiceAccount}, + policy::v1::PodDisruptionBudget, + rbac::v1::RoleBinding, + }, kube::{ core::DeserializeGuard, runtime::{ @@ -133,40 +139,60 @@ async fn main() -> Result<()> { let controller = Controller::new( watch_namespace.get_api::>(&client), watcher::Config::default(), - ); - let controller = controller - .owns( - watch_namespace.get_api::(&client), - watcher::Config::default(), - ) - .owns( - watch_namespace.get_api::(&client), - watcher::Config::default(), - ) - .shutdown_on_signal() - .run( - controller::reconcile, - controller::error_policy, - Arc::new(controller_context), - ) - .for_each_concurrent( - 16, // concurrency limit - |result| { - // The event_recorder needs to be shared across all invocations, so that - // events are correctly aggregated - let event_recorder = event_recorder.clone(); - let full_controller_name = full_controller_name.clone(); - async move { - report_controller_reconciled( - &event_recorder, - &full_controller_name, - &result, - ) - .await; - } - }, - ) - .map(Ok); + ) + .owns( + // TODO Test if other ConfigMaps start a reconciliation + watch_namespace.get_api::>(&client), + watcher::Config::default(), + ) + .owns( + watch_namespace.get_api::>(&client), + watcher::Config::default(), + ) + .owns( + watch_namespace.get_api::>(&client), + watcher::Config::default(), + ) + .owns( + watch_namespace.get_api::>(&client), + watcher::Config::default(), + ) + .owns( + watch_namespace.get_api::>(&client), + watcher::Config::default(), + ) + .owns( + watch_namespace.get_api::>(&client), + watcher::Config::default(), + ) + .owns( + watch_namespace.get_api::>(&client), + watcher::Config::default(), + ) + .shutdown_on_signal() + .run( + controller::reconcile, + controller::error_policy, + Arc::new(controller_context), + ) + .for_each_concurrent( + 16, // concurrency limit + |result| { + // The event_recorder needs to be shared across all invocations, so that + // events are correctly aggregated + let event_recorder = event_recorder.clone(); + let full_controller_name = full_controller_name.clone(); + async move { + report_controller_reconciled( + &event_recorder, + &full_controller_name, + &result, + ) + .await; + } + }, + ) + .map(Ok); futures::try_join!(controller, eos_checker)?; } diff --git a/tests/templates/kuttl/backup-restore/11_minio-values.yaml b/tests/templates/kuttl/backup-restore/11_minio-values.yaml.j2 similarity index 95% 
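The `main.rs` hunk above is what makes the discovery feature converge: the controller now registers an `owns` watch for every resource kind it creates (ConfigMap, Service, ServiceAccount, RoleBinding, Listener, PodDisruptionBudget, StatefulSet), so a Listener status update re-queues the owning OpenSearchCluster and the postponed ConfigMap gets written on the next reconcile. A stripped-down shape of such a registration with kube-rs, with built-in types standing in for the operator's `DeserializeGuard`-wrapped CRD:

```rust
use k8s_openapi::api::{apps::v1::StatefulSet, core::v1::ConfigMap};
use kube::{
    Api, Client,
    runtime::{Controller, watcher},
};

/// Any change to an owned object (one whose ownerReference points at the
/// primary) re-queues the primary resource for reconciliation.
fn build_controller(client: Client) -> Controller<StatefulSet> {
    Controller::new(
        Api::<StatefulSet>::all(client.clone()),
        watcher::Config::default(),
    )
    .owns(Api::<ConfigMap>::all(client), watcher::Config::default())
}
```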
rename from tests/templates/kuttl/backup-restore/11_minio-values.yaml rename to tests/templates/kuttl/backup-restore/11_minio-values.yaml.j2 index 1f361bc..4b2ada0 100644 --- a/tests/templates/kuttl/backup-restore/11_minio-values.yaml +++ b/tests/templates/kuttl/backup-restore/11_minio-values.yaml.j2 @@ -16,7 +16,7 @@ console: repository: bitnamilegacy/minio-object-browser tls: - enabled: true + enabled: {{ test_scenario['values']['s3-use-tls'] }} persistence: enabled: false # "false" means, that an emptyDir is used instead of a persistentVolumeClaim diff --git a/tests/templates/kuttl/backup-restore/21-install-opensearch-1.yaml.j2 b/tests/templates/kuttl/backup-restore/21-install-opensearch-1.yaml.j2 index 52b8e4b..f72b709 100644 --- a/tests/templates/kuttl/backup-restore/21-install-opensearch-1.yaml.j2 +++ b/tests/templates/kuttl/backup-restore/21-install-opensearch-1.yaml.j2 @@ -45,13 +45,19 @@ spec: plugins.security.authcz.admin_dn: CN=opensearch-1-admin-certificate plugins.security.restapi.roles_enabled: all_access plugins.security.ssl.http.pemtrustedcas_filepath: /stackable/opensearch/config/tls/concatenated/ca.crt +{% if test_scenario['values']['s3-use-tls'] == 'true' %} s3.client.default.endpoint: https://minio:9000/ s3.client.default.protocol: https +{% else %} + s3.client.default.endpoint: http://minio:9000/ + s3.client.default.protocol: http +{% endif %} s3.client.default.region: unused # but required s3.client.default.path_style_access: "true" podOverrides: spec: initContainers: +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: init-system-keystore {% if test_scenario['values']['opensearch'].find(",") > 0 %} image: "{{ test_scenario['values']['opensearch'].split(',')[1] }}" @@ -72,6 +78,7 @@ spec: mountPath: /etc/pki/ca-trust/source/anchors/s3-ca.crt subPath: tls.crt readOnly: true +{% endif %} - name: init-tls {% if test_scenario['values']['opensearch'].find(",") > 0 %} image: "{{ test_scenario['values']['opensearch'].split(',')[1] }}" @@ -106,18 +113,16 @@ spec: - name: security-config mountPath: /stackable/opensearch/config/opensearch-security readOnly: true +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: system-trust-store mountPath: /etc/pki/java/cacerts subPath: java/cacerts readOnly: true +{% endif %} - name: tls-concatenated mountPath: /stackable/opensearch/config/tls/concatenated readOnly: true volumes: - - name: s3-ca-crt - secret: - secretName: minio-ca-crt - defaultMode: 0o660 - name: admin-certificate secret: secretName: opensearch-1-admin-certificate @@ -126,9 +131,15 @@ spec: secret: secretName: opensearch-1-security-config defaultMode: 0o660 +{% if test_scenario['values']['s3-use-tls'] == 'true' %} + - name: s3-ca-crt + secret: + secretName: minio-ca-crt + defaultMode: 0o660 - name: system-trust-store emptyDir: sizeLimit: 10Mi +{% endif %} - name: tls-concatenated emptyDir: sizeLimit: 1Mi diff --git a/tests/templates/kuttl/backup-restore/22-create-testuser.yaml b/tests/templates/kuttl/backup-restore/22-create-testuser.yaml index 46c3181..af23982 100644 --- a/tests/templates/kuttl/backup-restore/22-create-testuser.yaml +++ b/tests/templates/kuttl/backup-restore/22-create-testuser.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,17 +65,16 @@ data: from opensearchpy import OpenSearch from 
opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOST'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' # Login as admin client = OpenSearch( + hosts = [{'host': host, 'port': port}], http_auth=('admin', 'AJVFsGJBbpT6mChn'), - hosts=[{ - 'host': f'opensearch-1.{namespace}.svc.cluster.local', - 'port': 9200 - }], http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/backup-restore/23-create-data.yaml b/tests/templates/kuttl/backup-restore/23-create-data.yaml index bcde316..8f103c0 100644 --- a/tests/templates/kuttl/backup-restore/23-create-data.yaml +++ b/tests/templates/kuttl/backup-restore/23-create-data.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,17 +65,16 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOST'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' # Login as test user client = OpenSearch( + hosts = [{'host': host, 'port': port}], http_auth=('testuser', 'L9hUHtLVVEsrcLzZ'), - hosts=[{ - 'host': f'opensearch-1.{namespace}.svc.cluster.local', - 'port': 9200 - }], http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml b/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml index 35b1c71..914cf38 100644 --- a/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml +++ b/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,16 +65,15 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOST'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' client = OpenSearch( - hosts=[{ - 'host': f'opensearch-1.{namespace}.svc.cluster.local', - 'port': 9200 - }], + hosts = [{'host': host, 'port': port}], http_auth=('admin', 'AJVFsGJBbpT6mChn'), http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) @@ -88,7 +86,10 @@ data: body={ "type": "s3", "settings": { - "bucket": "opensearch-data" + "bucket": "opensearch-data", + # The S3CrtClient that was introduced in OpenSearch 3.3.0 does not + # work with a TLS-secured MinIO. Use the old Netty client instead. 
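Right here the test adds `s3_async_client_type: netty` to the repository settings. For context, the registration this lands in boils down to one REST call against the snapshot API. A hedged sketch as a plain HTTP request in Rust (endpoint, repository name, and credentials are placeholders; the test itself goes through `opensearch-py`):

```rust
use reqwest::Client; // assumes reqwest with the "json" feature
use serde_json::json;

/// Register an S3 snapshot repository, forcing the legacy Netty S3 client.
/// URL, repository name and credentials are illustrative placeholders.
async fn create_snapshot_repository() -> reqwest::Result<()> {
    let body = json!({
        "type": "s3",
        "settings": {
            "bucket": "opensearch-data",
            // Work around the S3CrtClient/TLS-MinIO incompatibility.
            "s3_async_client_type": "netty",
        }
    });

    Client::new()
        .put("https://opensearch.example.svc:9200/_snapshot/backup-repository")
        .basic_auth("admin", Some("admin-password"))
        .json(&body)
        .send()
        .await?
        .error_for_status()?;

    Ok(())
}
```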
+ "s3_async_client_type": "netty" } } ) diff --git a/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 b/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 index f46c65b..5519c80 100644 --- a/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 +++ b/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 @@ -8,14 +8,16 @@ spec: spec: initContainers: - name: backup-security-indices +{% if test_scenario['values']['opensearch'].find(",") > 0 %} + image: "{{ test_scenario['values']['opensearch'].split(',')[1] }}" +{% else %} image: oci.stackable.tech/sdp/opensearch:{{ test_scenario['values']['opensearch'].split(',')[0] }}-stackable{{ test_scenario['values']['release'] }} +{% endif %} command: - /stackable/scripts/backup-security-indices.sh - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: scripts mountPath: /stackable/scripts @@ -62,8 +64,13 @@ spec: volumeMounts: - name: scripts mountPath: /root/scripts +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: s3-ca-crt mountPath: /root/.mc/certs/CAs +{% else %} + - name: mc-config + mountPath: /root/.mc +{% endif %} - name: backup mountPath: /tmp/backup securityContext: @@ -92,10 +99,16 @@ spec: secret: secretName: opensearch-1-admin-certificate defaultMode: 0o660 +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: s3-ca-crt secret: secretName: minio-ca-crt defaultMode: 0o660 +{% else %} + - name: mc-config + emptyDir: + sizeLimit: 1Mi +{% endif %} - name: backup emptyDir: sizeLimit: 1Mi @@ -116,11 +129,16 @@ data: -cacert config/tls/ca.crt \ -cert config/tls-client/tls.crt \ -key config/tls-client/tls.key \ - --hostname opensearch-1.$NAMESPACE.svc.cluster.local \ + --hostname $OPENSEARCH_HOST \ -backup /tmp/backup upload-security-indices-backup.sh: | #!/usr/bin/env sh - mc alias set minio https://minio:9000/ $S3_ACCESS_KEY $S3_SECRET_KEY +{% if test_scenario['values']['s3-use-tls'] == 'true' %} + S3_PROTOCOL=https +{% else %} + S3_PROTOCOL=http +{% endif %} + mc alias set minio $S3_PROTOCOL://minio:9000/ $S3_ACCESS_KEY $S3_SECRET_KEY mc mb minio/opensearch-security mc cp /tmp/backup/* minio/opensearch-security/ diff --git a/tests/templates/kuttl/backup-restore/51-install-opensearch-2.yaml.j2 b/tests/templates/kuttl/backup-restore/51-install-opensearch-2.yaml.j2 index 76dec7d..c861b32 100644 --- a/tests/templates/kuttl/backup-restore/51-install-opensearch-2.yaml.j2 +++ b/tests/templates/kuttl/backup-restore/51-install-opensearch-2.yaml.j2 @@ -45,13 +45,19 @@ spec: plugins.security.authcz.admin_dn: CN=opensearch-2-admin-certificate plugins.security.restapi.roles_enabled: all_access plugins.security.ssl.http.pemtrustedcas_filepath: /stackable/opensearch/config/tls/concatenated/ca.crt +{% if test_scenario['values']['s3-use-tls'] == 'true' %} s3.client.default.endpoint: https://minio:9000/ s3.client.default.protocol: https +{% else %} + s3.client.default.endpoint: http://minio:9000/ + s3.client.default.protocol: http +{% endif %} s3.client.default.region: unused # but required s3.client.default.path_style_access: "true" podOverrides: spec: initContainers: +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: init-system-keystore {% if test_scenario['values']['opensearch'].find(",") > 0 %} image: "{{ test_scenario['values']['opensearch'].split(',')[1] }}" @@ -72,6 +78,7 @@ spec: mountPath: /etc/pki/ca-trust/source/anchors/s3-ca.crt 
subPath: tls.crt readOnly: true +{% endif %} - name: init-tls {% if test_scenario['values']['opensearch'].find(",") > 0 %} image: "{{ test_scenario['values']['opensearch'].split(',')[1] }}" @@ -106,18 +113,16 @@ spec: - name: security-config mountPath: /stackable/opensearch/config/opensearch-security readOnly: true +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: system-trust-store mountPath: /etc/pki/java/cacerts subPath: java/cacerts readOnly: true +{% endif %} - name: tls-concatenated mountPath: /stackable/opensearch/config/tls/concatenated readOnly: true volumes: - - name: s3-ca-crt - secret: - secretName: minio-ca-crt - defaultMode: 0o660 - name: admin-certificate secret: secretName: opensearch-2-admin-certificate @@ -126,9 +131,15 @@ spec: secret: secretName: opensearch-2-security-config defaultMode: 0o660 +{% if test_scenario['values']['s3-use-tls'] == 'true' %} + - name: s3-ca-crt + secret: + secretName: minio-ca-crt + defaultMode: 0o660 - name: system-trust-store emptyDir: sizeLimit: 10Mi +{% endif %} - name: tls-concatenated emptyDir: sizeLimit: 1Mi diff --git a/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 b/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 index e3100bb..fd74933 100644 --- a/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 +++ b/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 @@ -25,8 +25,13 @@ spec: volumeMounts: - name: scripts mountPath: /root/scripts +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: s3-ca-crt mountPath: /root/.mc/certs/CAs +{% else %} + - name: mc-config + mountPath: /root/.mc +{% endif %} - name: backup mountPath: /tmp/backup securityContext: @@ -45,14 +50,16 @@ spec: cpu: 400m containers: - name: restore-security-indices +{% if test_scenario['values']['opensearch'].find(",") > 0 %} + image: "{{ test_scenario['values']['opensearch'].split(',')[1] }}" +{% else %} image: oci.stackable.tech/sdp/opensearch:{{ test_scenario['values']['opensearch'].split(',')[0] }}-stackable{{ test_scenario['values']['release'] }} +{% endif %} command: - /stackable/scripts/restore-security-indices.sh - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-2 volumeMounts: - name: scripts mountPath: /stackable/scripts @@ -92,10 +99,16 @@ spec: secret: secretName: opensearch-2-admin-certificate defaultMode: 0o660 +{% if test_scenario['values']['s3-use-tls'] == 'true' %} - name: s3-ca-crt secret: secretName: minio-ca-crt defaultMode: 0o660 +{% else %} + - name: mc-config + emptyDir: + sizeLimit: 1Mi +{% endif %} - name: backup emptyDir: sizeLimit: 1Mi @@ -112,7 +125,12 @@ data: download-security-indices-backup.sh: | #!/usr/bin/env sh - mc alias set minio https://minio:9000/ $S3_ACCESS_KEY $S3_SECRET_KEY +{% if test_scenario['values']['s3-use-tls'] == 'true' %} + S3_PROTOCOL=https +{% else %} + S3_PROTOCOL=http +{% endif %} + mc alias set minio $S3_PROTOCOL://minio:9000/ $S3_ACCESS_KEY $S3_SECRET_KEY mc cp --recursive minio/opensearch-security/ /tmp/backup restore-security-indices.sh: | #!/usr/bin/env sh @@ -121,5 +139,5 @@ data: -cacert config/tls/ca.crt \ -cert config/tls-client/tls.crt \ -key config/tls-client/tls.key \ - --hostname opensearch-2.$NAMESPACE.svc.cluster.local \ + --hostname $OPENSEARCH_HOST \ --configdir /tmp/backup diff --git a/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml 
b/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml index 4cf94b5..3755ff7 100644 --- a/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml +++ b/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-2 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,16 +65,15 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOST'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' client = OpenSearch( - hosts=[{ - 'host': f'opensearch-2.{namespace}.svc.cluster.local', - 'port': 9200 - }], + hosts = [{'host': host, 'port': port}], http_auth=('admin', 'AJVFsGJBbpT6mChn'), http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) @@ -88,7 +86,10 @@ data: body={ "type": "s3", "settings": { - "bucket": "opensearch-data" + "bucket": "opensearch-data", + # The S3CrtClient that was introduced in OpenSearch 3.3.0 does not + # work with a TLS-secured MinIO. Use the old Netty client instead. + "s3_async_client_type": "netty" } } ) diff --git a/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml b/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml index b5ec26f..584500a 100644 --- a/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml +++ b/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-2 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,17 +65,16 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOST'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' # Login as test user client = OpenSearch( + hosts = [{'host': host, 'port': port}], http_auth=('testuser', 'L9hUHtLVVEsrcLzZ'), - hosts=[{ - 'host': f'opensearch-2.{namespace}.svc.cluster.local', - 'port': 9200 - }], http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/external-access/20-assert.yaml b/tests/templates/kuttl/external-access/20-assert.yaml index 6e7c514..1d8b6db 100644 --- a/tests/templates/kuttl/external-access/20-assert.yaml +++ b/tests/templates/kuttl/external-access/20-assert.yaml @@ -59,4 +59,4 @@ kind: Service metadata: name: opensearch spec: - type: ClusterIP + type: NodePort # external-stable diff --git a/tests/templates/kuttl/external-access/opensearch.yaml.j2 b/tests/templates/kuttl/external-access/opensearch.yaml.j2 index 514fef6..71eca2f 100644 --- a/tests/templates/kuttl/external-access/opensearch.yaml.j2 +++ b/tests/templates/kuttl/external-access/opensearch.yaml.j2 @@ -18,6 +18,8 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleConfig: + discoveryServiceListenerClass: test-external-stable-$NAMESPACE roleGroups: cluster-manager: config: diff --git 
a/tests/templates/kuttl/ldap/30-test-opensearch.yaml b/tests/templates/kuttl/ldap/30-test-opensearch.yaml index 1c2b31b..9b692f9 100644 --- a/tests/templates/kuttl/ldap/30-test-opensearch.yaml +++ b/tests/templates/kuttl/ldap/30-test-opensearch.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch volumeMounts: - name: script mountPath: /stackable/scripts @@ -75,20 +74,17 @@ data: import os from opensearchpy import OpenSearch - namespace = os.environ['NAMESPACE'] - - host = f'opensearch.{namespace}.svc.cluster.local' - port = 9200 - auth = ('integrationtest', 'integrationtest') - ca_certs_path = '/stackable/tls/ca.crt' + host = os.environ['OPENSEARCH_HOST'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_compress = True, - http_auth = auth, - use_ssl = True, - verify_certs = True, - ca_certs = ca_certs_path + hosts = [{'host': host, 'port': port}], + http_auth=('integrationtest', 'integrationtest'), + http_compress=True, + use_ssl=http_use_tls, + verify_certs=True, + ca_certs='/stackable/tls/ca.crt' ) # Create an index diff --git a/tests/templates/kuttl/logging/20-assert.yaml.j2 b/tests/templates/kuttl/logging/20-assert.yaml.j2 index e705ea3..76e2a2f 100644 --- a/tests/templates/kuttl/logging/20-assert.yaml.j2 +++ b/tests/templates/kuttl/logging/20-assert.yaml.j2 @@ -10,3 +10,11 @@ metadata: status: readyReplicas: 1 replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: opensearch-nodes-custom +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 b/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 index fbd2371..0a4cdc2 100644 --- a/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 +++ b/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 @@ -2,16 +2,18 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - script: > - helm install opensearch-dashboards opensearch-dashboards - --repo https://opensearch-project.github.io/helm-charts - --version "{{ test_scenario['values']['opensearch'].split(',')[0] }}" - --values 20_opensearch-dashboards-values.yaml -{% if test_scenario['values']['server-use-tls'] == 'true' %} - --set opensearchHosts=https://opensearch.$NAMESPACE.svc.cluster.local:9200 -{% else %} - --set opensearchHosts=http://opensearch.$NAMESPACE.svc.cluster.local:9200 -{% endif %} - --namespace $NAMESPACE - --wait + - script: | +{% raw %} + OPENSEARCH_URL=$(kubectl get configmaps opensearch \ + --namespace $NAMESPACE \ + --output=template='{{.data.OPENSEARCH_PROTOCOL}}://{{.data.OPENSEARCH_HOST}}:{{.data.OPENSEARCH_PORT}}') +{% endraw %} + + helm install opensearch-dashboards opensearch-dashboards \ + --repo https://opensearch-project.github.io/helm-charts \ + --version "{{ test_scenario['values']['opensearch'].split(',')[0] }}" \ + --values 20_opensearch-dashboards-values.yaml \ + --set opensearchHosts=$OPENSEARCH_URL \ + --namespace $NAMESPACE \ + --wait timeout: 600 diff --git a/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 b/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 index 30b5b56..eac6232 100644 --- 
a/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 +++ b/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 @@ -1,6 +1,7 @@ image: - repository: oci.stackable.tech/sdp/opensearch-dashboards - tag: {{ test_scenario['values']['opensearch'].split(',')[0] }}-stackable{{ test_scenario['values']['release'] }} + # TODO Revert when the image is released + repository: oci.stackable.tech/sandbox/sdp26.3/opensearch-dashboards + tag: {{ test_scenario['values']['opensearch'].split(',')[0] }}-stackable{{ test_scenario['values']['release'] }}-amd64 service: type: NodePort serviceAccount: diff --git a/tests/templates/kuttl/smoke/10-assert.yaml.j2 b/tests/templates/kuttl/smoke/10-assert.yaml.j2 index 667d31a..2896875 100644 --- a/tests/templates/kuttl/smoke/10-assert.yaml.j2 +++ b/tests/templates/kuttl/smoke/10-assert.yaml.j2 @@ -112,10 +112,19 @@ spec: value: "true" - name: OPENSEARCH_HOME value: {{ test_scenario['values']['opensearch_home'] }} + - name: _POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name - name: cluster.initial_cluster_manager_nodes value: opensearch-nodes-cluster-manager-0,opensearch-nodes-cluster-manager-1,opensearch-nodes-cluster-manager-2 - name: discovery.seed_hosts - value: opensearch + # value: opensearch-seed-nodes.$NAMESPACE.svc.cluster.local + - name: http.publish_host + # value: $(_POD_NAME).opensearch-nodes-cluster-manager-headless.$NAMESPACE.svc.cluster.local + - name: network.publish_host + # value: $(_POD_NAME).opensearch-nodes-cluster-manager-headless.$NAMESPACE.svc.cluster.local - name: node.name valueFrom: fieldRef: @@ -123,6 +132,8 @@ spec: fieldPath: metadata.name - name: node.roles value: cluster_manager + - name: transport.publish_host + # value: $(_POD_NAME).opensearch-nodes-cluster-manager-headless.$NAMESPACE.svc.cluster.local imagePullPolicy: IfNotPresent name: opensearch ports: @@ -165,12 +176,14 @@ spec: subPath: log4j2.properties - mountPath: {{ test_scenario['values']['opensearch_home'] }}/data name: data - - mountPath: /stackable/listener + - mountPath: /stackable/listeners/role-group name: listener - mountPath: /stackable/log name: log - mountPath: {{ test_scenario['values']['opensearch_home'] }}/config/tls/internal name: tls-internal + - mountPath: /stackable/listeners/discovery-service + name: discovery-service-listener {% if test_scenario['values']['server-use-tls'] == 'true' %} - mountPath: {{ test_scenario['values']['opensearch_home'] }}/config/tls/server name: tls-server @@ -262,8 +275,10 @@ spec: volumeClaimTemplate: metadata: annotations: + secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls - secrets.stackable.tech/scope: listener-volume=listener,pod + secrets.stackable.tech/format: tls-pem + secrets.stackable.tech/scope: service=opensearch-seed-nodes,listener-volume=listener,pod spec: accessModes: - ReadWriteOnce @@ -281,7 +296,7 @@ spec: secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls secrets.stackable.tech/format: tls-pem - secrets.stackable.tech/scope: service=opensearch,listener-volume=listener,pod + secrets.stackable.tech/scope: listener-volume=listener,listener-volume=discovery-service-listener,pod spec: accessModes: - ReadWriteOnce @@ -334,6 +349,30 @@ spec: volumeMode: Filesystem status: phase: Pending + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + annotations: + listeners.stackable.tech/listener-name: opensearch + labels: + 
app.kubernetes.io/component: nodes + app.kubernetes.io/instance: opensearch + app.kubernetes.io/managed-by: opensearch.stackable.tech_opensearchcluster + app.kubernetes.io/name: opensearch + app.kubernetes.io/role-group: cluster-manager + app.kubernetes.io/version: {{ test_scenario['values']['opensearch'].split(',')[0] }} + stackable.tech/vendor: Stackable + name: discovery-service-listener + spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: "1" + storageClassName: listeners.stackable.tech + volumeMode: Filesystem + status: + phase: Pending status: readyReplicas: 3 replicas: 3 @@ -446,9 +485,17 @@ spec: value: "true" - name: OPENSEARCH_HOME value: {{ test_scenario['values']['opensearch_home'] }} - - name: cluster.initial_cluster_manager_nodes + - name: _POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name - name: discovery.seed_hosts - value: opensearch + # value: opensearch-seed-nodes.$NAMESPACE.svc.cluster.local + - name: http.publish_host + # value: $(_POD_NAME).opensearch-nodes-data-headless.$NAMESPACE.svc.cluster.local + - name: network.publish_host + # value: $(_POD_NAME).opensearch-nodes-data-headless.$NAMESPACE.svc.cluster.local - name: node.name valueFrom: fieldRef: @@ -456,6 +503,8 @@ spec: fieldPath: metadata.name - name: node.roles value: ingest,data,remote_cluster_client + - name: transport.publish_host + # value: $(_POD_NAME).opensearch-nodes-data-headless.$NAMESPACE.svc.cluster.local imagePullPolicy: IfNotPresent name: opensearch ports: @@ -498,7 +547,7 @@ spec: subPath: log4j2.properties - mountPath: {{ test_scenario['values']['opensearch_home'] }}/data name: data - - mountPath: /stackable/listener + - mountPath: /stackable/listeners/role-group name: listener - mountPath: /stackable/log name: log @@ -595,7 +644,9 @@ spec: volumeClaimTemplate: metadata: annotations: + secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls + secrets.stackable.tech/format: tls-pem secrets.stackable.tech/scope: listener-volume=listener,pod spec: accessModes: @@ -614,7 +665,7 @@ spec: secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls secrets.stackable.tech/format: tls-pem - secrets.stackable.tech/scope: service=opensearch,listener-volume=listener,pod + secrets.stackable.tech/scope: listener-volume=listener,pod spec: accessModes: - ReadWriteOnce @@ -821,7 +872,7 @@ metadata: app.kubernetes.io/name: opensearch app.kubernetes.io/version: {{ test_scenario['values']['opensearch'].split(',')[0] }} stackable.tech/vendor: Stackable - name: opensearch + name: opensearch-seed-nodes ownerReferences: - apiVersion: opensearch.stackable.tech/v1alpha1 controller: true @@ -829,10 +880,6 @@ metadata: name: opensearch spec: ports: - - name: http - port: 9200 - protocol: TCP - targetPort: 9200 - name: transport port: 9300 protocol: TCP @@ -921,7 +968,7 @@ metadata: kind: OpenSearchCluster name: opensearch spec: - className: external-unstable + className: cluster-internal extraPodSelectorLabels: {} ports: - name: http @@ -954,3 +1001,53 @@ spec: port: 9200 protocol: TCP publishNotReadyAddresses: null +--- +apiVersion: listeners.stackable.tech/v1alpha1 +kind: Listener +metadata: + labels: + app.kubernetes.io/component: nodes + app.kubernetes.io/instance: opensearch + app.kubernetes.io/managed-by: opensearch.stackable.tech_opensearchcluster + app.kubernetes.io/name: opensearch + app.kubernetes.io/version: {{ test_scenario['values']['opensearch'].split(',')[0] }} + stackable.tech/vendor: 
diff --git a/tests/templates/kuttl/smoke/10-install-opensearch.yaml.j2 b/tests/templates/kuttl/smoke/10-install-opensearch.yaml.j2
index 734bc07..1901ce1 100644
--- a/tests/templates/kuttl/smoke/10-install-opensearch.yaml.j2
+++ b/tests/templates/kuttl/smoke/10-install-opensearch.yaml.j2
@@ -22,19 +22,22 @@ spec:
     config:
       logging:
         enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
+    roleConfig:
+      discoveryServiceListenerClass: external-unstable
     roleGroups:
       cluster-manager:
         config:
+          discoveryServiceExposed: true
           nodeRoles:
             - cluster_manager
           resources:
             storage:
               data:
                 capacity: 100Mi
-          listenerClass: external-unstable
         replicas: 3
       data:
         config:
+          discoveryServiceExposed: false
           nodeRoles:
             - ingest
             - data
@@ -43,7 +46,6 @@ spec:
            storage:
              data:
                capacity: 2Gi
-          listenerClass: cluster-internal
         replicas: 2
    envOverrides:
      # Only required for the official image
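
For context, the two discovery settings this test step exercises: roleConfig.discoveryServiceListenerClass picks the ListenerClass for the discovery service, while config.discoveryServiceExposed opts each role group in or out. A minimal sketch of just these fields; the cluster name is a placeholder, and image plus other required fields are omitted:

# Sketch under the assumptions above, not a complete manifest.
apiVersion: opensearch.stackable.tech/v1alpha1
kind: OpenSearchCluster
metadata:
  name: opensearch  # placeholder
spec:
  nodes:
    roleConfig:
      # ListenerClass used for the discovery service (defaults to cluster-internal)
      discoveryServiceListenerClass: external-unstable
    roleGroups:
      cluster-manager:
        config:
          discoveryServiceExposed: true   # only this role group backs the discovery service
        replicas: 3
      data:
        config:
          discoveryServiceExposed: false
        replicas: 2
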
diff --git a/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2 b/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2
index f63d3b1..aa9a8b7 100644
--- a/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2
+++ b/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2
@@ -22,12 +22,9 @@ spec:
         # required for pip install
         - name: HOME
           value: /stackable
-        - name: HTTP_USE_TLS
-          value: "{{ test_scenario['values']['server-use-tls'] }}"
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
+      envFrom:
+        - configMapRef:
+            name: opensearch
       volumeMounts:
         - name: script
           mountPath: /stackable/scripts
@@ -51,23 +48,22 @@
         configMap:
           name: test-opensearch
     - name: tls
-      ephemeral:
-        volumeClaimTemplate:
-          metadata:
-            annotations:
-              secrets.stackable.tech/class: tls
-          spec:
-            storageClassName: secrets.stackable.tech
-            accessModes:
-              - ReadWriteOnce
-            resources:
-              requests:
-                storage: "1"
+      configMap:
+        name: truststore-pem
   serviceAccountName: test-service-account
   securityContext:
     fsGroup: 1000
   restartPolicy: OnFailure
 ---
+apiVersion: secrets.stackable.tech/v1alpha1
+kind: TrustStore
+metadata:
+  name: truststore-pem
+spec:
+  secretClassName: tls
+  format: tls-pem
+  targetKind: ConfigMap
+---
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -79,11 +75,10 @@ data:
     import os
     from opensearchpy import OpenSearch
 
-    namespace = os.environ['NAMESPACE']
-    http_use_tls = os.environ['HTTP_USE_TLS'] == 'true'
+    host = os.environ['OPENSEARCH_HOST']
+    port = os.environ['OPENSEARCH_PORT']
+    http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https'
 
-    host = f'opensearch.{namespace}.svc.cluster.local'
-    port = 9200
     auth = ('admin', 'AJVFsGJBbpT6mChn')  # For testing only. Don't store credentials in code.
 
     ca_certs_path = '/stackable/tls/ca.crt'
diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml
index d2ffb73..31607bf 100644
--- a/tests/test-definition.yaml
+++ b/tests/test-definition.yaml
@@ -2,10 +2,14 @@
 dimensions:
   - name: opensearch
     values:
-      - 3.1.0
+      # - 3.1.0
+      # - 3.4.0
       # To use a custom image, add a comma and the full name after the product version, e.g.:
       # - 3.1.0,oci.stackable.tech/sandbox/opensearch:3.1.0-stackable0.0.0-dev
       # - 3.1.0,localhost:5000/sdp/opensearch:3.1.0-stackable0.0.0-dev
+      # TODO Revert when images are released
+      - 3.1.0,oci.stackable.tech/sandbox/sdp26.3/opensearch:3.1.0-stackable0.0.0-dev-amd64
+      - 3.4.0,oci.stackable.tech/sandbox/sdp26.3/opensearch:3.4.0-stackable0.0.0-dev-amd64
   - name: opensearch_home
     values:
       - /stackable/opensearch
@@ -13,6 +17,9 @@
     values:
       - "true"
      - "false"
+  - name: s3-use-tls
+    values:
+      - "true"
   # The release must sometimes be known in podOverrides or Helm values
   - name: release
     values:
@@ -50,6 +57,7 @@ tests:
     dimensions:
       - opensearch
       - release
+      - s3-use-tls
 suites:
   - name: nightly
     patch:
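
With these changes in place, a test client no longer hard-codes the endpoint: it pulls everything from the operator-written discovery ConfigMap, roughly along the lines of this sketch. The Pod name and image are illustrative, and OPENSEARCH_HOST and OPENSEARCH_PORT are assumed to be filled in by the operator, as the commented-out keys in the assert above suggest:

# Hypothetical client Pod; all connection settings come from the discovery
# ConfigMap (named after the OpenSearchCluster), injected via envFrom.
apiVersion: v1
kind: Pod
metadata:
  name: opensearch-client  # placeholder
spec:
  containers:
    - name: client
      image: busybox:1.36  # any image with a shell works here
      command: ["sh", "-c", "echo $OPENSEARCH_PROTOCOL://$OPENSEARCH_HOST:$OPENSEARCH_PORT"]
      envFrom:
        - configMapRef:
            name: opensearch  # the discovery ConfigMap created by the operator
  restartPolicy: Never
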