Use a patched version of op-rs to hopefully fix SUP-148 (#616)

* Use a patched version of op-rs to hopefully fix SUP-148

* Update revision of the op-rs PR

* Make necessary changes to reflect functions that are now private in op-rs.

* Added changelog entry.

Removed the patched op-rs version and referenced release 0.82 instead.

* rustfmt

* Removed use of unwrap and expect in favor of proper error logging (a small sketch of the pattern follows this list).

* Added the legacy ServiceAccount name to the ClusterRoleBinding.

* Added removal of duplicate ClusterRoleBinding subjects in case a cluster is called hdfs.

* Addressed review comments.

* markdownlint
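
The unwrap/expect removal mentioned above boils down to the filter_map-plus-error-logging pattern visible in the rbac diff further down. A tiny, generic illustration of that pattern (hypothetical values, not the operator's actual code; the operator logs via tracing::error! rather than eprintln!):

```rust
fn parse_ports(raw: &[&str]) -> Vec<u16> {
    raw.iter()
        .filter_map(|value| match value.parse::<u16>() {
            Ok(port) => Some(port),
            Err(e) => {
                // Log and skip the broken element instead of panicking via unwrap/expect.
                eprintln!("skipping invalid port {value:?}: {e}");
                None
            }
        })
        .collect()
}

fn main() {
    // The invalid entry is logged and skipped; the process keeps going.
    assert_eq!(parse_ports(&["8020", "not-a-port", "9870"]), vec![8020, 9870]);
}
```
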
soenkeliebau authored Nov 25, 2024
1 parent 2847681 commit 47ee88a
Showing 7 changed files with 77 additions and 29 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -20,10 +20,12 @@ All notable changes to this project will be documented in this file.
### Fixed

- An invalid `HdfsCluster` doesn't cause the operator to stop functioning ([#594]).
- BREAKING: Use distinct ServiceAccounts for the Stacklets, so that multiple Stacklets can be deployed in one namespace. Existing Stacklets will use the newly created ServiceAccounts after restart ([#616]).

[#574]: https://github.com/stackabletech/hdfs-operator/pull/574
[#591]: https://github.com/stackabletech/hdfs-operator/pull/591
[#594]: https://github.com/stackabletech/hdfs-operator/pull/594
[#616]: https://github.com/stackabletech/hdfs-operator/pull/616

## [24.7.0] - 2024-07-24

8 changes: 4 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default.

14 changes: 7 additions & 7 deletions Cargo.nix

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions Cargo.toml
@@ -21,13 +21,13 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9"
snafu = "0.8"
stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.80.0" }
stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.82.0" }
product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" }
strum = { version = "0.26", features = ["derive"] }
tokio = { version = "1.40", features = ["full"] }
tracing = "0.1"
tracing-futures = { version = "0.2", features = ["futures-03"] }

[patch."https://github.com/stackabletech/operator-rs.git"]
#[patch."https://github.com/stackabletech/operator-rs.git"]
#stackable-operator = { path = "../operator-rs/crates/stackable-operator" }
#stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" }
6 changes: 3 additions & 3 deletions crate-hashes.json

Some generated files are not rendered by default.

@@ -3,8 +3,9 @@ use stackable_hdfs_crd::{
constants::{APP_NAME, FIELD_MANAGER_SCOPE},
HdfsCluster,
};
use stackable_operator::kube::ResourceExt;
use stackable_operator::{
commons::rbac::service_account_name,
commons::rbac::build_rbac_resources,
k8s_openapi::api::rbac::v1::{ClusterRoleBinding, Subject},
kube::{
api::{Patch, PatchParams},
@@ -15,6 +16,7 @@ use stackable_operator::{
},
Api, Client,
},
kvp::Labels,
};
use tracing::{error, info};

@@ -41,18 +43,62 @@ pub async fn reconcile(
)
}
}

// Build a list of Subject objects for all deployed HdfsClusters.
// Only the metadata is needed for this, as we only really need the
// name and namespace of the objects.
let subjects: Vec<Subject> = store
.state()
.into_iter()
.map(|object| object.metadata.clone())
.map(|meta| Subject {
kind: "ServiceAccount".to_string(),
name: service_account_name(APP_NAME),
namespace: meta.namespace.clone(),
..Subject::default()
.filter_map(|object| {
// The call to 'build_rbac_resources' can fail, so we use filter_map here:
// log an error for any failures and keep going with all the non-broken elements.
// Usually we'd rather fail completely here, but in this specific instance
// one broken cluster somewhere could impact other working clusters within
// the namespace, so we opted to do everything we can here instead of
// failing completely.
match build_rbac_resources(&*object, APP_NAME, Labels::default()) {
Ok((service_account, _role_binding)) => {
Some((object.metadata.clone(), service_account.name_any()))
}
Err(e) => {
error!(
?object,
error = &e as &dyn std::error::Error,
"Failed to build serviceAccount name for hdfs cluster"
);
None
}
}
})
.flat_map(|(meta, sa_name)| {
let mut result = vec![
Subject {
kind: "ServiceAccount".to_string(),
name: sa_name,
namespace: meta.namespace.clone(),
..Subject::default()
},
// This extra ServiceAccount is written for legacy/compatibility purposes,
// to ensure that running clusters don't lose access to anything during an
// upgrade of the Stackable operators. This code can be removed in later releases.
// The value is hardcoded here, as the op-rs functions that would have built it
// are now private. Since this is a known target and will be removed soon,
// this should not be an issue.
Subject {
kind: "ServiceAccount".to_string(),
name: "hdfs-serviceaccount".to_string(),
namespace: meta.namespace.clone(),
..Subject::default()
},
];
// If a cluster is called hdfs, this would result in the same subject
// being written twice.
// Since we know this vec only contains two elements, we can simply use
// dedup to remove the duplicate.
result.dedup();
result
})
.collect();

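For readability, here is a minimal, dependency-free sketch of the legacy-subject and dedup logic added above. The Subject struct is a simplified stand-in for k8s_openapi::api::rbac::v1::Subject, and the "<cluster>-serviceaccount" naming is an assumption inferred from the dedup comment; in the real code the per-cluster name comes from build_rbac_resources.

```rust
#[derive(Clone, Debug, PartialEq)]
struct Subject {
    kind: String,
    name: String,
    namespace: Option<String>,
}

fn subjects_for_cluster(cluster_name: &str, namespace: &str) -> Vec<Subject> {
    let mut subjects = vec![
        // Per-cluster ServiceAccount (name assumed to be "<cluster>-serviceaccount").
        Subject {
            kind: "ServiceAccount".to_string(),
            name: format!("{cluster_name}-serviceaccount"),
            namespace: Some(namespace.to_string()),
        },
        // Legacy shared ServiceAccount, kept so running clusters keep access during upgrades.
        Subject {
            kind: "ServiceAccount".to_string(),
            name: "hdfs-serviceaccount".to_string(),
            namespace: Some(namespace.to_string()),
        },
    ];
    // A cluster literally named "hdfs" yields the same subject twice; dedup drops
    // the adjacent duplicate.
    subjects.dedup();
    subjects
}

fn main() {
    assert_eq!(subjects_for_cluster("simple-hdfs", "default").len(), 2);
    assert_eq!(subjects_for_cluster("hdfs", "default").len(), 1);
}
```

Using dedup works here only because the two entries are adjacent and are the sole possible collision; a set-based approach would be needed if arbitrary duplicates could occur.
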
12 changes: 6 additions & 6 deletions rust/operator-binary/src/hdfs_controller.rs
@@ -9,6 +9,7 @@ use product_config::{
ProductConfigManager,
};
use snafu::{OptionExt, ResultExt, Snafu};
use stackable_operator::k8s_openapi::api::core::v1::ServiceAccount;
use stackable_operator::{
builder::{
configmap::ConfigMapBuilder,
@@ -17,10 +18,7 @@ use stackable_operator::{
},
client::Client,
cluster_resources::{ClusterResourceApplyStrategy, ClusterResources},
commons::{
product_image_selection::ResolvedProductImage,
rbac::{build_rbac_resources, service_account_name},
},
commons::{product_image_selection::ResolvedProductImage, rbac::build_rbac_resources},
iter::reverse_if,
k8s_openapi::{
api::{
@@ -326,7 +324,7 @@ pub async fn reconcile_hdfs(
.context(BuildRbacResourcesSnafu)?;

cluster_resources
.add(client, rbac_sa)
.add(client, rbac_sa.clone())
.await
.context(ApplyServiceAccountSnafu)?;
cluster_resources
@@ -434,6 +432,7 @@ pub async fn reconcile_hdfs(
env_overrides,
&merged_config,
&namenode_podrefs,
&rbac_sa,
)?;

let rg_service_name = rg_service.name_any();
@@ -818,6 +817,7 @@ fn rolegroup_statefulset(
env_overrides: Option<&BTreeMap<String, String>>,
merged_config: &AnyNodeConfig,
namenode_podrefs: &[HdfsPodRef],
service_account: &ServiceAccount,
) -> HdfsOperatorResult<StatefulSet> {
tracing::info!("Setting up StatefulSet for {:?}", rolegroup_ref);

@@ -837,7 +837,7 @@
pb.metadata(pb_metadata)
.image_pull_secrets_from_product_image(resolved_product_image)
.affinity(&merged_config.affinity)
.service_account_name(service_account_name(APP_NAME))
.service_account_name(service_account.name_any())
.security_context(
PodSecurityContextBuilder::new()
.run_as_user(HDFS_UID)
Expand Down
