
Commit 38c184a

Don't run init container as root and avoid chmod and chowning (#282)
# Description

This is a first test to build the operator image: https://ci.stackable.tech/view/02%20Operator%20Tests%20(custom)/job/hdfs-operator-it-custom/30/

Co-authored-by: maltesander <malte.sander.it@gmail.com>
Co-authored-by: Malte Sander <malte.sander.it@gmail.com>
1 parent 7842348 commit 38c184a
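
The net effect of this change is that the HDFS pods run as UID/GID 1000 throughout and rely on the pod-level `fsGroup` to make mounted volumes writable, instead of a root `chown-data` init container. Below is a minimal sketch of the resulting pod security context, using the `k8s_openapi` types this repository already re-exports; the helper name is illustrative, not part of the codebase.

```rust
// Sketch only: the UID/GID 1000 and fsGroup 1000 values come from the diff
// below; `non_root_pod_security_context` is a hypothetical helper name.
use stackable_operator::k8s_openapi::api::core::v1::PodSecurityContext;

fn non_root_pod_security_context() -> PodSecurityContext {
    PodSecurityContext {
        run_as_user: Some(1000),
        run_as_group: Some(1000),
        // With fsGroup set, the kubelet adjusts group ownership of mounted
        // volumes, so no root chown/chmod init container is required.
        fs_group: Some(1000),
        ..PodSecurityContext::default()
    }
}
```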

2 files changed (+21, −55 lines)

rust/operator/src/hdfs_controller.rs

Lines changed: 21 additions & 52 deletions
@@ -383,7 +383,7 @@ fn rolegroup_statefulset(
         }
         HdfsRole::JournalNode => {
             replicas = hdfs.rolegroup_journalnode_replicas(rolegroup_ref)?;
-            init_containers = journalnode_init_containers(hadoop_container);
+            init_containers = None;
             containers = journalnode_containers(rolegroup_ref, hadoop_container, &resources)?;
         }
     }
@@ -408,8 +408,8 @@ fn rolegroup_statefulset(
                 service_account: Some(rbac_sa.to_string()),
                 security_context: Some(
                     PodSecurityContextBuilder::new()
-                        .run_as_user(rbac::HDFS_UID)
-                        .run_as_group(0)
+                        .run_as_user(1000)
+                        .run_as_group(1000)
                         .fs_group(1000) // Needed for secret-operator
                         .build(),
                 ),
@@ -622,15 +622,13 @@ fn datanode_init_containers(
     namenode_podrefs: &[HdfsPodRef],
     hadoop_container: &Container,
 ) -> Option<Vec<Container>> {
-    Some(vec![
-        chown_init_container(&HdfsNodeDataDirectory::default().datanode, hadoop_container),
-        Container {
-            name: "wait-for-namenodes".to_string(),
-            args: Some(vec![
-                "sh".to_string(),
-                "-c".to_string(),
-                format!(
-                    "
+    Some(vec![Container {
+        name: "wait-for-namenodes".to_string(),
+        args: Some(vec![
+            "sh".to_string(),
+            "-c".to_string(),
+            format!(
+                "
             echo \"Waiting for namenodes to get ready:\"
             n=0
             while [ ${{n}} -lt 12 ];
@@ -658,32 +656,23 @@ fn datanode_init_containers(
               sleep 5
             done
             ",
-                    hadoop_home = HADOOP_HOME,
-                    pod_names = namenode_podrefs
-                        .iter()
-                        .map(|pod_ref| pod_ref.pod_name.as_ref())
-                        .collect::<Vec<&str>>()
-                        .join(" ")
-                ),
-            ]),
-            ..hadoop_container.clone()
-        },
-    ])
-}
-
-fn journalnode_init_containers(hadoop_container: &Container) -> Option<Vec<Container>> {
-    Some(vec![chown_init_container(
-        &HdfsNodeDataDirectory::default().journalnode,
-        hadoop_container,
-    )])
+                hadoop_home = HADOOP_HOME,
+                pod_names = namenode_podrefs
+                    .iter()
+                    .map(|pod_ref| pod_ref.pod_name.as_ref())
+                    .collect::<Vec<&str>>()
+                    .join(" ")
+            ),
+        ]),
+        ..hadoop_container.clone()
+    }])
 }
 
 fn namenode_init_containers(
     namenode_podrefs: &[HdfsPodRef],
     hadoop_container: &Container,
 ) -> Option<Vec<Container>> {
     Some(vec![
-        chown_init_container(&HdfsNodeDataDirectory::default().namenode, hadoop_container),
         Container {
             name: "format-namenode".to_string(),
             args: Some(vec![
@@ -733,6 +722,7 @@ fn namenode_init_containers(
             ]),
             security_context: Some(SecurityContext {
                 run_as_user: Some(1000),
+                run_as_group: Some(1000),
                 ..SecurityContext::default()
             }),
             ..hadoop_container.clone()
@@ -749,27 +739,6 @@ fn namenode_init_containers(
     ])
 }
 
-/// Creates a container that chowns and chmods the provided `node_dir`.
-fn chown_init_container(node_dir: &str, hadoop_container: &Container) -> Container {
-    Container {
-        name: "chown-data".to_string(),
-        args: Some(vec![
-            "sh".to_string(),
-            "-c".to_string(),
-            format!(
-                "mkdir -p {node_dir} && chown -R stackable:stackable {data_dir} && chmod -R a=,u=rwX {data_dir}",
-                node_dir = node_dir,
-                data_dir = ROOT_DATA_DIR
-            ),
-        ]),
-        security_context: Some(SecurityContext {
-            run_as_user: Some(0),
-            ..SecurityContext::default()
-        }),
-        ..hadoop_container.clone()
-    }
-}
-
 /// Creates a probe for [`stackable_operator::k8s_openapi::api::core::v1::TCPSocketAction`]
 /// for liveness or readiness probes
 fn tcp_socket_action_probe(
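
With the root `chown-data` helper removed, init containers that still need to run commands (such as `format-namenode` above) declare an explicit non-root security context of their own. Below is a minimal sketch of that pattern, mirroring the values in the hunks above; the helper name and `command` parameter are illustrative, not part of the codebase.

```rust
// Sketch only: builds an init container that runs a shell command as the
// unprivileged user 1000:1000, as the format-namenode container now does.
use stackable_operator::k8s_openapi::api::core::v1::{Container, SecurityContext};

fn non_root_init_container(name: &str, command: &str, base: &Container) -> Container {
    Container {
        name: name.to_string(),
        args: Some(vec!["sh".to_string(), "-c".to_string(), command.to_string()]),
        security_context: Some(SecurityContext {
            run_as_user: Some(1000),
            run_as_group: Some(1000),
            ..SecurityContext::default()
        }),
        // Inherit image, env, volume mounts, etc. from the shared Hadoop container.
        ..base.clone()
    }
}
```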

rust/operator/src/rbac.rs

Lines changed: 0 additions & 3 deletions
@@ -4,9 +4,6 @@ use stackable_operator::k8s_openapi::api::core::v1::ServiceAccount;
 use stackable_operator::k8s_openapi::api::rbac::v1::{RoleBinding, RoleRef, Subject};
 use stackable_operator::kube::{Resource, ResourceExt};
 
-/// Used as runAsUser in the pod security context. This is specified in the Hadoop image file
-pub const HDFS_UID: i64 = 1000;
-
 /// Build RBAC objects for the product workloads.
 /// The `rbac_prefix` is meant to be the product name, for example: zookeeper, airflow, etc.
 /// and it is a assumed that a ClusterRole named `hdfs-clusterrole` exists.
