Merge pull request #1 from kube-champ/feature/git-auth
support module source over git ssh
IbraheemAlSaady authored Jan 17, 2022
2 parents 4834328 + fdfcfa2 commit e190921
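The change teaches the runner to fetch Terraform module sources over git+ssh. The CRD field carrying the deploy key is defined outside the files shown below; a minimal sketch of what it presumably looks like, inferred from t.Spec.GitSSHKey.ValueFrom being dereferenced into a corev1.VolumeSource in api/v1alpha1/k8s_jobs.go (the type and field names here are assumptions, not part of this diff):

// Hypothetical sketch — inferred from usage in k8s_jobs.go, not shown in this commit.
package v1alpha1

import corev1 "k8s.io/api/core/v1"

// GitSSHKey carries the volume source (typically a Secret) that holds the
// private key the runner uses to clone the module over git+ssh.
type GitSSHKey struct {
	ValueFrom *corev1.VolumeSource `json:"valueFrom,omitempty"`
}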
Showing 15 changed files with 1,819 additions and 81 deletions.
3 changes: 2 additions & 1 deletion .env.example
@@ -1,3 +1,4 @@
export DOCKER_REGISTRY=
export TERRAFORM_RUNNER_IMAGE=
export TERRAFORM_RUNNER_IMAGE_TAG=
export TERRAFORM_RUNNER_IMAGE_TAG=
export KNOWN_HOSTS_CONFIGMAP_NAME=
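The new KNOWN_HOSTS_CONFIGMAP_NAME variable resurfaces later in this diff as utils.Env.KnownHostsConfigMapName. The utils package is not part of the changeset; a rough sketch of the assumed plumbing, where everything except the environment variable names is a guess:

// Hypothetical sketch — the real utils package is not shown in this commit.
package utils

import "os"

type EnvVars struct {
	DockerRepository        string // from DOCKER_REGISTRY
	TerraformRunnerImage    string // from TERRAFORM_RUNNER_IMAGE
	TerraformRunnerImageTag string // from TERRAFORM_RUNNER_IMAGE_TAG
	KnownHostsConfigMapName string // from KNOWN_HOSTS_CONFIGMAP_NAME (new in this commit)
}

var Env = EnvVars{
	DockerRepository:        os.Getenv("DOCKER_REGISTRY"),
	TerraformRunnerImage:    os.Getenv("TERRAFORM_RUNNER_IMAGE"),
	TerraformRunnerImageTag: os.Getenv("TERRAFORM_RUNNER_IMAGE_TAG"),
	KnownHostsConfigMapName: os.Getenv("KNOWN_HOSTS_CONFIGMAP_NAME"),
}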
6 changes: 4 additions & 2 deletions .gitignore
@@ -24,6 +24,8 @@ testbin/*
*.swo
*~

.DS_Store
**/.DS_Store
*kubeconfig
*.env
*.env

**/*k8s-secret.yaml
40 changes: 40 additions & 0 deletions api/v1alpha1/k8s_helpers.go
@@ -12,6 +12,20 @@ func getVolumeSpec(name string, source corev1.VolumeSource) corev1.Volume {
}
}

// returns a volume spec from configMap
func getVolumeSpecFromConfigMap(volumeName string, configMapName string) corev1.Volume {
return corev1.Volume{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMapName,
},
},
},
}
}

// returns an emptyDir volume spec
func getEmptyDirVolume(name string) corev1.Volume {
return corev1.Volume{
@@ -31,6 +45,16 @@ func getVolumeMountSpec(volumeName string, mountPath string, readOnly bool) core
}
}

// returns a volume mount spec with subpath option
func getVolumeMountSpecWithSubPath(volumeName string, mountPath string, subPath string, readOnly bool) corev1.VolumeMount {
return corev1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
ReadOnly: readOnly,
SubPath: subPath,
}
}

func getEnvVariable(name string, value string) corev1.EnvVar {
return corev1.EnvVar{
Name: name,
@@ -48,3 +72,19 @@ func getEnvVariableFromFieldSelector(name string, path string) corev1.EnvVar {
},
}
}

func getInitContainer(name string, image string, mounts []corev1.VolumeMount, cmd string) corev1.Container {
commands := []string{
"/bin/sh",
"-c",
}

commands = append(commands, cmd)

return corev1.Container{
Name: name,
Image: image,
VolumeMounts: mounts,
Command: commands,
}
}
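The new helpers above compose as in k8s_jobs.go below; a condensed, same-package sketch with placeholder resource names (only the mount paths and file names match the diff):

// Sketch of how the new helpers combine for the git+ssh case (see k8s_jobs.go).
func exampleGitSSHVolumesAndMounts() ([]corev1.Volume, []corev1.VolumeMount) {
	volumes := []corev1.Volume{
		// the rendered module ConfigMap, named after the run (placeholder name)
		getVolumeSpecFromConfigMap("terraform-run-abc123", "terraform-run-abc123"),
		// operator-provided known_hosts ConfigMap (placeholder name)
		getVolumeSpecFromConfigMap("known-hosts", "terraform-known-hosts"),
	}

	mounts := []corev1.VolumeMount{
		// project single files into /root/.ssh via subPath so the rest of the directory stays intact
		getVolumeMountSpecWithSubPath("git-ssh", "/root/.ssh/id_rsa", "id_rsa", false),
		getVolumeMountSpecWithSubPath("known-hosts", "/root/.ssh/known_hosts", "known_hosts", false),
	}

	return volumes, mounts
}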
177 changes: 103 additions & 74 deletions api/v1alpha1/k8s_jobs.go
@@ -13,36 +13,22 @@ import (
)

const (
tfVarsMountPath string = "/tmp/tfvars"
moduleWorkingDir string = "/tmp/tfmodule"
conifgMapModuleMountPath string = "/terraform/modules"
emptyDirVolumeName string = "tfmodule"
tfVarsMountPath string = "/tmp/tfvars"
moduleWorkingDirMountPath string = "/tmp/tfmodule"
conifgMapModuleMountPath string = "/terraform/modules"
gitSSHKeyMountPath string = "/root/.ssh"

knownHostsVolumeName string = "known-hosts"
emptyDirVolumeName string = "tfmodule"
gitSSHKeyVolumeName string = "git-ssh"
)

func getTerraformDockerImage() string {
func getTerraformRunnerDockerImage() string {
return fmt.Sprintf("%s/%s:%s", utils.Env.DockerRepository, utils.Env.TerraformRunnerImage, utils.Env.TerraformRunnerImageTag)
}

// return the volumes to be mounted
func getJobVolumes(varFiles []VariableFile) []corev1.Volume {
volumes := []corev1.Volume{}

for _, file := range varFiles {
volumes = append(volumes, getVolumeSpec(file.Key, *file.ValueFrom))
}

return volumes
}

// return the volume mounts
func getJobVolumeMounts(varFiles []VariableFile) []corev1.VolumeMount {
mounts := []corev1.VolumeMount{}

for _, file := range varFiles {
mounts = append(mounts, getVolumeMountSpec(file.Key, tfVarsMountPath, true))
}

return mounts
func getBusyboxDockerImage() string {
return fmt.Sprintf("%s/%s", utils.Env.DockerRepository, "busybox")
}

// returns a string that may or may not start with a TF_VAR_ prefix for the container
@@ -56,11 +42,31 @@ func getEnvVarKey(v Variable) string {
return fmt.Sprintf("%s%s", prefix, v.Key)
}

// returns a list of environment variables to be injected into the job runner
// these environment variables are specific to the terraform runner container
func (t *Terraform) getRunnerSpecificEnvVars() []corev1.EnvVar {
envVars := []corev1.EnvVar{}

envVars = append(envVars, getEnvVariable("TERRAFORM_VERSION", t.Spec.TerraformVersion))
envVars = append(envVars, getEnvVariable("TERRAFORM_WORKING_DIR", moduleWorkingDirMountPath))
envVars = append(envVars, getEnvVariable("TERRAFORM_VAR_FILES_PATH", tfVarsMountPath))
envVars = append(envVars, getEnvVariable("OUTPUT_SECRET_NAME", getUniqueResourceName(t.Name, t.Status.RunId)))
envVars = append(envVars, getEnvVariable("TERRAFORM_DESTROY", strconv.FormatBool(t.Spec.Destroy)))

envVars = append(envVars, getEnvVariableFromFieldSelector("POD_NAMESPACE", "metadata.namespace"))

if t.Spec.Workspace != "" {
envVars = append(envVars, getEnvVariable("TERRAFORM_WORKSPACE", t.Spec.Workspace))
}

return envVars
}

// returns Kubernetes Pod environment variables to be passed to the run job
func getEnvVariables(variables []Variable) []corev1.EnvVar {
func (t *Terraform) getEnvVariables() []corev1.EnvVar {
vars := []corev1.EnvVar{}

for _, v := range variables {
for _, v := range t.Spec.Variables {
if v.ValueFrom != nil {
vars = append(vars, corev1.EnvVar{
Name: getEnvVarKey(v),
@@ -76,82 +82,105 @@ func getEnvVariables(variables []Variable) []corev1.EnvVar {
}
}

vars = append(vars, t.getRunnerSpecificEnvVars()...)

return vars
}

// returns a list of environment variables to be injected into the job runner
// these environment variables are specific to the terraform runner container
func (t *Terraform) getRunnerSpecificEnvVars(secret *corev1.Secret) []corev1.EnvVar {
envVars := []corev1.EnvVar{}
func (t *Terraform) getRunnerSpecificVolumes() []corev1.Volume {
volumes := []corev1.Volume{}

envVars = append(envVars, getEnvVariable("TERRAFORM_VERSION", t.Spec.TerraformVersion))
envVars = append(envVars, getEnvVariable("TERRAFORM_WORKING_DIR", moduleWorkingDir))
envVars = append(envVars, getEnvVariable("TERRAFORM_VAR_FILES_PATH", tfVarsMountPath))
envVars = append(envVars, getEnvVariable("OUTPUT_SECRET_NAME", secret.ObjectMeta.Name))
envVars = append(envVars, getEnvVariable("TERRAFORM_DESTROY", strconv.FormatBool(t.Spec.Destroy)))
name := getUniqueResourceName(t.Name, t.Status.RunId)

envVars = append(envVars, getEnvVariableFromFieldSelector("POD_NAMESPACE", "metadata.namespace"))
volumes = append(volumes, getEmptyDirVolume(emptyDirVolumeName))
volumes = append(volumes, getVolumeSpecFromConfigMap(name, name))

if t.Spec.Workspace != "" {
envVars = append(envVars, getEnvVariable("TERRAFORM_WORKSPACE", t.Spec.Workspace))
if t.Spec.GitSSHKey != nil && t.Spec.GitSSHKey.ValueFrom != nil {
volumes = append(volumes, getVolumeSpec(gitSSHKeyVolumeName, *t.Spec.GitSSHKey.ValueFrom))
volumes = append(volumes, getVolumeSpecFromConfigMap(knownHostsVolumeName, utils.Env.KnownHostsConfigMapName))
}

return envVars
return volumes
}

func (t *Terraform) getRunnerSpecificVolumes(configMap *corev1.ConfigMap) []corev1.Volume {
// return the volumes to be mounted
func (t *Terraform) getJobVolumes() []corev1.Volume {
volumes := []corev1.Volume{}

volumes = append(volumes, getEmptyDirVolume(emptyDirVolumeName))
for _, file := range t.Spec.VariableFiles {
volumes = append(volumes, getVolumeSpec(file.Key, *file.ValueFrom))
}

return append(volumes, corev1.Volume{
Name: configMap.ObjectMeta.Name,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMap.ObjectMeta.Name,
},
},
},
})
volumes = append(volumes, t.getRunnerSpecificVolumes()...)

return volumes
}

func (t *Terraform) getRunnerSpecificVolumeMounts() []corev1.VolumeMount {
mounts := []corev1.VolumeMount{}

mounts = append(mounts, getVolumeMountSpec(emptyDirVolumeName, moduleWorkingDirMountPath, false))
mounts = append(mounts, getVolumeMountSpec(getUniqueResourceName(t.Name, t.Status.RunId), conifgMapModuleMountPath, false))

if t.Spec.GitSSHKey != nil && t.Spec.GitSSHKey.ValueFrom != nil {
sshKeyFileName := "id_rsa"
sshKnownHostsFileName := "known_hosts"

sshKeyMountPath := fmt.Sprintf("%s/%s", gitSSHKeyMountPath, sshKeyFileName)
sshKnownHostsMountPath := fmt.Sprintf("%s/%s", gitSSHKeyMountPath, sshKnownHostsFileName)

mounts = append(mounts, getVolumeMountSpecWithSubPath(gitSSHKeyVolumeName, sshKeyMountPath, sshKeyFileName, false))
mounts = append(mounts, getVolumeMountSpecWithSubPath(knownHostsVolumeName, sshKnownHostsMountPath, sshKnownHostsFileName, false))
}

return mounts
}
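// Note: the subPath mounts above project only /root/.ssh/id_rsa and
// /root/.ssh/known_hosts into the runner container, so the rest of /root/.ssh
// is left intact instead of being masked by a whole-directory volume mount.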

func (t *Terraform) getRunnerSpecificVolumeMounts(configMap *corev1.ConfigMap) []corev1.VolumeMount {
// return the volume mounts
func (t *Terraform) getJobVolumeMounts() []corev1.VolumeMount {
mounts := []corev1.VolumeMount{}

mounts = append(mounts, getVolumeMountSpec(emptyDirVolumeName, moduleWorkingDir, false))
mounts = append(mounts, getVolumeMountSpec(configMap.Name, conifgMapModuleMountPath, false))
for _, file := range t.Spec.VariableFiles {
mounts = append(mounts, getVolumeMountSpec(file.Key, tfVarsMountPath, true))
}

mounts = append(mounts, t.getRunnerSpecificVolumeMounts()...)

return mounts
}

func getInitContainersSpec(configMap *corev1.ConfigMap, t *Terraform) []corev1.Container {
// returns the initContainers definition for the run job
func getInitContainersSpec(t *Terraform) []corev1.Container {
containers := []corev1.Container{}

cpModule := fmt.Sprintf("cp %s/main.tf %s/main.tf", conifgMapModuleMountPath, moduleWorkingDirMountPath)

commands := []string{
"/bin/sh",
"-c",
}

args := []string{
cpModule,
}

containers = append(containers, corev1.Container{
Name: "busybox",
Image: "busybox",
VolumeMounts: t.getRunnerSpecificVolumeMounts(configMap),
Command: []string{
"/bin/sh",
"-c",
fmt.Sprintf("cp -a %s/. %s/", conifgMapModuleMountPath, moduleWorkingDir),
},
Image: getBusyboxDockerImage(),
VolumeMounts: t.getRunnerSpecificVolumeMounts(),
Command: commands,
Args: args,
})

return containers
}
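// With the constants above, the init container effectively runs
//   /bin/sh -c "cp /terraform/modules/main.tf /tmp/tfmodule/main.tf"
// i.e. it copies main.tf from the ConfigMap module mount into the writable
// emptyDir working directory before the terraform runner container starts.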

// returns a Kubernetes job struct to run the terraform
func getJobSpecForRun(
t *Terraform,
configMap *corev1.ConfigMap,
secret *corev1.Secret,
owner metav1.OwnerReference) *batchv1.Job {
func getJobSpecForRun(t *Terraform, owner metav1.OwnerReference) *batchv1.Job {

envVars := append(getEnvVariables(t.Spec.Variables), t.getRunnerSpecificEnvVars(secret)...)
volumes := append(getJobVolumes(t.Spec.VariableFiles), t.getRunnerSpecificVolumes(configMap)...)
mounts := append(getJobVolumeMounts(t.Spec.VariableFiles), t.getRunnerSpecificVolumeMounts(configMap)...)
envVars := t.getEnvVariables()
volumes := t.getJobVolumes()
mounts := t.getJobVolumeMounts()

job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
@@ -169,11 +198,11 @@
},
Spec: corev1.PodSpec{
ServiceAccountName: "terraform-runner",
InitContainers: getInitContainersSpec(configMap, t),
InitContainers: getInitContainersSpec(t),
Containers: []corev1.Container{
{
Name: "terraform",
Image: getTerraformDockerImage(),
Image: getTerraformRunnerDockerImage(),
VolumeMounts: mounts,
Env: envVars,
ImagePullPolicy: corev1.PullIfNotPresent,
@@ -211,7 +240,7 @@ func createJobForRun(run *Terraform, configMap *corev1.ConfigMap, secret *corev1

ownerRef := run.GetOwnerReference()

job := getJobSpecForRun(run, configMap, secret, ownerRef)
job := getJobSpecForRun(run, ownerRef)

if _, err := jobs.Create(context.TODO(), job, metav1.CreateOptions{}); err != nil {
return nil, err
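Net effect of the k8s_jobs.go refactor: the job spec is now derived entirely from the Terraform resource itself, so the caller no longer threads the module ConfigMap and output Secret through. Before and after, using only the signatures visible in this diff:

// before: the caller passed the module ConfigMap and output Secret explicitly
job := getJobSpecForRun(run, configMap, secret, ownerRef)

// after: the names are recomputed internally via getUniqueResourceName(t.Name, t.Status.RunId)
job := getJobSpecForRun(run, ownerRef)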