oauth2 proxy https by default #16

Merged: 4 commits, Jul 13, 2023
3 changes: 2 additions & 1 deletion .devcontainer.json
@@ -7,7 +7,8 @@
"ms-vscode.makefile-tools",
"golang.go",
"github.vscode-github-actions",
"yzhang.markdown-all-in-one"
"yzhang.markdown-all-in-one",
"pomdtr.excalidraw-editor"
]
}
}
3 changes: 2 additions & 1 deletion README.md
@@ -60,7 +60,7 @@ Content is based on:
#### :grey_exclamation: Requirements

- A working Kubernetes instance >1.24
- if you are in hurry:
- if you are in a hurry:
- `curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.25.11+k3s1 INSTALL_K3S_EXEC="--tls-san X.X.X.X" sh -s - --disable traefik --disable metrics-server`
- you only need `--tls-san X.X.X.X` if your machine has a Floating IP attached
- `k3s kubectl get node` to check whether the cluster is ready
@@ -147,6 +147,7 @@ Here's the complete list of every customizable environment variable. When specif
| SBATCHPATH | path to your Slurm's sbatch binary. Overwrites SbatchPath. |
| SCANCELPATH | path to your Slurm's scancel binary. Overwrites ScancelPath. |
| VKTOKENFILE | path to a file containing your token for OAuth2 proxy authentication. Overwrites VKTokenFile. |
| SHARED_FS | set this env to "true" to save ConfigMap values into files mounted directly into the Singularity containers, instead of using environment variables to create them later (see the example below this table) |
| CUSTOMKUBECONF | path to a service account kubeconfig |
| TSOCKS | true or false, to use tsocks library allowing proxy networking. Working on Slurm sidecar at the moment. Overwrites Tsocks. |
| TSOCKSPATH | path to your tsocks library. Overwrites TsocksPath. |
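
For illustration only, the sketch below shows how a few of the variables from this table, including the new SHARED_FS flag, might be exported before launching the interLink components; the concrete paths and values are assumptions, not taken from the repository.

```bash
# Hedged sketch: example environment for the Slurm sidecar.
# All paths below are placeholders; adjust them to your installation.
export SHARED_FS="true"                      # write ConfigMap values straight to files on a shared filesystem
export VKTOKENFILE="$HOME/.interlink/token"  # token for OAuth2 proxy authentication
export SBATCHPATH="/usr/bin/sbatch"          # Slurm sbatch binary
export SCANCELPATH="/usr/bin/scancel"        # Slurm scancel binary
```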
15 changes: 9 additions & 6 deletions docs/itwinctl.sh
@@ -32,6 +32,8 @@ AUTHORIZED_GROUPS="${AUTHORIZED_GROUPS:-intw}"
AUTHORIZED_AUD="${AUTHORIZED_AUD:-intertw-vk}"
API_HTTP_PORT="${API_HTTP_PORT:-8080}"
API_HTTPS_PORT="${API_HTTPS_PORT:-443}"
export HOSTCERT="${HOSTCERT:-/etc/hostcert.pem}"
export HOSTKEY="${HOSTKEY:-/etc/hostkey.pem}"
export INTERLINKPORT="${INTERLINKPORT:-3000}"
export INTERLINKURL="${INTERLINKURL:-http://0.0.0.0}"
export INTERLINKPORT="${INTERLINKPORT:-3000}"
@@ -49,7 +51,7 @@ install () {
curl -o $HOME/.config/interlink/InterLinkConfig.yaml https://raw.githubusercontent.com/intertwin-eu/interLink/main/kustomizations/InterLinkConfig.yaml

## Download binaries to $HOME/.local/interlink/bin
curl -L -o interlink.tar.gz https://github.com/intertwin-eu/interLink/releases/download/${VERSION}/interLink_${VERSION}_${OS}_$(uname -m).tar.gz \
curl -L -o interlink.tar.gz https://github.com/intertwin-eu/interLink/releases/download/${VERSION}/interLink_$(uname -s)_$(uname -m).tar.gz \
&& tar -xzvf interlink.tar.gz -C $HOME/.local/interlink/bin/
rm interlink.tar.gz

@@ -73,7 +75,7 @@ start () {
$HOME/.local/interlink/bin/oauth2-proxy-v7.4.0.linux-$OSARCH/oauth2-proxy \
--client-id DUMMY \
--client-secret DUMMY \
--http-address http://0.0.0.0:$API_HTTP_PORT \
--http-address 0.0.0.0:$API_HTTP_PORT \
--oidc-issuer-url $OIDC_ISSUER \
--pass-authorization-header true \
--provider oidc \
@@ -86,10 +88,11 @@ start () {
--email-domain=* \
--cookie-secret 2ISpxtx19fm7kJlhbgC4qnkuTlkGrshY82L3nfCSKy4= \
--skip-auth-route="*='*'" \
--skip-jwt-bearer-tokens true &> $HOME/.local/interlink/logs/oauth2-proxy.log &
# --https-address http://0.0.0.0:$API_HTTPS_PORT \
# --tls-cert-file $HOME/.local/interlink/cert.pem \
# --tls-key-file $HOME/.local/interlink/key.pem \
--force-https \
--https-address 0.0.0.0:$API_HTTPS_PORT \
--tls-cert-file ${HOSTCERT} \
--tls-key-file ${HOSTKEY} \
--skip-jwt-bearer-tokens true > $HOME/.local/interlink/logs/oauth2-proxy.log 2>&1 &

echo $! > $HOME/.local/interlink/oauth2-proxy.pid
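
A quick sanity check could confirm that the proxy now terminates TLS on the new port; oauth2-proxy serves a `/ping` health endpoint by default, and `-k` is only needed while the host certificate is self-signed. This is a hedged sketch, not part of the script above.

```bash
# Hedged sketch: verify oauth2-proxy is listening on the HTTPS port.
curl -sk "https://localhost:${API_HTTPS_PORT:-443}/ping" \
  && echo "oauth2-proxy is serving HTTPS" \
  || tail -n 20 "$HOME/.local/interlink/logs/oauth2-proxy.log"
```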

127 changes: 79 additions & 48 deletions pkg/sidecars/slurm/aux.go
@@ -22,6 +22,8 @@ type JidStruct struct {
Pod v1.Pod
}

var prefix string

func prepare_envs(container v1.Container) []string {
env := make([]string, 1)
env = append(env, "--env")
@@ -67,16 +69,28 @@ func prepare_mounts(container v1.Container, pod *v1.Pod) []string {

if podVolumeSpec != nil && podVolumeSpec.ConfigMap != nil {

configMapsPaths := mountConfigMaps(container, pod)
configMapsPaths, envs := mountConfigMaps(container, pod)
fmt.Println(configMapsPaths)
for _, path := range configMapsPaths {
for i, path := range configMapsPaths {
if strings.Compare(os.Getenv("SHARED_FS"), "true") != 0 {
dirs := strings.Split(path, ":")
splitDirs := strings.Split(dirs[0], "/")
dir := filepath.Join(splitDirs[:len(splitDirs)-1]...)
prefix += "\nmkdir -p " + dir + " && touch " + dirs[0] + " && echo $" + envs[i] + " > " + dirs[0]
}
mount_data += path
}

} else if podVolumeSpec != nil && podVolumeSpec.Secret != nil {
secretsPaths := mountSecrets(container, pod)
secretsPaths, envs := mountSecrets(container, pod)
fmt.Println(secretsPaths)
for _, path := range secretsPaths {
for i, path := range secretsPaths {
if strings.Compare(os.Getenv("SHARED_FS"), "true") != 0 {
dirs := strings.Split(path, ":")
splitDirs := strings.Split(dirs[0], "/")
dir := filepath.Join(splitDirs[:len(splitDirs)-1]...)
prefix += "\nmkdir -p " + dir + " && touch " + dirs[0] + " && echo $" + envs[i] + " > " + dirs[0]
}
mount_data += path
}
} else if podVolumeSpec != nil && podVolumeSpec.EmptyDir != nil {
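
When SHARED_FS is not "true", each ConfigMap or Secret value is exported as an environment variable named `<container>_CFG_<key>` or `<container>_SECRET_<key>`, and the prefix built above injects shell commands into the generated job script to recreate the files on the remote side. One such generated line might look roughly like the sketch below, where the directory, container name ("main") and key ("settings") are made-up placeholders.

```bash
# Hedged sketch of a prefix line emitted when SHARED_FS != "true";
# the path, container name and key are placeholders for illustration.
mkdir -p .interlink/configMaps/mypod/main \
  && touch .interlink/configMaps/mypod/main/settings \
  && echo $main_CFG_settings > .interlink/configMaps/mypod/main/settings
```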
@@ -87,7 +101,7 @@ func prepare_mounts(container v1.Container, pod *v1.Pod) []string {
/* path = filepath.Join(commonIL.InterLinkConfigInst.DataRootFolder, pod.Namespace+"-"+string(pod.UID)+"/", mount_var.Name)
path = (".knoc/" + strings.Join(pod_name, "-") + "/" + mount_var.Name + ":" + mount_var.MountPath + ",")
mount_data += path */
log.Println("To be implemented")
log.Println("\n*******************\n*To be implemented*\n*******************")
}
}
}
Expand All @@ -106,6 +120,8 @@ func produce_slurm_script(container v1.Container, metadata metav1.ObjectMeta, co
newpath := filepath.Join(".", ".tmp")
err := os.MkdirAll(newpath, os.ModePerm)
f, err := os.Create(".tmp/" + container.Name + ".sh")
postfix := ""

if err != nil {
log.Fatalln("Cant create slurm_script")
}
@@ -126,9 +142,6 @@
sbatch_flags_as_string += "\n#SBATCH " + slurm_flag
}

prefix := ""
postfix := ""

if commonIL.InterLinkConfigInst.Tsocks {
postfix += "\n\nkill -15 $SSH_PID &> log2.txt"

@@ -219,10 +232,10 @@ func delete_container(container v1.Container) {
exec.Command("rm", "-rf", " .knoc/"+container.Name)
}

func mountConfigMaps(container v1.Container, pod *v1.Pod) []string { //returns an array containing mount paths for configMaps

func mountConfigMaps(container v1.Container, pod *v1.Pod) ([]string, []string) { //returns an array containing mount paths for configMaps
configMaps := make(map[string]string)
var configMapNamePaths []string
var envs []string

if commonIL.InterLinkConfigInst.ExportPodData {
cmd := []string{"-rf " + commonIL.InterLinkConfigInst.DataRootFolder + "/configMaps"}
@@ -276,45 +289,55 @@ func mountConfigMaps(container v1.Container, pod *v1.Pod) []string { //returns a
path := filepath.Join(podConfigMapDir, key)
path += (":" + mountSpec.MountPath + "/" + key + ",")
configMapNamePaths = append(configMapNamePaths, path)

if strings.Compare(os.Getenv("SHARED_FS"), "true") != 0 {
env := string(container.Name) + "_CFG_" + key
os.Setenv(env, value)
envs = append(envs, env)
}

}
}

if configMaps == nil {
continue
}

cmd = []string{"-p " + podConfigMapDir}
shell = exec2.ExecTask{
Command: "mkdir",
Args: cmd,
Shell: true,
}
if strings.Compare(os.Getenv("SHARED_FS"), "true") == 0 {
cmd = []string{"-p " + podConfigMapDir}
shell = exec2.ExecTask{
Command: "mkdir",
Args: cmd,
Shell: true,
}

execReturn, err := shell.Execute()
if err != nil {
log.Panicln(err)
}
execReturn, _ = shell.Execute()
if strings.Compare(execReturn.Stdout, "") != 0 {
log.Panicln(err)
}

log.Printf("%v", "create dir for configmaps "+podConfigMapDir)
log.Printf("%v", "create dir for configmaps "+podConfigMapDir)

for k, v := range configMaps {
// TODO: Ensure that these files are deleted in failure cases
fullPath := filepath.Join(podConfigMapDir, k)
os.WriteFile(fullPath, []byte(v), mode)
if err != nil {
fmt.Printf("Could not write configmap file %s", fullPath)
for k, v := range configMaps {
// TODO: Ensure that these files are deleted in failure cases
fullPath := filepath.Join(podConfigMapDir, k)
os.WriteFile(fullPath, []byte(v), mode)
if err != nil {
fmt.Printf("Could not write configmap file %s", fullPath)
}
}
}
}
}
}
}
return configMapNamePaths
return configMapNamePaths, envs
}

func mountSecrets(container v1.Container, pod *v1.Pod) []string { //returns an array containing mount paths for secrets
func mountSecrets(container v1.Container, pod *v1.Pod) ([]string, []string) { //returns an array containing mount paths for secrets
secrets := make(map[string][]byte)
var secretNamePaths []string
var envs []string

if commonIL.InterLinkConfigInst.ExportPodData {
cmd := []string{"-rf " + commonIL.InterLinkConfigInst.DataRootFolder + "/secrets"}
@@ -369,39 +392,47 @@ func mountSecrets(container v1.Container, pod *v1.Pod) []string { //returns an a
path := filepath.Join(podSecretDir, key)
path += (":" + mountSpec.MountPath + "/" + key + ",")
secretNamePaths = append(secretNamePaths, path)

if strings.Compare(os.Getenv("SHARED_FS"), "true") != 0 {
env := string(container.Name) + "_SECRET_" + key
os.Setenv(env, string(value))
envs = append(envs, env)
}
}
}

if secrets == nil {
continue
}

cmd = []string{"-p " + podSecretDir}
shell = exec2.ExecTask{
Command: "mkdir",
Args: cmd,
Shell: true,
}
if strings.Compare(os.Getenv("SHARED_FS"), "true") == 0 {
cmd = []string{"-p " + podSecretDir}
shell = exec2.ExecTask{
Command: "mkdir",
Args: cmd,
Shell: true,
}

execReturn, err := shell.Execute()
if err != nil {
log.Print(err)
}
log.Printf("%v", "create dir for secrets "+podSecretDir)

for k, v := range secrets {
// TODO: Ensure that these files are deleted in failure cases
fullPath := filepath.Join(podSecretDir, k)
os.WriteFile(fullPath, v, mode)
if err != nil {
log.Printf("Could not write secrets file %s", fullPath)
execReturn, _ := shell.Execute()
if strings.Compare(execReturn.Stdout, "") != 0 {
log.Print(err)
}
log.Printf("%v", "create dir for secrets "+podSecretDir)

for k, v := range secrets {
// TODO: Ensure that these files are deleted in failure cases
fullPath := filepath.Join(podSecretDir, k)
os.WriteFile(fullPath, v, mode)
if err != nil {
log.Printf("Could not write secrets file %s", fullPath)
}
}
}
}
}
}
}
return secretNamePaths
return secretNamePaths, envs
}

func mountEmptyDir(container v1.Container, pod *v1.Pod) string {