From c449a5ddadd470aa7f8e6621436b07aac08c7f05 Mon Sep 17 00:00:00 2001 From: SouadSEBAA Date: Wed, 14 Dec 2022 10:00:14 +0100 Subject: [PATCH] initial commit --- .gitignore | 4 + .scripts/auto-all/challenge.py | 549 ++++++++++++++++++ .scripts/auto-all/conf.py | 101 ++++ .scripts/auto-all/script.py | 199 +++++++ .scripts/auto-all/templates/haproxy.cfg.j2 | 169 ++++++ .scripts/auto-all/templates/http-hosts.map.j2 | 6 + .scripts/auto-all/templates/sni.map.j2 | 2 + .scripts/commit.sh | 7 + .scripts/ctfd/all-challs.sh | 13 + .scripts/ctfd/cmd-all.sh | 9 + .scripts/ctfd/hide-all.sh | 9 + .scripts/ctfd/show-all.sh | 9 + .scripts/ctfd/state.sh | 34 ++ .scripts/ctfd/syncpush.sh | 18 + .scripts/deploy-chal/all.sh | 10 + .scripts/deploy-chal/config.py | 11 + .scripts/deploy-chal/gen-deploy.sh | 11 + .scripts/deploy-chal/generate.sh | 9 + .scripts/deploy-chal/script.py | 64 ++ .../deploy-chal/templates/deployment.yml.j2 | 45 ++ .scripts/deploy-chal/templates/service.yml.j2 | 18 + .scripts/gcp-firewall/allow.sh | 10 + .scripts/gcp-firewall/block-all.sh | 5 + .scripts/gcp-firewall/block-ctfd.sh | 5 + .../block-docker-challenges-all.sh | 5 + .scripts/gcp-firewall/block-http.sh | 5 + .scripts/gcp-firewall/create-all.sh | 20 + .scripts/gcp-firewall/create-deny-all.sh | 11 + .scripts/gcp-firewall/deny.sh | 10 + .scripts/gcp-firewall/unblock-all.sh | 5 + .scripts/gcp-firewall/unblock-ctfd.sh | 5 + .../unblock-docker-challenges-all.sh | 5 + .scripts/gcp-firewall/unblock-http.sh | 5 + .scripts/k8s/all-down.sh | 11 + .scripts/k8s/all-restart.sh | 12 + .scripts/k8s/all-up.sh | 11 + .scripts/k8s/reapply.sh | 4 + .scripts/launch.sh | 4 + .scripts/unique-flag.sh | 13 + CHALS.md | 13 + README.md | 2 + config/chals.json | 1 + config/ctfcli_chals.json | 2 + config/ports.yml | 8 + helm-charts/ctf-challenge-chart-0.1.0.tgz | Bin 0 -> 2348 bytes helm-charts/ctf-challenge-chart/.helmignore | 23 + helm-charts/ctf-challenge-chart/Chart.yaml | 24 + .../templates/_helpers.tpl | 62 ++ .../templates/deployment.yaml | 110 ++++ .../templates/service.yaml | 33 ++ helm-charts/ctf-challenge-chart/values.yaml | 47 ++ leetify.py | 28 + 52 files changed, 1796 insertions(+) create mode 100644 .gitignore create mode 100644 .scripts/auto-all/challenge.py create mode 100644 .scripts/auto-all/conf.py create mode 100755 .scripts/auto-all/script.py create mode 100644 .scripts/auto-all/templates/haproxy.cfg.j2 create mode 100644 .scripts/auto-all/templates/http-hosts.map.j2 create mode 100644 .scripts/auto-all/templates/sni.map.j2 create mode 100755 .scripts/commit.sh create mode 100755 .scripts/ctfd/all-challs.sh create mode 100755 .scripts/ctfd/cmd-all.sh create mode 100755 .scripts/ctfd/hide-all.sh create mode 100755 .scripts/ctfd/show-all.sh create mode 100755 .scripts/ctfd/state.sh create mode 100755 .scripts/ctfd/syncpush.sh create mode 100755 .scripts/deploy-chal/all.sh create mode 100644 .scripts/deploy-chal/config.py create mode 100755 .scripts/deploy-chal/gen-deploy.sh create mode 100755 .scripts/deploy-chal/generate.sh create mode 100755 .scripts/deploy-chal/script.py create mode 100644 .scripts/deploy-chal/templates/deployment.yml.j2 create mode 100644 .scripts/deploy-chal/templates/service.yml.j2 create mode 100755 .scripts/gcp-firewall/allow.sh create mode 100755 .scripts/gcp-firewall/block-all.sh create mode 100755 .scripts/gcp-firewall/block-ctfd.sh create mode 100755 .scripts/gcp-firewall/block-docker-challenges-all.sh create mode 100755 .scripts/gcp-firewall/block-http.sh create mode 100755 
.scripts/gcp-firewall/create-all.sh create mode 100755 .scripts/gcp-firewall/create-deny-all.sh create mode 100755 .scripts/gcp-firewall/deny.sh create mode 100755 .scripts/gcp-firewall/unblock-all.sh create mode 100755 .scripts/gcp-firewall/unblock-ctfd.sh create mode 100755 .scripts/gcp-firewall/unblock-docker-challenges-all.sh create mode 100755 .scripts/gcp-firewall/unblock-http.sh create mode 100755 .scripts/k8s/all-down.sh create mode 100755 .scripts/k8s/all-restart.sh create mode 100755 .scripts/k8s/all-up.sh create mode 100755 .scripts/k8s/reapply.sh create mode 100755 .scripts/launch.sh create mode 100755 .scripts/unique-flag.sh create mode 100644 CHALS.md create mode 100644 README.md create mode 100644 config/chals.json create mode 100644 config/ctfcli_chals.json create mode 100644 config/ports.yml create mode 100644 helm-charts/ctf-challenge-chart-0.1.0.tgz create mode 100644 helm-charts/ctf-challenge-chart/.helmignore create mode 100644 helm-charts/ctf-challenge-chart/Chart.yaml create mode 100644 helm-charts/ctf-challenge-chart/templates/_helpers.tpl create mode 100644 helm-charts/ctf-challenge-chart/templates/deployment.yaml create mode 100644 helm-charts/ctf-challenge-chart/templates/service.yaml create mode 100644 helm-charts/ctf-challenge-chart/values.yaml create mode 100755 leetify.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ab13d6c --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.ctf/ +__pycache__ +node_modules +.vscode \ No newline at end of file diff --git a/.scripts/auto-all/challenge.py b/.scripts/auto-all/challenge.py new file mode 100644 index 0000000..8e16539 --- /dev/null +++ b/.scripts/auto-all/challenge.py @@ -0,0 +1,549 @@ +from genericpath import isfile +import yaml +import json +from conf import * +import os +from sys import stderr +import subprocess + +# Track if we're logged in to Helm registry or not +LOGGED_IN = False if USE_REMOTE_REPO else True + +class Challenge: + def __init__(self, name=None, type=None, category=None, autoban=None, path=None, nodeport=None, deployed=None, wave=None): + self.name = name + self.subdomain = name + self.type = type + self.category = category + self.autoban = autoban + self.path = path + self.nodeport = nodeport + self.port = Challenge.port_from_nodeport(nodeport) + self.deployed = deployed + self.wave = wave + + def __repr__(self): + return "%s(name=%r, category=%r, type=%r)" % ( + self.__class__.__name__, self.name, self.category, self.type) + + def todict(self): + return { + self.name: { + "name": self.name, + "subdomain": self.subdomain, + "type": self.type, + "category": self.category, + "autoban": self.autoban, + "path": self.path, + "nodeport": self.nodeport, + "port": self.port, + } + } + + @staticmethod + def nodeport_from_port(port): + if port is None: + return None + else: + return port - PORT_MOD + NODEPORT_START + + @staticmethod + def port_from_nodeport(nodeport): + if nodeport is None: + return None + else: + return nodeport + PORT_MOD - NODEPORT_START + +class DeployException(Exception): + pass + +class HelmDeployException(DeployException): + pass +class FirewallDeployException(DeployException): + pass + +def ymlpath(chalpath): + return f"{chalpath}/{YML_FILE}" + +def dockerfile_path(chalpath): + return f"{chalpath}/{DOCKERFILE_NAME}" + +def log(msg, debug=DEBUG): + debug and print(msg, file=stderr) + +# this if for any challenge +# chalpath as {category}/{challenge_name} +def load_any_chal(chalpath, quiet=False): + ymlfile = ymlpath(chalpath) + + if not 
os.path.isfile(ymlfile): + raise DeployException(f"No such file '{ymlfile}'") + + with open(ymlfile) as f: + chal_data = yaml.safe_load(f) + name = chal_data['name'] + not quiet and log(f"[*] Loading challenge '{name}'") + chal = Challenge( + name=chal_data.get("deployment", {"name": name})['name'], + type=chal_data.get("deployment", {"type": None})['type'], + category=chal_data['category'], + autoban=chal_data.get("deployment", {"autoban": None}).get('autoban'), + path=chalpath, + nodeport=chal_data.get("deployment", {"nodePort": None}).get('nodePort'), + deployed=chal_data.get("deployment", {"deployed": None}).get('deployed'), + wave=chal_data.get("wave", None), + ) + return chal + +# this is for dynamic deployable challenges +# chalpath as {category}/{challenge_name} +def load_chal(chalpath, warn=True): + ymlfile = ymlpath(chalpath) + + if not os.path.isfile(ymlfile): + raise DeployException(f"No such file '{ymlfile}'") + + with open(ymlfile) as f: + chal_data = yaml.safe_load(f) + name = chal_data['name'] + log(f"[*] Loading challenge '{name}'") + if not chal_data.get("deployment"): + warn and log(f"[!] Skipping challenge '{name}' as it doesn't contain a 'deployment' section") + chal = None + elif os.path.isfile(dockerfile_path(chalpath)): + chal = Challenge( + chal_data["deployment"]["name"], + chal_data["deployment"]["type"], + chal_data["category"], + chal_data["deployment"].get("autoban", AUTOBAN_DEFAULT), + chalpath, + chal_data["deployment"].get("nodePort", None), + chal_data["deployment"].get("deployed", False), + chal_data.get("wave", None), + ) + else: + warn and log(f"[!] Skipping challenge '{name}' as its type seems to be static ('deployment' section present but no '{DOCKERFILE_NAME}')") + chal = None + return chal + +def update_chal_data(chal_data, chal: Challenge): + if chal_data["type"] == DYNAMIC_CHAL_TYPE: + chal_data["extra"]["decay"] = DECAY + if "deployment" in chal_data and os.path.isfile(dockerfile_path(chal.path)): + if chal is not None: + if chal_data.get("connection_info") and chal.port is not None and PORT_PLACE_HOLDER in chal_data["connection_info"]: + chal_data["connection_info"] = chal_data["connection_info"].replace(PORT_PLACE_HOLDER, f"{chal.port}") + log(f"[*] Updated connection info for '{chal.name}': {chal_data['connection_info']}") + if chal.nodeport is not None: + chal_data["deployment"]["nodePort"] = chal.nodeport + chal_data["deployment"]["deployed"] = chal.deployed + elif chal_data["type"] == DOCKER_CHAL_TYPE: + if chal_data.get("docker_image") and PROJECT_ID_PLACE_HOLDER in chal_data['docker_image']: + chal_data['docker_image'] = chal_data['docker_image'].replace(PROJECT_ID_PLACE_HOLDER, PROJECT_ID) + +def dump_chal(chal: Challenge, chalpath): + with open(ymlpath(chalpath)) as f: + chal_data = yaml.safe_load(f) + with open(ymlpath(chalpath), 'w') as f: + update_chal_data(chal_data, chal) + yaml.safe_dump(chal_data, f, default_flow_style=False) + +def load_chals_json(): + with open(CHALLENGES_JSON_PATH) as f: + chals_json = json.load(f) + return chals_json + +def dump_chals_json(chals, override=OVERRIDE_CHALS_JSON): + if len(chals) > 0: + existing_chals = load_chals_json() + with open(CHALLENGES_JSON_PATH, 'w') as f: + new_chals = {} + for chal in chals: + new_chals.update(chal.todict()) + if override: + for chal_name in new_chals: + if chal_name in existing_chals: + log(f"[!]
Overriding challenge '{chal_name}' in '{CHALLENGES_JSON_PATH}'") + data = existing_chals | new_chals + else: + for chal_name in new_chals: + if chal_name in existing_chals: + log(f"[!] Skipping challenge '{chal_name}' as it already exists in '{CHALLENGES_JSON_PATH}'") + data = new_chals | existing_chals + json.dump(data, f, indent=INDENT) + log(f"[*] Challenges added to {CHALLENGES_JSON_PATH}") + else: + log(f"[*] No new challenges to add to '{CHALLENGES_JSON_PATH}'") + +def remove_chal_chals_json(chal: Challenge): + if chal is not None: + existing_chals = load_chals_json() + if chal.name not in existing_chals: + log(f"[!] Cannot remove '{chal.name}' from '{CHALLENGES_JSON_PATH}' as it does not exist") + else: + with open(CHALLENGES_JSON_PATH, 'w') as f: + existing_chals.pop(chal.name) + json.dump(existing_chals, f, indent=INDENT) + log(f"[*] '{chal.name}' removed from '{CHALLENGES_JSON_PATH}'") + +def load_ports(): + with open(PORTS_PATH) as f: + ports = yaml.safe_load(f) + return ports + +def dump_ports(ports): + with open(PORTS_PATH, 'w') as f: + yaml.safe_dump(ports, f, default_flow_style=False) + +def assign_port(chal: Challenge, ports): + if chal.nodeport is not None: + log(f"[!] Ports (port={chal.port}, nodeport={chal.nodeport}) were already set for challenge '{chal.name}'") + else: + port = ports[chal.category] + nodeport = Challenge.nodeport_from_port(port) + chal.port = port + chal.nodeport = nodeport + log(f"[*] Ports (port={port}, nodeport={nodeport}) set to challenge '{chal.name}'") + ports[chal.category] += 1 + +def get_chalpaths(wave=None, reverse=False): + chalpaths = [] + for category in CHAL_DIRS: + if os.path.isdir(category): + for name in os.listdir(category): + chalpath = f"{category}/{name}" + if os.path.isdir(chalpath) and os.path.isfile(ymlpath(chalpath)): + if wave is not None: + chal = load_any_chal(chalpath, quiet=True) + if reverse ^ (chal.wave == wave): + chalpaths.append(chalpath) + else: + chalpaths.append(chalpath) + return chalpaths + +# def update_chal(chalpath): +# chals = update_chals([chalpath]) +# if len(chals) > 0: +# return chals[0] +# else: +# return None + +def _deploy_chal(chalpath, ports, build, deploy, createfw, push_ctfd, overridefw=OVERRIDE_FW): + global LOGGED_IN + + chal = load_chal(chalpath) + if chal is None: + if push_ctfd: + ctfcli_push(chalpath, update=True) + else: + try: + assign_port(chal, ports) + if build: + _build_image(chal) + if deploy: + if chal.deployed: + log(f"[!] Challenge {chal.name} has already been deployed (deployment.deployed is set to true)") + else: + if not LOGGED_IN: + if not helm_login(): + raise HelmDeployException("Could not login to helm registry") + LOGGED_IN = True + dump_chal(chal, chalpath) + if not helm_install(chal): + raise HelmDeployException(f"Could not deploy '{chal.name}' to Kubernetes cluster") + chal.deployed = True + if _istcp(chal) and createfw: + rule = _fw_rule_name(chal) + if _fw_rule_exists(chal): + if overridefw: + log(f"[!] Firewall rule '{rule}' already exists but it will be overriden") + if not _delete_fw_rule(chal): + raise FirewallDeployException(f"Could not delete firewall rule '{rule}'") + if not _create_fw_rule(chal): + raise FirewallDeployException(f"Could not create firewall rule '{rule}'") + else: + log(f"[!] 
Firewall rule '{rule}' already exists, it will be enabled if not already") + if not _enable_fw_rule(chal): + raise FirewallDeployException(f"Could not enable firewall rule '{rule}'") + else: + if not _create_fw_rule(chal): + raise FirewallDeployException(f"Could not create firewall rule '{rule}'") + dump_chal(chal, chalpath) + if push_ctfd: + ctfcli_push(chal.path, update=False) + except HelmDeployException as e: + raise e + except FirewallDeployException as e: + log(f"[-] {e}") + if deploy and chal.deployed and LOGGED_IN: + if not helm_uninstall(chal): + raise HelmDeployException(f"[!] Could not undeploy '{chal.name}' from Kubernetes cluster") + raise e + return chal + +def undeploy_chal(chalpath, deletefw=DEFAULT_DELETE_FW): + global LOGGED_IN + + chal = load_chal(chalpath) + if chal is not None: + if chal.deployed: + if not LOGGED_IN: + if not helm_login(): + raise HelmDeployException("Could not login to helm registry") + LOGGED_IN = True + if not helm_uninstall(chal): + raise HelmDeployException(f"[!] Could not undeploy '{chal.name}' from Kubernetes cluster") + chal.deployed = False + rule = _fw_rule_name(chal) + if _istcp(chal): + if not _fw_rule_exists(chal): + log(f"[!] Firewall rule '{rule}' does not exist") + else: + if deletefw: + if not _delete_fw_rule(chal): + raise FirewallDeployException(f"Could not delete firewall rule '{rule}'") + else: + if not _disable_fw_rule(chal): + raise FirewallDeployException(f"Could not disable firewall rule '{rule}'") + dump_chal(chal, chalpath) + else: + log(f"[!] Challenge {chal.name} is not currently deployed (deployment.deployed is set to false)") + remove_chal_chals_json(chal) + return chal + +def dump(chals, ports): + dump_chals_json(chals) + dump_ports(ports) + +def deploy_chals(chalpaths, build, deploy, createfw, push_ctfd): + ports = load_ports() + chals = [] + ex = None + try: + for chalpath in chalpaths: + chal = _deploy_chal(chalpath, ports, build, deploy, createfw, push_ctfd) + if chal is not None: + chals.append(chal) + except DeployException as e: + ex = e + finally: + dump(chals, ports) + if ex is not None: + raise ex + return chals + +def undeploy_chals(chalpaths, deletefw=DEFAULT_DELETE_FW): + for chalpath in chalpaths: + undeploy_chal(chalpath, deletefw) + +# Helm + +def helm_login(): + if not USE_REMOTE_REPO: + log(f"[*] Using local chart") + return True + else: + log(f"[*] Logging in to Helm registry") + return os.system(f'gcloud auth print-access-token | helm registry login -u oauth2accesstoken --password-stdin {HELM_REGISTRY}') == 0 + +def helm_install(chal: Challenge): + # helm install ${challenge_name} oci://europe-west3-docker.pkg.dev/gdg-ctf-2022/gdg-ctf-helm-repo/ctf-challenge-chart --version 0.1.0 -f challenge.yml + log(f"[*] Deploying '{chal.name}' to Kubernetes cluster") + return subprocess.run(["helm", "install", chal.name, HELM_CHART_REPO, "--version", HELM_CHART_VERSION, "-f", ymlpath(chal.path)]).returncode == 0 + +def helm_uninstall(chal: Challenge): + log(f"[*] Undeploying '{chal.name}' from Kubernetes cluster") + return subprocess.run(["helm", "uninstall", chal.name]).returncode == 0 + +# Firewall + +# For fw functions, it is assumed that the challenge is of type TCP + +def _fw_rule_name(chal: Challenge): + return f"allow-{chal.category}-{chal.name}" + +def _istcp(chal: Challenge): + return chal.type == TCP_TYPE + +def _fw_rule_exists(chal: Challenge): + rule = _fw_rule_name(chal) + return subprocess.run(["gcloud", "compute", "firewall-rules", "describe", rule], stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL).returncode == 0 + +def _create_fw_rule(chal: Challenge): + rule = _fw_rule_name(chal) + log(f"[*] Creating firewall rule '{rule}'") + return subprocess.run(["gcloud", "compute", "firewall-rules", "create", rule, "--direction=INGRESS", f"--priority={FW_PRIORITY}", "--network=default", "--action=ALLOW", f"--rules=tcp:{chal.port}", f"--source-ranges={FW_SOURCE_RANGES}", f"--target-tags={FW_TARGET_TAGS}"]).returncode == 0 + +def _delete_fw_rule(chal: Challenge): + rule = _fw_rule_name(chal) + log(f"[*] Deleting firewall rule '{rule}'") + return subprocess.run(["gcloud", "compute", "firewall-rules", "delete", rule]).returncode == 0 + +def _enable_fw_rule(chal: Challenge): + rule = _fw_rule_name(chal) + log(f"[*] Enabling firewall rule '{rule}'") + return subprocess.run(["gcloud", "compute", "firewall-rules", "update", rule, "--no-disabled"]).returncode == 0 + +def _disable_fw_rule(chal: Challenge): + rule = _fw_rule_name(chal) + log(f"[*] Disabling firewall rule '{rule}'") + return subprocess.run(["gcloud", "compute", "firewall-rules", "update", rule, "--disabled"]).returncode == 0 + +# "Exported" functions + +def create_fw(chalpath, skipcheck=FW_SKIP_CHECK): + chal = load_chal(chalpath) + if chal is None: + return None + if not _istcp(chal): + log(f"[!] '{chal.name}' is a '{chal.type}' challenge (not '{TCP_TYPE}'), no firewall rule to create") + else: + if not skipcheck and _fw_rule_exists(chal): + log(f"[!] Firewall rule already exists for '{chal.name}'") + else: + return _create_fw_rule(chal) + +def delete_fw(chalpath, skipcheck=FW_SKIP_CHECK): + chal = load_chal(chalpath) + if chal is None: + return None + if not _istcp(chal): + log(f"[!] '{chal.name}' is a '{chal.type}' challenge (not '{TCP_TYPE}'), no firewall rule to delete") + else: + if not skipcheck and not _fw_rule_exists(chal): + log(f"[!] No firewall rule found for '{chal.name}'") + else: + return _delete_fw_rule(chal) + +def enable_fw(chalpath, skipcheck=FW_SKIP_CHECK): + chal = load_chal(chalpath) + if chal is None: + return None + if not _istcp(chal): + log(f"[!] '{chal.name}' is a '{chal.type}' challenge (not '{TCP_TYPE}'), no firewall rule to enable") + else: + if not skipcheck and not _fw_rule_exists(chal): + log(f"[!] No firewall rule found for '{chal.name}'") + else: + return _enable_fw_rule(chal) + +def disable_fw(chalpath, skipcheck=FW_SKIP_CHECK): + chal = load_chal(chalpath) + if chal is None: + return None + if not _istcp(chal): + log(f"[!] '{chal.name}' is a '{chal.type}' challenge (not '{TCP_TYPE}'), no firewall rule to disable") + else: + if not skipcheck and not _fw_rule_exists(chal): + log(f"[!] 
No firewall rule found for '{chal.name}'") + else: + return _disable_fw_rule(chal) + +# Build + +def _build_image(chal: Challenge): + if chal is not None: + for dir in DOCKERFILE_DIRS: + dockerfile = f"{chal.path}/{dir}/{DOCKERFILE_NAME}" + if os.path.isfile(dockerfile): + return subprocess.run(["gcloud", "builds", "submit", "--tag", f"{GCR_REPO}/{chal.name}"], cwd=f"{chal.path}/{dir}").returncode == 0 + else: + raise DeployException(f"Cannot find {DOCKERFILE_NAME} in {chal.path}") + +def build_image(chalpath): + chal = load_chal(chalpath) + return _build_image(chal) + +# ssh/scp + +# scp local to remote +def gcloud_scp_l2r(localpath, remotepath, user, instance, zone): + return subprocess.run(["gcloud", "compute", "scp", localpath, f"{user}@{instance}:{remotepath}", "--zone", zone]).returncode == 0 + +# scp remote to local +def gcloud_scp_r2l(remotepath, localpath, user, instance, zone): + return subprocess.run(["gcloud", "compute", "scp", f"{user}@{instance}:{remotepath}", localpath, "--zone", zone]).returncode == 0 + +# run command through ssh +def gcloud_ssh_cmd(user, instance, zone, cmd): + return subprocess.run(["gcloud", "compute", "ssh", "--zone", zone, f"{user}@{instance}", f"--command={cmd}"]).returncode == 0 + +# ctfcli + +def _load_ctfcli_tracker(): + if not os.path.isfile(CTFCLI_CHAL_TRACKER_PATH): + raise DeployException(f"Cannot open ctfcli challenge tracker file: '{CTFCLI_CHAL_TRACKER_PATH}'") + with open(CTFCLI_CHAL_TRACKER_PATH) as f: + chalpaths = json.load(f) + return chalpaths + +def _ctfcli_chal_exists(chalpath): + chalpaths = _load_ctfcli_tracker() + return chalpath in chalpaths + +def _ctfcli_track_chal(chalpath): + chalpaths = _load_ctfcli_tracker() + if chalpath in chalpaths: + log(f"[!] '{chalpath}' is already tracked in '{CTFCLI_CHAL_TRACKER_PATH}'") + else: + with open(CTFCLI_CHAL_TRACKER_PATH, "w") as f: + chalpaths.append(chalpath) + json.dump(chalpaths, f, indent=INDENT) + +def _ctfcli_untrack_chal(chalpath): + chalpaths = _load_ctfcli_tracker() + if chalpath not in chalpaths: + log(f"[!] 
'{chalpath}' is not tracked in '{CTFCLI_CHAL_TRACKER_PATH}'") + else: + with open(CTFCLI_CHAL_TRACKER_PATH, "w") as f: + chalpaths.remove(chalpath) + json.dump(chalpaths, f, indent=INDENT) + +def _ctfcli_untrack_all(): + with open(CTFCLI_CHAL_TRACKER_PATH, "w") as f: + json.dump([], f, indent=INDENT) + +def _ctfcli_cmd(chalpath, cmd): + return subprocess.run([CTFCLI_CMD, "challenge", cmd, chalpath]).returncode + +def _ctfcli_plugin_chstate(chalpath, hide): + action = "hide" if hide else "unhide" + return subprocess.run([CTFCLI_CMD, "plugins", "chstate", action, chalpath]).returncode + +# def _ctfcli_add(chalpath): +# return _ctfcli_cmd(chalpath, "add") + +def _ctfcli_install(chalpath): + return _ctfcli_cmd(chalpath, "install") + +def _ctfcli_sync(chalpath): + return _ctfcli_cmd(chalpath, "sync") + +def ctfcli_push(chalpath, update): + if update: + log(f"[*] Updating '{ymlpath(chalpath)}'") + chal = load_chal(chalpath, warn=False) + dump_chal(chal, chalpath) + log(f"[*] Pushing '{chalpath}' to CTFd") + if _ctfcli_chal_exists(chalpath): + _ctfcli_sync(chalpath) + else: + _ctfcli_install(chalpath) + _ctfcli_track_chal(chalpath) + +def ctfcli_untrack(chalpath): + log(f"[*] Untracking '{chalpath}'") + return _ctfcli_untrack_chal(chalpath) + +def ctfcli_untrack_all(): + log(f"[*] Untracking all challenges") + return _ctfcli_untrack_all() + +def ctfcli_chstate(chalpath, state): + if state == CHSTATE_HIDDEN: + hide = True + elif state == CHSTATE_VISIBLE: + hide = False + else: + raise DeployException(f"Invalid state '{state}'") + return _ctfcli_plugin_chstate(chalpath, hide) diff --git a/.scripts/auto-all/conf.py b/.scripts/auto-all/conf.py new file mode 100644 index 0000000..b37284c --- /dev/null +++ b/.scripts/auto-all/conf.py @@ -0,0 +1,101 @@ +# script.py +DEBUG = True + +# GCP project +INSTANCE_NAME = "mp-haproxy-instance" +INSTANCE_ZONE = "europe-west1-b" +HAPROXY_USER = "root" +PROJECT_ID = "mentoring-program-371116" + +# HAProxy config +STATS_PORT = 8080 +STATS_USER = "shellmates" +STATS_PASSWORD = "4YALhtxXP4qAsqPNxRinok0UeXDu6H" +DOMAIN_NAME = "" +HAPROXY_ROOT = "/etc/haproxy" +HAPROXY_MAPS_ROOT = f"{HAPROXY_ROOT}/maps" +HTTP_HOSTS_MAP = "http-hosts.map" +SNI_MAP = "sni.map" +HTTP_HOSTS_MAP_PATH = f"{HAPROXY_MAPS_ROOT}/{HTTP_HOSTS_MAP}" +SNI_MAP_PATH = f"{HAPROXY_MAPS_ROOT}/{SNI_MAP}" +SSL_CERTIFICATE_PATH = f"/etc/haproxy/{DOMAIN_NAME}.pem" +NODES = [ + { + "name": "node1", + "ip": "10.132.0.5" + }, +] +HTTP_TYPE = "http" +TCP_TYPE = "tcp" +CHALLENGES_JSON_PATH = "config/chals.json" +TEMPLATES_DIR = "templates" +HAPROXY_CFG = "haproxy.cfg" +HAPROXY_CONFIG_DIR = "config/haproxy" +DEPLOY = True + +# Helm config +USE_REMOTE_REPO = False +HELM_REGISTRY = "" +HELM_CHART_REPO = "helm-charts/ctf-challenge-chart" +HELM_CHART_VERSION = "0.1.0" + +# IP blacklisting +IP_BAN_MINUTES = 2 # 2 minutes +CONN_RATE_SECONDS = 30 # 30 seconds +CONNS_PER_RATE = 50 # allow at most 50 connections in a 30s window (per IP) +CONCUR_CONNS = 25 # allow at most 25 concurrent connections (per IP) + +# CTFd decay +DECAY = 50 +MAX_NODES = 1 +# NODES_FQDN = "gke-nodes.internal" +NODES_FQDN = "10.132.0.5" +CTFD_LINK = "http://ctf.shellmates.club/" + +# challenge.py +NODEPORT_START = 30000 +PORT_MOD = 1000 +PORTS_PATH = "config/ports.yml" +YML_FILE = "challenge.yml" +INDENT = 4 +CHAL_DIRS = { + "crypto", + "forensics", + "jail", + "linux", + "misc", + "osint", + "pwn", + "reverse", + "web", + "general-skills", +} +AUTOBAN_DEFAULT = False +FW_PRIORITY = 1000 +FW_SOURCE_RANGES = "0.0.0.0/0" +FW_TARGET_TAGS = "haproxy" 
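+# Worked example of the port <-> nodePort mapping implemented by
+# Challenge.nodeport_from_port() and Challenge.port_from_nodeport() in
+# challenge.py (illustrative comment only, not part of the deployment logic):
+# external HAProxy port 1400 (start of the pwn range in CHALS.md) maps to
+# nodePort 1400 - PORT_MOD + NODEPORT_START = 1400 - 1000 + 30000 = 30400,
+# and the inverse formula recovers port 1400 from nodePort 30400.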
+FW_SKIP_CHECK = False +DYNAMIC_CHAL_TYPE = "dynamic" +DOCKER_CHAL_TYPE = "docker" +PORT_PLACE_HOLDER = "${PORT}" +PROJECT_ID_PLACE_HOLDER = "${PROJECT_ID}" + +# Override existing challenges in chals.json +OVERRIDE_CHALS_JSON = True + +# Override existing firewall rules +OVERRIDE_FW = False +DEFAULT_DELETE_FW = False +GCR_REPO = f"gcr.io/{PROJECT_ID}" +DOCKERFILE_DIRS = { + ".", + "challenge", + "app", +} +DOCKERFILE_NAME = "Dockerfile" + +# ctfcli +CTFCLI_CMD = "ctf" +CTFCLI_CHAL_TRACKER_PATH = "config/ctfcli_chals.json" +CHSTATE_VISIBLE = "visible" +CHSTATE_HIDDEN = "hidden" diff --git a/.scripts/auto-all/script.py b/.scripts/auto-all/script.py new file mode 100755 index 0000000..0c23ae7 --- /dev/null +++ b/.scripts/auto-all/script.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 + +from conf import * +import jinja2 +from os import path +import json +import re +from challenge import * +import argparse +import sys + +SCRIPT_ROOT = path.dirname(__file__) +JINJA_ENV = jinja2.Environment(loader=jinja2.FileSystemLoader(path.join(SCRIPT_ROOT, TEMPLATES_DIR)), trim_blocks=True, lstrip_blocks=True) +SUBDOMAIN_REGEX = "^[A-Za-z0-9](?:[A-Za-z0-9\-]{0,61}[A-Za-z0-9])?$" + +def argument_parser(): + parser = argparse.ArgumentParser(description="Auto assign challenge ports, update HAProxy configuration and deploy challenges to Kubernetes") + + parser.add_argument("-c", "--challenges", metavar="CHALLENGE", nargs="*", help="Paths to challenges") + parser.add_argument("-d", "--deploy", default=True, action="store_true", help="Assign port, deploy to HAProxy and Kubernetes, and create firewall rule (this is done by default)") + parser.add_argument("-u", "--undeploy", action="store_true", help="Undeploy from HAProxy and Kubernetes, and disable or delete firewall rule (default is to disable)") + parser.add_argument("--only-haproxy", action="store_true", help="Only update HAProxy configuration") + parser.add_argument("-A", "--all", action="store_true", help="Apply to all challenges") + parser.add_argument("--deploy-no-create-fw", action="store_true", help="Do not create firewall rule when using --deploy (default is to create)") + parser.add_argument("--undeploy-delete-fw", action="store_true", help="Delete firewall rule when using --undeploy (default is to disable not delete)") + parser.add_argument("--create-fw", action="store_true", help="Create firewall rule") + parser.add_argument("--delete-fw", action="store_true", help="Delete firewall rule") + parser.add_argument("--enable-fw", action="store_true", help="Enable firewall rule") + parser.add_argument("--disable-fw", action="store_true", help="Disable firewall rule") + parser.add_argument("--no-build", action="store_true", help="Do not build container image (default is to build)") + parser.add_argument("--only-build", action="store_true", help="Only build container image") + parser.add_argument("--only-push-ctfd", action="store_true", help="Only push to CTFd") + parser.add_argument("--no-push-ctfd", action="store_true", help="Do not push to CTFd (default is to push)") + parser.add_argument("--only-ctfcli-untrack", action="store_true", help="Untrack from ctfcli tracker") + parser.add_argument("-s", "--state", choices=[CHSTATE_HIDDEN, CHSTATE_VISIBLE], help="Change visibility state on CTFd") + parser.add_argument("-w", "--wave", metavar="WAVE", type=int, help="Apply to specified wave of challenges") + parser.add_argument("--not-wave", metavar="WAVE", type=int, help="Apply to challenges not of specified wave") + + return parser + +def parse_chals(filename): + 
with open(filename) as f: + challenges = json.load(f) + for chal in challenges.values(): + subdomain = chal["subdomain"] or chal["name"] + if not re.match(SUBDOMAIN_REGEX, subdomain): + raise ValueError(f"'{subdomain}' does not match subdomain regular expression") + return challenges.values() + +def haproxy_cfg(filename, challenges, directory): + filepath = f"{directory}/{filename}" + if path.exists(filepath): + log(f"[!] Warning: '{filepath}' already exists") + + template = JINJA_ENV.get_template(f"{filename}.j2") + content = template.render({ + "challenges": challenges, + "HTTP_HOSTS_MAP_PATH": HTTP_HOSTS_MAP_PATH, + # "SNI_MAP_PATH": SNI_MAP_PATH, + "TCP_TYPE": TCP_TYPE, + "HTTP_TYPE": HTTP_TYPE, + "NODES": NODES, + "STATS_PORT": STATS_PORT, + "STATS_USER": STATS_USER, + "STATS_PASSWORD": STATS_PASSWORD, + # "SSL_CERTIFICATE_PATH": SSL_CERTIFICATE_PATH, + "IP_BAN_MINUTES": IP_BAN_MINUTES, + "CONN_RATE_SECONDS": CONN_RATE_SECONDS, + "CONNS_PER_RATE": CONNS_PER_RATE, + "CONCUR_CONNS": CONCUR_CONNS, + "MAX_NODES": MAX_NODES, + "NODES_FQDN": NODES_FQDN, + "CTFD_LINK": CTFD_LINK, + }) + with open(filepath, 'w') as f: + f.write(content) + +def hosts_map(filename, challenges, directory): + filepath = f"{directory}/{filename}" + if path.exists(filepath): + log(f"[!] Warning: '{filepath}' already exists") + + template = JINJA_ENV.get_template(f"{filename}.j2") + content = template.render({ + "challenges": challenges, + "HTTP_TYPE": HTTP_TYPE, + "DOMAIN_NAME": DOMAIN_NAME + }) + with open(filepath, 'w') as f: + f.write(content) + +def update_haproxy(): + # Generate HAProxy config files + challenges = parse_chals(CHALLENGES_JSON_PATH) + haproxy_cfg(HAPROXY_CFG, challenges, HAPROXY_CONFIG_DIR) + hosts_map(HTTP_HOSTS_MAP, challenges, HAPROXY_CONFIG_DIR) + # hosts_map(SNI_MAP, challenges, directory) + + # Copy the files to remote server and restart HAProxy + gcloud_scp_l2r(f"{HAPROXY_CONFIG_DIR}/{HAPROXY_CFG}", HAPROXY_ROOT, HAPROXY_USER, INSTANCE_NAME, INSTANCE_ZONE) + gcloud_scp_l2r(f"{HAPROXY_CONFIG_DIR}/{HTTP_HOSTS_MAP}", HTTP_HOSTS_MAP_PATH, HAPROXY_USER, INSTANCE_NAME, INSTANCE_ZONE) + gcloud_ssh_cmd(HAPROXY_USER, INSTANCE_NAME, INSTANCE_ZONE, "systemctl restart haproxy; systemctl status haproxy") + +if __name__ == "__main__": + parser = argument_parser() + args = parser.parse_args() + + if args.only_haproxy: + log(f"[!] Ignoring all other options as --only-haproxy was specified") + update_haproxy() + sys.exit(0) + + if args.deploy_no_create_fw and not args.deploy: + log(f"[!] Ignoring option --deploy-no-create-fw as --deploy was not specified") + if args.undeploy_delete_fw and not args.undeploy: + log(f"[!] 
Ignoring option --undeploy-delete-fw as --undeploy was not specified") + if args.create_fw and args.delete_fw: + log(f"[-] Can only specify one of --create-fw and --delete-fw") + sys.exit(1) + if args.enable_fw and args.disable_fw: + log(f"[-] Can only specify one of --enable-fw and --disable-fw") + sys.exit(1) + if args.wave and args.not_wave: + log(f"[-] Can only specify one of --wave and --not-wave") + sys.exit(1) + + if args.all: + log(f"[*] Loading all challenges") + chalpaths = get_chalpaths() + elif args.wave: + log(f"[*] Loading challenges of wave {args.wave}") + chalpaths = get_chalpaths(wave=args.wave) + elif args.not_wave: + log(f"[*] Loading challenges not of wave {args.not_wave}") + chalpaths = get_chalpaths(wave=args.not_wave, reverse=True) + elif args.challenges: + chalpaths = args.challenges + else: + log(f"[-] No challenge(s) specified and neither --wave nor --all was given") + sys.exit(1) + + if args.only_build: + log(f"[!] Ignoring all other options as --only-build was specified") + for chalpath in chalpaths: + build_image(chalpath) + sys.exit(0) + + if args.only_ctfcli_untrack: + log(f"[!] Ignoring all other options as --only-ctfcli-untrack was specified") + if args.all: + ctfcli_untrack_all() + else: + for chalpath in chalpaths: + ctfcli_untrack(chalpath) + sys.exit(0) + + if args.only_push_ctfd: + log(f"[!] Ignoring all other options as --only-push-ctfd was specified") + for chalpath in chalpaths: + ctfcli_push(chalpath, update=True) + sys.exit(0) + + if args.state: + log(f"[!] Ignoring all other options as --state was specified") + for chalpath in chalpaths: + ctfcli_chstate(chalpath, args.state) + sys.exit(0) + + fw_action = None + if args.create_fw: + log(f"[!] Ignoring all other options as --create-fw was specified") + fw_action = create_fw + elif args.delete_fw: + log(f"[!] Ignoring all other options as --delete-fw was specified") + fw_action = delete_fw + elif args.enable_fw: + log(f"[!] Ignoring all other options as --enable-fw was specified") + fw_action = enable_fw + elif args.disable_fw: + log(f"[!]
Ignoring all other options as --disable-fw was specified") + fw_action = disable_fw + + if fw_action is not None: + for chalpath in chalpaths: + fw_action(chalpath) + sys.exit(0) + + build = not args.no_build + deploy = args.deploy + undeploy = args.undeploy + createfw = not args.deploy_no_create_fw + deletefw = args.undeploy_delete_fw + push_ctfd = not args.no_push_ctfd + + if args.undeploy: + undeploy_chals(chalpaths, deletefw) + else: + deploy_chals(chalpaths, build, deploy, createfw, push_ctfd) + update_haproxy() diff --git a/.scripts/auto-all/templates/haproxy.cfg.j2 b/.scripts/auto-all/templates/haproxy.cfg.j2 new file mode 100644 index 0000000..af6ee89 --- /dev/null +++ b/.scripts/auto-all/templates/haproxy.cfg.j2 @@ -0,0 +1,169 @@ +global + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + + # Default SSL material locations + ca-base /etc/ssl/certs + crt-base /etc/ssl/private + + # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate + ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 + ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets + +defaults + log global + mode http + option httplog + option dontlognull + timeout connect 5000 + timeout client 50000 + timeout server 50000 + errorfile 400 /etc/haproxy/errors/400.http + errorfile 403 /etc/haproxy/errors/403.http + errorfile 408 /etc/haproxy/errors/408.http + errorfile 500 /etc/haproxy/errors/500.http + errorfile 502 /etc/haproxy/errors/502.http + errorfile 503 /etc/haproxy/errors/503.http + errorfile 504 /etc/haproxy/errors/504.http + +# Setup a haproxy table to store connection information for each user IP adress +# We'll use it in each challenge that needs autoban to limit no of connections and +# the connection rate for users +backend ip_blacklist + stick-table type ip size 1m expire {{ IP_BAN_MINUTES }}m store gpc0 + + +# Stats listener + +listen stats + bind *:{{ STATS_PORT }} + mode http + stats enable + stats uri / + stats auth {{ STATS_USER }}:{{ STATS_PASSWORD }} + +# DNS resolver to retrieve backends dynamically ## + +resolvers nameservers + nameserver ns 127.0.0.1:53 + accepted_payload_size 8192 + + hold nx 3s + hold timeout 3s + + timeout retry 1s + timeout resolve 1s + + resolve_retries 3 + + +# HTTP frontends + +defaults + mode http + default_backend be_redirect_to_ctfd + timeout connect 20000 + timeout client 120000 + timeout server 120000 + log global + option httplog + errorfile 400 /etc/haproxy/errors/400.http + errorfile 403 /etc/haproxy/errors/403.http + errorfile 408 /etc/haproxy/errors/408.http + errorfile 500 /etc/haproxy/errors/500.http + errorfile 502 /etc/haproxy/errors/502.http + errorfile 503 /etc/haproxy/errors/503.http + errorfile 504 /etc/haproxy/errors/504.http + +frontend http + # Reject connection if source IP is blacklisted + 
tcp-request connection reject if { src -f /etc/haproxy/blacklist.lst } + + # Automatic temporary IP ban + stick-table type ip size 1m expire {{ CONN_RATE_SECONDS }}s store conn_rate({{ CONN_RATE_SECONDS }}s),conn_cur + tcp-request connection reject if { src_get_gpc0(ip_blacklist) gt 0 } + tcp-request connection track-sc0 src + tcp-request connection track-sc1 src table ip_blacklist + tcp-request connection sc-inc-gpc0(1) if { sc0_conn_rate gt {{ CONNS_PER_RATE }} } or { sc0_conn_cur gt {{ CONCUR_CONNS }} } + + bind *:80 + use_backend %[req.hdr(host),lower,map_dom({{ HTTP_HOSTS_MAP_PATH }})] + +# TCP frontends + +defaults + mode tcp + default_backend be_default + timeout connect 20000 + timeout client 120000 + timeout server 120000 + +{% for chal in challenges %} +{% if chal["type"] == TCP_TYPE %} +frontend {{ chal["name"] }} + # Ban connection if source IP is blacklisted + tcp-request connection reject if { src -f /etc/haproxy/blacklist.lst } + + {% if chal["autoban"] %} + # Automatic temporary IP ban + stick-table type ip size 1m expire {{ CONN_RATE_SECONDS }}s store conn_rate({{ CONN_RATE_SECONDS }}s),conn_cur + tcp-request connection reject if { src_get_gpc0(ip_blacklist) gt 0 } + tcp-request connection track-sc0 src + tcp-request connection track-sc1 src table ip_blacklist + tcp-request connection sc-inc-gpc0(1) if { sc0_conn_rate gt {{ CONNS_PER_RATE }} } or { sc0_conn_cur gt {{ CONCUR_CONNS }} } + {% endif %} + + bind *:{{ chal["port"] }} + use_backend be_{{ chal["name"] }} +{% endif %} +{% endfor %} + +# HTTP backends + +defaults + mode http + option prefer-last-server + balance leastconn + option httpchk HEAD / + timeout connect 20000 + timeout client 120000 + timeout server 120000 + +{% for chal in challenges %} +{% if chal["type"] == HTTP_TYPE %} +backend be_{{ chal["name"] }} + server-template node {{ MAX_NODES }} {{ NODES_FQDN }}:{{ chal["nodeport"] }} check resolvers nameservers init-addr libc,none +{% endif %} +{% endfor %} + +backend be_default + tcp-request content reject + +# TCP backends + +defaults + mode tcp + option tcp-check + balance roundrobin + timeout connect 20000 + timeout client 120000 + timeout server 120000 + +{% for chal in challenges %} +{% if chal["type"] == TCP_TYPE %} +backend be_{{ chal["name"] }} + server-template node {{ MAX_NODES }} {{ NODES_FQDN }}:{{ chal["nodeport"] }} check resolvers nameservers init-addr libc,none +{% endif %} +{% endfor %} + +backend be_redirect_to_ctfd + mode http + http-request redirect location {{ CTFD_LINK }} diff --git a/.scripts/auto-all/templates/http-hosts.map.j2 b/.scripts/auto-all/templates/http-hosts.map.j2 new file mode 100644 index 0000000..9f488ef --- /dev/null +++ b/.scripts/auto-all/templates/http-hosts.map.j2 @@ -0,0 +1,6 @@ +#domainname backendname +{% for chal in challenges %} +{% if chal["type"] == HTTP_TYPE %} +{{ chal["subdomain"] }}.{{ DOMAIN_NAME }} be_{{ chal["name"] }} +{% endif %} +{% endfor %} diff --git a/.scripts/auto-all/templates/sni.map.j2 b/.scripts/auto-all/templates/sni.map.j2 new file mode 100644 index 0000000..f766775 --- /dev/null +++ b/.scripts/auto-all/templates/sni.map.j2 @@ -0,0 +1,2 @@ +#domainname backendname{% for chal in challenges %} +{{ chal["subdomain"] or chal["name"] }}.{{ DOMAIN_NAME }} be_{{ chal["name"] }}{% endfor %} diff --git a/.scripts/commit.sh b/.scripts/commit.sh new file mode 100755 index 0000000..90e6edb --- /dev/null +++ b/.scripts/commit.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +if [ $# = 0 ]; then + git add . 
&& git commit && git push +else + git add "$@" && git commit "$@" && git push +fi diff --git a/.scripts/ctfd/all-challs.sh b/.scripts/ctfd/all-challs.sh new file mode 100755 index 0000000..00b11c8 --- /dev/null +++ b/.scripts/ctfd/all-challs.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +CHALLENGES_FILE="challenges.txt" +DEPTH="3" + +if [ -f "${CHALLENGES_FILE}" ]; then + cat "${CHALLENGES_FILE}" +else + for c in $(find -mindepth "${DEPTH}" -maxdepth "${DEPTH}" -name challenge.yml); do + c="${c%/challenge.yml}" + echo "${c#./}" + done +fi diff --git a/.scripts/ctfd/cmd-all.sh b/.scripts/ctfd/cmd-all.sh new file mode 100755 index 0000000..b646f0a --- /dev/null +++ b/.scripts/ctfd/cmd-all.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +ALL_CHALLS_SCRIPT="./all-challs.sh" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +for chal in $("${DIRNAME}/${ALL_CHALLS_SCRIPT}");do + echo "Challenge: '${chal}'" + ~/.local/bin/ctf "$@" "${chal}" +done diff --git a/.scripts/ctfd/hide-all.sh b/.scripts/ctfd/hide-all.sh new file mode 100755 index 0000000..cdbc436 --- /dev/null +++ b/.scripts/ctfd/hide-all.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +ALL_CHALLS_SCRIPT="all-challs.sh" +STATE_SCRIPT="state.sh" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +for chal in $("${DIRNAME}/${ALL_CHALLS_SCRIPT}");do + "${DIRNAME}/${STATE_SCRIPT}" "${chal}" hidden +done diff --git a/.scripts/ctfd/show-all.sh b/.scripts/ctfd/show-all.sh new file mode 100755 index 0000000..9250b55 --- /dev/null +++ b/.scripts/ctfd/show-all.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +ALL_CHALLS_SCRIPT="all-challs.sh" +STATE_SCRIPT="state.sh" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +for chal in $("${DIRNAME}/${ALL_CHALLS_SCRIPT}");do + "${DIRNAME}/${STATE_SCRIPT}" "${chal}" visible +done diff --git a/.scripts/ctfd/state.sh b/.scripts/ctfd/state.sh new file mode 100755 index 0000000..e25dd26 --- /dev/null +++ b/.scripts/ctfd/state.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +[ $# -lt 2 ] && { echo "Usage: $0 CHALLENGE STATE" >&2; exit 1; } + +challenge="$1" +state="$2" + +if [ "$state" != "hidden" -a "$state" != "visible" ]; then + echo "Invalid state: \"$state\"" >&2 + echo "Valid states: hidden, visible" >&2 + exit 1 +else + case "$state" in + hidden) + other_state=visible + ;; + visible) + other_state=hidden + ;; + *) + echo "Invalid state: \"$state\"" >&2 + echo "Valid states: hidden, visible" >&2 + exit 1 + esac + + [ ! -d "${challenge}" -o ! -f "${challenge}/challenge.yml" ] && { echo "Cannot find challenge \"$challenge\"" >&2; exit 1; } + + sum="$(md5sum "${challenge}/challenge.yml")" + sed -i "s/state: ${other_state}/state: ${state}/" "${challenge}/challenge.yml" + if [ "$(md5sum "${challenge}/challenge.yml")" != "${sum}" ]; then + ~/.local/bin/ctf challenge sync "${challenge}" + echo "'${challenge}' is now ${state}" + fi +fi diff --git a/.scripts/ctfd/syncpush.sh b/.scripts/ctfd/syncpush.sh new file mode 100755 index 0000000..b09c92d --- /dev/null +++ b/.scripts/ctfd/syncpush.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +if [ "${1}" = "-d" -o "${1}" = "--deploy" ]; then + DEPLOY=true + shift +fi + +for chal in "$@"; do + ctf challenge sync "$chal" + if [ "$DEPLOY" = true ] && [ -f "${chal}/docker-compose.yml" ]; then + cd "$chal" + docker-compose build + docker-compose push + cd - + fi +done + +git add . && git commit . 
&& git push diff --git a/.scripts/deploy-chal/all.sh b/.scripts/deploy-chal/all.sh new file mode 100755 index 0000000..1ebe9d5 --- /dev/null +++ b/.scripts/deploy-chal/all.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +DEPLOY_DIR="deploy" + +# apply or delete +K8S_CMD="${1}" + +for dir in $(find -name "${DEPLOY_DIR}" -type d); do + kubectl "${K8S_CMD}" -f "${dir}" +done diff --git a/.scripts/deploy-chal/config.py b/.scripts/deploy-chal/config.py new file mode 100644 index 0000000..f98ec32 --- /dev/null +++ b/.scripts/deploy-chal/config.py @@ -0,0 +1,11 @@ +TEMPLATES_DIR = "templates" +DEFAULT_REPLICAS = 1 +DOCKER_REGISTRY_BASE_LINK = "gcr.io/gdg-ctf-2022" +CPU_LIMIT = 100 +MEMORY_LIMIT = 100 +CPU_REQUEST = 10 +MEMORY_REQUEST = 30 +INITIAL_DELAY_SECONDS = 3 +PERIOD_SECONDS = 30 +DEPLOYMENT_FILE = "deployment.yml" +SERVICE_FILE = "service.yml" diff --git a/.scripts/deploy-chal/gen-deploy.sh b/.scripts/deploy-chal/gen-deploy.sh new file mode 100755 index 0000000..deab7c8 --- /dev/null +++ b/.scripts/deploy-chal/gen-deploy.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +DEPLOY_DIR="deploy" +SCRIPTNAME="script.py" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +mkdir -p "$DEPLOY_DIR" && cd "$DEPLOY_DIR" + +"${DIRNAME}/${SCRIPTNAME}" "$@" + +kubectl apply -f . diff --git a/.scripts/deploy-chal/generate.sh b/.scripts/deploy-chal/generate.sh new file mode 100755 index 0000000..72bec08 --- /dev/null +++ b/.scripts/deploy-chal/generate.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +DEPLOY_DIR="deploy" +SCRIPTNAME="script.py" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +mkdir -p "$DEPLOY_DIR" && cd "$DEPLOY_DIR" + +"${DIRNAME}/${SCRIPTNAME}" "$@" diff --git a/.scripts/deploy-chal/script.py b/.scripts/deploy-chal/script.py new file mode 100755 index 0000000..5a599e4 --- /dev/null +++ b/.scripts/deploy-chal/script.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 + +import argparse +import jinja2 +from os import path +from config import * + +SCRIPT_ROOT = path.dirname(__file__) +JINJA_ENV = jinja2.Environment(loader=jinja2.FileSystemLoader(path.join(SCRIPT_ROOT, TEMPLATES_DIR))) + +def argument_parser(): + parser = argparse.ArgumentParser(description="Generate deployment.yml and optionally service.yml of a challenge") + + parser.add_argument("--name", required=True, help="Challenge name") + parser.add_argument("--category", required=True, help="Challenge category") + parser.add_argument("--type", required=True, choices=["http", "tcp"], help="Challenge type (http or tcp)") + parser.add_argument("--replicas", default=DEFAULT_REPLICAS, help="Number of pod replicas") + parser.add_argument("--docker-registry", default=DOCKER_REGISTRY_BASE_LINK, help="Docker registry link") + parser.add_argument("--docker-registry-challenge-name", help="Docker registry challenge name") + parser.add_argument("--cpu-limit", type=int, default=CPU_LIMIT, help="Container CPU limit") + parser.add_argument("--memory-limit", type=int, default=MEMORY_LIMIT, help="Container memory limit") + parser.add_argument("--cpu-request", type=int, default=CPU_REQUEST, help="Container CPU request") + parser.add_argument("--memory-request", type=int, default=MEMORY_REQUEST, help="Container memory request") + parser.add_argument("--container-port", type=int, help="Container port") + parser.add_argument("--initial-delay-seconds", type=int, default=INITIAL_DELAY_SECONDS, help="Number of initial delay seconds before starting the liveness probe") + parser.add_argument("--period-seconds", type=int, default=PERIOD_SECONDS, help="Period in seconds before of the 
liveness probe") + parser.add_argument("--node-port", type=int, help="Node port") + + return parser + +def generate_file(filename): + if path.exists(filename): + raise Exception(f"'{filename}' already exists") + else: + template = JINJA_ENV.get_template(f"{filename}.j2") + content = template.render(challenge=challenge) + with open(filename, 'w') as f: + f.write(content) + +if __name__ == "__main__": + parser = argument_parser() + args = parser.parse_args() + + challenge = { + "name": args.name, + "category": args.category, + "type": args.type, + "replicas": args.replicas, + "docker_registry": args.docker_registry, + "docker_registry_challenge_name": args.docker_registry_challenge_name if args.docker_registry_challenge_name else args.name, + "cpu_limit": args.cpu_limit, + "memory_limit": args.memory_limit, + "cpu_request": args.cpu_request, + "memory_request": args.memory_request, + "container_port": args.container_port, + "initial_delay_seconds": args.initial_delay_seconds, + "period_seconds": args.period_seconds, + "node_port": args.node_port, + } + + generate_file(DEPLOYMENT_FILE) + + if challenge['container_port'] is not None and challenge['node_port'] is not None: + generate_file(SERVICE_FILE) diff --git a/.scripts/deploy-chal/templates/deployment.yml.j2 b/.scripts/deploy-chal/templates/deployment.yml.j2 new file mode 100644 index 0000000..09bcf1c --- /dev/null +++ b/.scripts/deploy-chal/templates/deployment.yml.j2 @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ challenge['name'] }} + labels: + category: {{ challenge['category'] }} + challenge: {{ challenge['name'] }} +spec: + replicas: {{ challenge['replicas'] }} + selector: + matchLabels: + category: {{ challenge['category'] }} + challenge: {{ challenge['name'] }} + template: + metadata: + labels: + category: {{ challenge['category'] }} + challenge: {{ challenge['name'] }} + spec: + automountServiceAccountToken: false + enableServiceLinks: false{% if challenge['type'] == 'http' %} + containers: + - name: web{% elif challenge['type'] == 'tcp' %} + containers: + - name: server{% endif %} + image: {{ challenge['docker_registry'] }}/{{ challenge['docker_registry_challenge_name'] }} + resources: + limits: + cpu: {{ challenge['cpu_limit'] }}m + memory: {{ challenge['memory_limit'] }}Mi + requests: + cpu: {{ challenge['cpu_request'] }}m + memory: {{ challenge['memory_request'] }}Mi{% if challenge['container_port'] %} + ports: + - containerPort: {{ challenge['container_port'] }}{% if challenge['type'] == 'http' %} + name: http{% elif challenge['type'] == 'tcp' %} + name: tcp{% endif %} + livenessProbe:{% if challenge['type'] == 'http' %} + httpGet: + path: / + port: http{% elif challenge['type'] == 'tcp' %} + tcpSocket: + port: tcp{% endif %} + initialDelaySeconds: {{ challenge['initial_delay_seconds'] }} + periodSeconds: {{ challenge['period_seconds'] }}{% endif %} diff --git a/.scripts/deploy-chal/templates/service.yml.j2 b/.scripts/deploy-chal/templates/service.yml.j2 new file mode 100644 index 0000000..600f824 --- /dev/null +++ b/.scripts/deploy-chal/templates/service.yml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ challenge['name'] }} + labels: + category: {{ challenge['category'] }} + challenge: {{ challenge['name'] }} +spec: + type: NodePort + selector: + category: {{ challenge['category'] }} + challenge: {{ challenge['name'] }} + ports: + - port: {{ challenge['container_port'] }}{% if challenge['type'] == 'http' %} + name: http{% elif challenge['type'] == 'tcp' %} + name: 
tcp{% endif %} + targetPort: {{ challenge['container_port'] }} + nodePort: {{ challenge['node_port'] }} diff --git a/.scripts/gcp-firewall/allow.sh b/.scripts/gcp-firewall/allow.sh new file mode 100755 index 0000000..c0f1027 --- /dev/null +++ b/.scripts/gcp-firewall/allow.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +CHALLENGE="${1}" + +[ -d "${CHALLENGE}" ] || { echo "No such challenge: '${CHALLENGE}'" >&2; exit 1; } + +RULE_NAME="allow-${CHALLENGE/\//-}" +echo "Enabling firewall rule : '${RULE_NAME}'" + +gcloud compute firewall-rules update "${RULE_NAME}" --no-disabled diff --git a/.scripts/gcp-firewall/block-all.sh b/.scripts/gcp-firewall/block-all.sh new file mode 100755 index 0000000..7b4917f --- /dev/null +++ b/.scripts/gcp-firewall/block-all.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="deny-challenges" + +gcloud compute firewall-rules update "${RULE_NAME}" --no-disabled diff --git a/.scripts/gcp-firewall/block-ctfd.sh b/.scripts/gcp-firewall/block-ctfd.sh new file mode 100755 index 0000000..3561b0d --- /dev/null +++ b/.scripts/gcp-firewall/block-ctfd.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="deny-ctfd" + +gcloud compute firewall-rules update "${RULE_NAME}" --no-disabled diff --git a/.scripts/gcp-firewall/block-docker-challenges-all.sh b/.scripts/gcp-firewall/block-docker-challenges-all.sh new file mode 100755 index 0000000..02fd628 --- /dev/null +++ b/.scripts/gcp-firewall/block-docker-challenges-all.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="deny-docker-challenges" + +gcloud compute firewall-rules update "${RULE_NAME}" --no-disabled diff --git a/.scripts/gcp-firewall/block-http.sh b/.scripts/gcp-firewall/block-http.sh new file mode 100755 index 0000000..9cce666 --- /dev/null +++ b/.scripts/gcp-firewall/block-http.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="allow-http-haproxy" + +gcloud compute firewall-rules update "${RULE_NAME}" --disabled diff --git a/.scripts/gcp-firewall/create-all.sh b/.scripts/gcp-firewall/create-all.sh new file mode 100755 index 0000000..b95e733 --- /dev/null +++ b/.scripts/gcp-firewall/create-all.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +CHALS_JSON="config/chals.json" +PRIORITY="1000" +TCP_TYPE="tcp" +SOURCE_RANGES="0.0.0.0/0" +TARGET_TAGS="haproxy" + +for chal in $(jq -c ".[]" "${CHALS_JSON}"); do + name="$(echo "$chal" | jq -r ".name")" + category="$(echo "$chal" | jq -r ".category")" + port="$(echo "$chal" | jq -r ".port")" + type="$(echo "$chal" | jq -r ".type")" + + rule="allow-${category}-${name}" + + if [ "${type}" = "${TCP_TYPE}" ] && ! 
gcloud compute firewall-rules describe "${rule}" >/dev/null 2>&1; then + gcloud compute firewall-rules create "${rule}" --direction=INGRESS --priority="${PRIORITY}" --network=default --action=ALLOW --rules=tcp:"${port}" --source-ranges="${SOURCE_RANGES}" --target-tags="${TARGET_TAGS}" + fi +done diff --git a/.scripts/gcp-firewall/create-deny-all.sh b/.scripts/gcp-firewall/create-deny-all.sh new file mode 100755 index 0000000..f3cac4f --- /dev/null +++ b/.scripts/gcp-firewall/create-deny-all.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +CHALS_JSON="config/chals.json" +PRIORITY="950" +TCP_TYPE="tcp" +SOURCE_RANGES="0.0.0.0/0" +TARGET_TAGS="haproxy" +RULE_NAME="deny-challenges" +RULES="tcp:1100-1899,tcp:80" + +gcloud compute firewall-rules create "${RULE_NAME}" --direction=INGRESS --priority="${PRIORITY}" --network=default --action=DENY --rules="${RULES}" --source-ranges="${SOURCE_RANGES}" --target-tags="${TARGET_TAGS}" diff --git a/.scripts/gcp-firewall/deny.sh b/.scripts/gcp-firewall/deny.sh new file mode 100755 index 0000000..b9bad58 --- /dev/null +++ b/.scripts/gcp-firewall/deny.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +CHALLENGE="${1}" + +[ -d "${CHALLENGE}" ] || { echo "No such challenge: '${CHALLENGE}'" >&2; exit 1; } + +RULE_NAME="allow-${CHALLENGE/\//-}" +echo "Disabling firewall rule : '${RULE_NAME}'" + +gcloud compute firewall-rules update "${RULE_NAME}" --disabled diff --git a/.scripts/gcp-firewall/unblock-all.sh b/.scripts/gcp-firewall/unblock-all.sh new file mode 100755 index 0000000..86c456d --- /dev/null +++ b/.scripts/gcp-firewall/unblock-all.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="deny-challenges" + +gcloud compute firewall-rules update "${RULE_NAME}" --disabled diff --git a/.scripts/gcp-firewall/unblock-ctfd.sh b/.scripts/gcp-firewall/unblock-ctfd.sh new file mode 100755 index 0000000..897277b --- /dev/null +++ b/.scripts/gcp-firewall/unblock-ctfd.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="deny-ctfd" + +gcloud compute firewall-rules update "${RULE_NAME}" --disabled diff --git a/.scripts/gcp-firewall/unblock-docker-challenges-all.sh b/.scripts/gcp-firewall/unblock-docker-challenges-all.sh new file mode 100755 index 0000000..0c31428 --- /dev/null +++ b/.scripts/gcp-firewall/unblock-docker-challenges-all.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="deny-docker-challenges" + +gcloud compute firewall-rules update "${RULE_NAME}" --disabled diff --git a/.scripts/gcp-firewall/unblock-http.sh b/.scripts/gcp-firewall/unblock-http.sh new file mode 100755 index 0000000..0847779 --- /dev/null +++ b/.scripts/gcp-firewall/unblock-http.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +RULE_NAME="allow-http-haproxy" + +gcloud compute firewall-rules update "${RULE_NAME}" --no-disabled diff --git a/.scripts/k8s/all-down.sh b/.scripts/k8s/all-down.sh new file mode 100755 index 0000000..de4bab5 --- /dev/null +++ b/.scripts/k8s/all-down.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +DEPLOY_DIR="deploy" +ALL_CHALLS_SCRIPT="../ctfd/all-challs.sh" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +for chal in $("${DIRNAME}/${ALL_CHALLS_SCRIPT}");do + if [ -d "${chal}/${DEPLOY_DIR}" ]; then + kubectl delete -f "${chal}/${DEPLOY_DIR}" + fi +done diff --git a/.scripts/k8s/all-restart.sh b/.scripts/k8s/all-restart.sh new file mode 100755 index 0000000..9f3b04d --- /dev/null +++ b/.scripts/k8s/all-restart.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +DEPLOY_DIR="deploy" +ALL_CHALLS_SCRIPT="../ctfd/all-challs.sh" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +for chal in $("${DIRNAME}/${ALL_CHALLS_SCRIPT}");do + if [ -d 
"${chal}/${DEPLOY_DIR}" ]; then + kubectl delete -f "${chal}/${DEPLOY_DIR}" + kubectl apply -f "${chal}/${DEPLOY_DIR}" + fi +done diff --git a/.scripts/k8s/all-up.sh b/.scripts/k8s/all-up.sh new file mode 100755 index 0000000..e2c0c38 --- /dev/null +++ b/.scripts/k8s/all-up.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +DEPLOY_DIR="deploy" +ALL_CHALLS_SCRIPT="../ctfd/all-challs.sh" +DIRNAME="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ;pwd -P)" + +for chal in $("${DIRNAME}/${ALL_CHALLS_SCRIPT}");do + if [ -d "${chal}/${DEPLOY_DIR}" ]; then + kubectl apply -f "${chal}/${DEPLOY_DIR}" + fi +done diff --git a/.scripts/k8s/reapply.sh b/.scripts/k8s/reapply.sh new file mode 100755 index 0000000..aed6d68 --- /dev/null +++ b/.scripts/k8s/reapply.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +kubectl delete -f "$@" +kubectl apply -f "$@" diff --git a/.scripts/launch.sh b/.scripts/launch.sh new file mode 100755 index 0000000..b8ba08e --- /dev/null +++ b/.scripts/launch.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +.scripts/gcp-firewall/unblock-all.sh +.scripts/ctfd/show-all.sh diff --git a/.scripts/unique-flag.sh b/.scripts/unique-flag.sh new file mode 100755 index 0000000..145415c --- /dev/null +++ b/.scripts/unique-flag.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +PREFIX="CyberErudites" + +LINES="$(grep -Proh "[^\^]\K${PREFIX}\{.*?\}" | wc -l)" +(( LINES > 1 )) || { echo "Flag not found or found only once" >&2; grep -Pr "[^\^]\K${PREFIX}\{.*?\}" >&2; exit 1; } + +UNIQUE="$(grep -Proh "[^\^]\K${PREFIX}\{.*?\}" | sort -u | wc -l)" +(( UNIQUE > 1 )) && { echo "Found non matching flags" >&2; grep -Pr "[^\^]\K${PREFIX}\{.*?\}" >&2; exit 1; } + +FLAG="$(grep -Proh "[^\^]\K${PREFIX}\{.*?\}" | sort -u)" + +echo "Flag '${FLAG}' is unique!" diff --git a/CHALS.md b/CHALS.md new file mode 100644 index 0000000..6ceb640 --- /dev/null +++ b/CHALS.md @@ -0,0 +1,13 @@ +# Challenges + +## Port ranges + +| Category | External HAProxy port range | Kubernetes node port range | Notes | +|-------------|-----------------------------|----------------------------|--------------------------------------------| +| crypto | 1000-1099 | 30000-30099 | | +| linux | 1100-1199 | 30100-30199 | | +| misc | 1200-1299 | 30200-30299 | | +| jail | 1300-1399 | 30300-30399 | | +| pwn | 1400-1499 | 30400-30499 | | +| reverse | 1500-1599 | 30500-30599 | | +| web | 1600-1699 | 30600-30699 | in case hostname based matching isn't used | diff --git a/README.md b/README.md new file mode 100644 index 0000000..b2fe344 --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# Mentoring Program 2022/2023 +This repository contains challenges for 2022/2023 mentoring program diff --git a/config/chals.json b/config/chals.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/config/chals.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/config/ctfcli_chals.json b/config/ctfcli_chals.json new file mode 100644 index 0000000..32960f8 --- /dev/null +++ b/config/ctfcli_chals.json @@ -0,0 +1,2 @@ +[ +] \ No newline at end of file diff --git a/config/ports.yml b/config/ports.yml new file mode 100644 index 0000000..f77df23 --- /dev/null +++ b/config/ports.yml @@ -0,0 +1,8 @@ +crypto: 1000 +general-skills: 1700 +jail: 1300 +linux: 1100 +misc: 1200 +pwn: 1400 +reverse: 1500 +web: 1600 diff --git a/helm-charts/ctf-challenge-chart-0.1.0.tgz b/helm-charts/ctf-challenge-chart-0.1.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..aed99bca5bbb0eed055a7b10ea1e963b1cc46a6a GIT binary patch literal 2348 zcmV+{3Dfo;iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc 
zVQyr3R8em|NM&qo0PI>{kK4Er&$B+o)Z0UoZ7s{*e}RD(2(pJZXf7Ar1}XX!(9+oA zEk!aU<&EoXKYKw^k|kMd%h`03UY8HDOw9~uejLt_BgJMuj8noG@&(#EWkxUUHC&O5 zJ^3mvilXTJ^wj-EQPlp8CZ{J)#-}Hf=wuvCE}|#V`1IoJ>mXm-;S^XV{3qyBmU zV9)xWj8D&6>;G&rK7CyO57GRpwTpHd04Q{;P`}Bu8LBCa1ArrVw~}a=v|Z(uuK-e$ zq5>035DHRbBsglnp3DDLN|gj!8u5v!1j(IQz4t@BNy$;9z_%2yd#;1mpM zGo<$3J$uz2JQ4g42Jn*<8iDW?NTH0S;lPkA5vT#oq**!kSa3rqNA*@HGll1o>wW84 zh=Ci*7|gJcmw@1aBu?S_xdtv0^oH3xFW|&_Z2-V%Mvb<>0mL$&!X%0^_bi6?f0rAU}bU#1vePhAH9<o%Nwf@Kp zdcl~u8C@b@0WK734NpaGVv$k4cm_t1`JBcuS0V!`62~$imS+INzj$t_7)p9imLOj_ zDkr^*$d5QJC0dPy^_nR-!;e4`MKUy~v~@5k5+Ixl>Q>M0ZtcQ|E>qHsnlWmx%8Mbqt7wrzWg@82wqs!O33xr(t;-L{Qi zl|4dObQ0e*7c}Ph5(%v96Zul?j96(d})p zUJoI`Ipqk04xb4v-vEXiN|G}Jp+8E#u|mW5ep_iNe_o#nGVlmd`1&L<3RigD@sw1ela+csg4-R4(6KSz4Ruw8Z#X7`&Q;LWV>a z@D+(c7Y8NRM>o5S+-@<52I7=2wCx45rEdaF=M}Ey!wTDi1C^7Cax;g)4|@269<*fnLHoN}_ie)gYcp$CPH|$jv+8EI za1~F*$P8J}8Ftsr^tR2aBku2u(>i>1dv_8e4E{KP!H2=&OG{*#;9gtZPg_sx)lHFQ zp-xBc$rRRWpgd-IvfI$mtFoI`Bg5DT^|}}?ta(-e!;7kJVV@Bx!|Q5g+l^gSQdqA$ zSU$tYTo`P@vMnVeoGdUI&Q@Mf>19`_mNahjdswduKkc1dw&w*9YDHlM^M@r|1BG?thj?%sMvjN!c+QOYiYqVu& zNmP~nIps-op+2~d)1C_$OI3xZ^GS*P# zdDEmK))u9d2I{?WKN$H#vA-w!mC2=(It@6PF)TJCuPMLQQlW)zARA}>pQBpId>cR@=Ay7Gm!FX~JMqw0=2bI;9*dkY(5=-6q~HVdjzD=dfiLG1=-P#eYqw@0>#dy2NZ1XnujvcZ+re5rBJ z4Me+O3AeYSU6ihH3AeX{+6Z?}X`_zW)Jh6v8Z%|Z9E5ihhNAS@Q0icun<7oTEs-ZJ z;;mwBH+T)AhKRe@!=_5wgpScAa@6{*5;JT=*vn_3WrXegE1GG`GD%E2g;BfLnRy?) z+ryh!URmebR=^F_Jr8Xmoz>$z7XJp>u!F5Fxm9)EFSEe_?b+Vay{oTLysuCF=C#nK zx!QZTTAm?{WxnONWo>>ZqbF)3(tPFAOOcYrs?rv>PnU{!qfIT>-LZVSUgETqq#}HQ zeVJ>#!`fc8ovfPyw>!MHV<~LMgD(u7_Yai+#jvwHiH>sueize(F&|0#-F_xu!S@Ap4ZwDtb)bn;mLd5G5d z{;w?k6vxB+)_wXMwGTRGKWyE{f35MR_?9DRr1W8tCVHGB4nC`H7`FBTUvgY(?+4o1 z!4S1T)B9Ld)2{l;<&K#vo>vocz#gzRi3e@}O6}_N(^elEz-;dh+ko^kf6LfLo~dKr zEDe6=#nD|gsMG)3r1=|aUHLyuk!5tjg~G2g#=h_W&rY_!{~ez{e*gO*?FilyV^DDo zMu4AmxJi-2ET=4?d{KVrufyO7-lbH7&ZSF*pi^WF7EH|iD-OyRe*%S!7`jAvZmabZ zo&-n0ap9`^@Kh?C(@&WA7X-h17Q)Y*tw3KbtI6k(=_SnAX S_MZR%0RR80b;ywbH~;_wjgK(^ literal 0 HcmV?d00001 diff --git a/helm-charts/ctf-challenge-chart/.helmignore b/helm-charts/ctf-challenge-chart/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/helm-charts/ctf-challenge-chart/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm-charts/ctf-challenge-chart/Chart.yaml b/helm-charts/ctf-challenge-chart/Chart.yaml new file mode 100644 index 0000000..24536a4 --- /dev/null +++ b/helm-charts/ctf-challenge-chart/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: ctf-challenge-chart +description: Helm chart package for the deployement of ctf challenges + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. 
This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/helm-charts/ctf-challenge-chart/templates/_helpers.tpl b/helm-charts/ctf-challenge-chart/templates/_helpers.tpl new file mode 100644 index 0000000..0bb552d --- /dev/null +++ b/helm-charts/ctf-challenge-chart/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "ctf-challenge-chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ctf-challenge-chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ctf-challenge-chart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ctf-challenge-chart.labels" -}} +helm.sh/chart: {{ include "ctf-challenge-chart.chart" . }} +{{ include "ctf-challenge-chart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ctf-challenge-chart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ctf-challenge-chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ctf-challenge-chart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "ctf-challenge-chart.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm-charts/ctf-challenge-chart/templates/deployment.yaml b/helm-charts/ctf-challenge-chart/templates/deployment.yaml new file mode 100644 index 0000000..68e87cc --- /dev/null +++ b/helm-charts/ctf-challenge-chart/templates/deployment.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.deployment.name }} + labels: + challenge: {{ .Values.deployment.name }} + category: {{ .Values.category }} + egress: {{ .Values.deployment.egress }} + {{- include "ctf-challenge-chart.labels" . 
| nindent 4 }} +spec: + replicas: {{ .Values.deployment.replicasNumber }} + selector: + matchLabels: + category: {{ .Values.category }} + challenge: {{ .Values.deployment.name }} + egress: {{ .Values.deployment.egress }} + {{- include "ctf-challenge-chart.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.deployment.other.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + category: {{ .Values.category }} + challenge: {{ .Values.deployment.name }} + egress: {{ .Values.deployment.egress }} + {{- include "ctf-challenge-chart.selectorLabels" . | nindent 8 }} + spec: + enableServiceLinks: false + automountServiceAccountToken: false + {{ if .Values.deployment.hostAlias }} + hostAliases: + - ip: "127.0.0.1" + hostnames: + - {{ .Values.deployment.hostAlias }} + {{ end }} + {{ if eq .Values.deployment.type "http"}} + containers: + - name: web + {{ else if eq .Values.deployment.type "tcp"}} + containers: + - name: server + {{ end }} + image: "{{ .Values.deployment.dockerRepository }}/{{ .Values.deployment.dockerImage }}" + {{ if .Values.deployment.containerPort }} + ports: + {{if eq .Values.deployment.type "http"}} + - name: http + {{ else if eq .Values.deployment.type "tcp"}} + - name: tcp + {{ end}} + containerPort: {{ .Values.deployment.containerPort}} + {{ if .Values.deployment.healthCheck }} + livenessProbe: + {{ if eq .Values.deployment.type "http" }} + httpGet: + path: / + port: http + {{ else if eq .Values.deployment.type "tcp" }} + tcpSocket: + port: tcp + {{ end }} + initialDelaySeconds: {{ .Values.deployment.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.deployment.healthCheck.periodSeconds }} + {{ end }} + {{ end }} + resources: + limits: + cpu: {{ .Values.deployment.limits.cpu}} + memory: {{ .Values.deployment.limits.memory }} + requests: + cpu: {{ .Values.deployment.requests.cpu }} + memory: {{ .Values.deployment.requests.memory }} + + {{ if .Values.deployment.additionalContainers }} + {{- range .Values.deployment.additionalContainers}} + - name: {{ .name }} + image: "{{ .dockerRepository }}/{{.dockerImage }}" + {{ if .containerPort }} + ports: + {{if eq .type "http"}} + - name: http + {{ else if eq .type "tcp"}} + - name: tcp + {{ end}} + containerPort: {{ .containerPort}} + {{ if .healthCheck }} + livenessProbe: + {{ if eq .type "http" }} + httpGet: + path: / + port: http + {{ else if eq .type "tcp" }} + tcpSocket: + port: tcp + {{ end }} + initialDelaySeconds: {{ .healthCheck.initialDelaySeconds }} + periodSeconds: {{ .healthCheck.periodSeconds }} + {{ end }} + {{ end }} + resources: + limits: + cpu: {{ .resources.limits.cpu}} + memory: {{ .resources.limits.memory }} + requests: + cpu: {{ .resources.requests.cpu }} + memory: {{ .resources.requests.memory }} + {{- end }} + {{ end }} diff --git a/helm-charts/ctf-challenge-chart/templates/service.yaml b/helm-charts/ctf-challenge-chart/templates/service.yaml new file mode 100644 index 0000000..cb3a21d --- /dev/null +++ b/helm-charts/ctf-challenge-chart/templates/service.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.deployment.name }} + labels: + category: {{ .Values.category }} + challenge: {{ .Values.deployment.name }} + egress: {{ .Values.deployment.egress }} + {{- include "ctf-challenge-chart.labels" . 
| nindent 4 }} +spec: + type: NodePort + ports: + - port: {{ .Values.deployment.containerPort }} + {{ if eq .Values.deployment.type "http" }} + name: http + {{ else if eq .Values.deployment.type "tcp"}} + name: tcp + {{ end }} + targetPort: {{ .Values.deployment.containerPort }} + nodePort: {{ .Values.deployment.nodePort}} + {{ if .Values.deployment.additionalContainers}} + {{- range .Values.deployment.additionalContainers }} + - port: {{ .containerPort }} + name: {{ .name }} + targetPort: {{ .containerPort }} + nodePort: {{ .nodePort}} + {{- end }} + {{ end }} + selector: + challenge: {{ .Values.deployment.name }} + category: {{ .Values.category }} + egress: {{ .Values.deployment.egress }} + {{- include "ctf-challenge-chart.selectorLabels" . | nindent 4 }} diff --git a/helm-charts/ctf-challenge-chart/values.yaml b/helm-charts/ctf-challenge-chart/values.yaml new file mode 100644 index 0000000..4aabd3b --- /dev/null +++ b/helm-charts/ctf-challenge-chart/values.yaml @@ -0,0 +1,47 @@ +deployment: + replicasNumber: 1 + # Type of the challenge, either tcp or http + # type: http + dockerRepository: gcr.io/gdg-ctf-2022 + # dockerImage: test-image + # If the challenge doesn't expose any port, leave it empty + # containerPort: 80 + # Port that will be exposed on each k8s node + # nodePort: 30002 + limits: + cpu: 200m + memory: 500Mi + requests: + cpu: 100m + memory: 200Mi + + # healthCheck: + # initialDelaySeconds: 10 + # periodSeconds: 100 + + # Allow/Deny egress (outgoing) traffic from the pod + # "allow" -> Allow outgoing traffic + # "deny" -> Deny outgoing traffic + egress: "allow" + + other: + # Pod annotations + podAnnotations: {} + + # additionalContainers: + # - name: nameContainer + # dockerRepository: gcr.io/ + # dockerImage: name-container + # containerPort: 80 + # nodePort: 30002 + # type: http + # resources: + # limits: + # cpu: 100m + # memory: 150Mi + # requests: + # cpu: 50m + # memory: 100Mi + # healthCheck: + # initialDelaySeconds: 10 + # periodSeconds: 30 diff --git a/leetify.py b/leetify.py new file mode 100755 index 0000000..b31421b --- /dev/null +++ b/leetify.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 + +import random +import sys + +leets = { + 'o': '0', + 'i': '1', + 'e': '3', + 'a': '4', + 's': '$', +} + +def leetify(s): + s = ''.join(random.choice((leets.get(c, c), c)) for c in s) + return ''.join(random.choice((c.swapcase(), c)) for c in s) + +if __name__ == "__main__": + if len(sys.argv) < 3: + print(f"Usage: {sys.argv[0]} FLAG_FORMAT FLAG_STRING", file=sys.stderr) + sys.exit(1) + + flag_format = sys.argv[1] + flag_string = sys.argv[2] + + flag = f"{flag_format}{{{leetify(flag_string)}}}" + + print(flag)
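
For reference, the port conventions introduced by this commit fit together as follows: config/ports.yml gives each category a base external HAProxy port, the CHALS.md table assigns each category a 100-port block, and the nodePort set in the chart's values.yaml appears to sit at a fixed +29000 offset from the external port (1000-1699 maps to 30000-30699 in the table). The short Python sketch below only restates those documented conventions, plus the allow-<category>-<name> rule naming used by .scripts/gcp-firewall/create-all.sh; the helper names and the +29000 constant are illustrative assumptions, not code shipped in this patch.

#!/usr/bin/env python3
# Illustrative sketch only (not part of the repository): restates the port and
# firewall-rule conventions from config/ports.yml, CHALS.md and create-all.sh.
# The +29000 offset is an assumption read off the CHALS.md table.

import yaml  # PyYAML, already used by the repository tooling


NODEPORT_OFFSET = 30000 - 1000  # external 1000-1699 <-> nodePort 30000-30699


def external_port(base_ports, category, index):
    """External HAProxy port of the index-th challenge (0-based) in a category."""
    if not 0 <= index < 100:
        raise ValueError("each category spans a 100-port block")
    return base_ports[category] + index


def node_port(port):
    """Kubernetes nodePort corresponding to an external HAProxy port."""
    return port + NODEPORT_OFFSET


def firewall_rule(category, name):
    """GCP firewall rule name in the allow-<category>-<name> scheme."""
    return f"allow-{category}-{name}"


if __name__ == "__main__":
    with open("config/ports.yml") as f:
        base_ports = yaml.safe_load(f)   # e.g. {"web": 1600, "pwn": 1400, ...}

    port = external_port(base_ports, "web", 5)
    print(port, node_port(port))                  # 1605 30605
    print(firewall_rule("web", "example-chal"))   # allow-web-example-chal

Run it from the repository root so config/ports.yml resolves; the printed nodePort should fall inside the category range listed in CHALS.md.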