Add TLS support #808

Merged 1 commit on Aug 19, 2024
.github/workflows/build-container.yml (173 additions, 1 deletion)
@@ -91,7 +91,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        test: ["cli", "cli_change_lb", "state", "multi_gateway", "server", "grpc", "omap_lock", "old_omap", "log_files", "nsid"]
        test: ["cli", "cli_change_lb", "state", "multi_gateway", "server", "grpc", "omap_lock", "old_omap", "log_files", "nsid", "psk"]
    runs-on: ubuntu-latest
    env:
      HUGEPAGES: 512 # for multi gateway test, approx 256 per gateway instance
@@ -350,6 +350,178 @@ jobs:
          make down
          make clean

  demo-secure:
    needs: [build, build-ceph]
    runs-on: ubuntu-latest
    env:
      HUGEPAGES: 512
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup huge-pages
        run: make setup HUGEPAGES=$HUGEPAGES

      - name: Download container images
        uses: actions/download-artifact@v4
        with:
          pattern: container_images*
          merge-multiple: true

      - name: Load container images
        run: |
          docker load < nvmeof.tar
          docker load < nvmeof-cli.tar
          docker load < ceph.tar
          docker load < bdevperf.tar

      - name: Start containers
        timeout-minutes: 3
        run: |
          if ! docker-compose --version 2>&1 > /dev/null ; then
            sudo apt update
            sudo apt install -y docker-compose
          fi
          docker-compose --version
          make up

      - name: Wait for the Gateway to be listening
        timeout-minutes: 3
        run: |
          . .env

          echo using gateway $NVMEOF_IP_ADDRESS port $NVMEOF_GW_PORT
          until nc -z $NVMEOF_IP_ADDRESS $NVMEOF_GW_PORT; do
            echo -n .
            sleep ${{ env.WAIT_INTERVAL_SECS }}
          done

      - name: List containers
        if: success() || failure()
        run: make ps

      - name: List processes
        if: success() || failure()
        run: make top

      - name: Test
        run: |
          . .env
          port2=`expr ${NVMEOF_IO_PORT} + 10`
          make demosecure OPTS=-T NVMEOF_CONTAINER_NAME="ceph-nvmeof_nvmeof_1" HOSTNQN="${NQN}host" HOSTNQN2="${NQN}host2" NVMEOF_IO_PORT2=${port2}

      - name: List resources
        run: |
          # https://github.com/actions/toolkit/issues/766
          shopt -s expand_aliases
          eval $(make alias)
          cephnvmf get_subsystems
          cephnvmf subsystem list
          subs=$(cephnvmf --output stdio --format json subsystem list | grep nqn | sed 's/"nqn": "//' | sed 's/",$//')
          for sub in $subs
          do
            cephnvmf namespace list --subsystem $sub
            cephnvmf listener list --subsystem $sub
            cephnvmf host list --subsystem $sub
          done

      - name: Run bdevperf
        run: |
          # see https://spdk.io/doc/nvmf_multipath_howto.html
          shopt -s expand_aliases
          eval $(make alias)
          . .env
          set -x
          echo -n "ℹ️ Starting bdevperf container"
          docker-compose up -d bdevperf
          sleep 10
          echo "ℹ️ bdevperf start up logs"
          make logs SVC=bdevperf
          eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_SOCKET | tr -d '\n\r' )
          psk_path_prefix="/tmp/psk/"
          psk_path="${psk_path_prefix}${NQN}"
          mkdir -p ${psk_path}
          echo -n "NVMeTLSkey-1:01:YzrPElk4OYy1uUERriPwiiyEJE/+J5ckYpLB+5NHMsR2iBuT:" > ${psk_path}/${NQN}host
          chmod 600 ${psk_path}/${NQN}host
          docker cp ${psk_path_prefix} ceph-nvmeof_bdevperf_1:${psk_path_prefix}

          rpc="/usr/libexec/spdk/scripts/rpc.py"
          port2=`expr ${NVMEOF_IO_PORT} + 10`
          echo "ℹ️ bdevperf bdev_nvme_set_options"
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_set_options -r -1"
          echo "ℹ️ bdevperf tcp connect ip: $NVMEOF_IP_ADDRESS port: $NVMEOF_IO_PORT nqn: $NQN"
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_attach_controller -b Nvme0 -t tcp -a $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT -f ipv4 -n $NQN -q ${NQN}host -l -1 -o 10 --psk ${psk_path}/${NQN}host"
          echo "ℹ️ verify connection list"
          conns=$(cephnvmf --output stdio --format json connection list --subsystem $NQN)
          echo $conns | grep -q '"status": 0'
          echo $conns | grep -q "\"nqn\": \"${NQN}host\""
          echo $conns | grep -q "\"trsvcid\": ${NVMEOF_IO_PORT}"
          echo $conns | grep -q "\"traddr\": \"${NVMEOF_IP_ADDRESS}\""
          echo $conns | grep -q "\"adrfam\": \"ipv4\""
          echo $conns | grep -q "\"trtype\": \"TCP\""
          echo $conns | grep -q "\"qpairs_count\": 1"
          echo $conns | grep -q "\"connected\": true"
          echo $conns | grep -q "\"secure\": true"
          echo $conns | grep -q -v "\"secure\": false"
          echo $conns | grep -q "\"use_psk\": true"
          echo $conns | grep -q "\"use_psk\": false"
          con_cnt=$(echo $conns | xargs -n 2 | grep traddr | grep -v "n/a" | wc -l)
          if [ $con_cnt -ne 1 ]; then
            echo "Number of connections ${con_cnt}, expected 1, list: ${conns}"
            exit 1
          fi
          echo "ℹ️ bdevperf tcp connect ip: $NVMEOF_IP_ADDRESS port: ${port2} nqn: ${NQN}host2"
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_attach_controller -b Nvme1 -t tcp -a $NVMEOF_IP_ADDRESS -s ${port2} -f ipv4 -n $NQN -q "${NQN}host2" -l -1 -o 10"
          echo "ℹ️ verify connection list again"
          conns=$(cephnvmf --output stdio --format json connection list --subsystem $NQN)
          con_cnt=$(echo $conns | xargs -n 2 | grep traddr | grep -v "n/a" | wc -l)
          if [ $con_cnt -ne 2 ]; then
            echo "Number of connections ${con_cnt}, expected 2, list: ${conns}"
            exit 1
          fi
          echo $conns | grep -q "\"nqn\": \"${NQN}host2\""
          echo $conns | grep -q "\"trsvcid\": ${port2}"
          echo $conns | grep -q "\"secure\": true"
          echo $conns | grep -q "\"secure\": false"
          echo $conns | grep -q "\"use_psk\": true"
          echo $conns | grep -q "\"use_psk\": false"
          echo "ℹ️ bdevperf tcp connect ip: $NVMEOF_IP_ADDRESS port: ${port2} nqn: ${NQN}host2"
          echo "ℹ️ bdevperf perform_tests"
          eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_TEST_DURATION | tr -d '\n\r' )
          timeout=$(expr $BDEVPERF_TEST_DURATION \* 2)
          bdevperf="/usr/libexec/spdk/scripts/bdevperf.py"
          make exec SVC=bdevperf OPTS=-T CMD="$bdevperf -v -t $timeout -s $BDEVPERF_SOCKET perform_tests"

      - name: Check coredump existence
        if: success() || failure()
        id: check_coredumps
        uses: andstor/file-existence-action@20b4d2e596410855db8f9ca21e96fbe18e12930b # v2, pinned to SHA for security reasons
        with:
          files: "/tmp/coredump/core.*"

      - name: Upload demo core dumps
        if: steps.check_coredumps.outputs.files_exists == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: core_demo
          path: /tmp/coredump/core.*

      # For debugging purposes (provides an SSH connection to the runner)
      # - name: Setup tmate session
      #   uses: mxschmitt/action-tmate@v3
      #   with:
      #     limit-access-to-actor: true

      - name: Display logs
        if: success() || failure()
        run: make logs OPTS=''

      - name: Tear down
        if: success() || failure()
        run: |
          make down
          make clean

  discovery:
    needs: [build, build-ceph]
    strategy:
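A note on the key used in the Run bdevperf step above: the string written to ${psk_path}/${NQN}host is a TLS pre-shared key in the NVMe/TCP PSK interchange format ("NVMeTLSkey-1:01:<base64 payload>:"). The demo-secure job keeps one key file per host NQN under /tmp/psk/<subsystem NQN>/ and copies that tree into the bdevperf container, where bdev_nvme_attach_controller consumes it through --psk. Below is a minimal sketch of that layout; the subsystem NQN is an assumed example of $NQN from .env, and a real deployment would generate its own key (for example with nvme-cli's gen-tls-key helper) rather than reuse the sample one hard-coded in the workflow.

# Sketch: recreate the PSK file tree the bdevperf step expects.
# The subsystem NQN below is an assumed example of $NQN from .env;
# the host NQN mirrors HOSTNQN="${NQN}host" used by the workflow.
psk_root="/tmp/psk"
subsys_nqn="nqn.2016-06.io.spdk:cnode1"
host_nqn="${subsys_nqn}host"
mkdir -p "${psk_root}/${subsys_nqn}"
# Configured PSK in interchange format; no trailing newline.
printf '%s' "NVMeTLSkey-1:01:YzrPElk4OYy1uUERriPwiiyEJE/+J5ckYpLB+5NHMsR2iBuT:" \
  > "${psk_root}/${subsys_nqn}/${host_nqn}"
chmod 600 "${psk_root}/${subsys_nqn}/${host_nqn}"
# SPDK is then pointed at the file (not the raw key):
#   bdev_nvme_attach_controller ... --psk ${psk_root}/${subsys_nqn}/${host_nqn}

The second bdev_nvme_attach_controller call in the same step deliberately omits --psk, so the job exercises one PSK-secured connection and one unsecured connection side by side, which is what the "secure"/"use_psk" grep checks verify.
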
Makefile (2 additions, 0 deletions)
@@ -7,6 +7,7 @@ TARGET_ARCH := $(shell uname -m | sed -e 's/aarch64/arm64/')
include .env
include mk/containerized.mk
include mk/demo.mk
include mk/demosecure.mk
include mk/misc.mk
include mk/autohelp.mk

@@ -19,6 +20,7 @@ setup: ## Configure huge-pages (requires sudo/root password)
	@echo Setup core dump pattern as /tmp/coredump/core.*
	mkdir -p /tmp/coredump
	sudo mkdir -p /var/log/ceph
	sudo chmod 0755 /var/log/ceph
	sudo bash -c 'echo "|/usr/bin/env tee /tmp/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern'
	sudo bash -c 'echo $(HUGEPAGES) > $(HUGEPAGES_DIR)'
	@echo Actual Hugepages allocation: $$(cat $(HUGEPAGES_DIR))
control/cli.py (22 additions, 8 deletions)
@@ -855,6 +855,7 @@ def listener_add(self, args):
            adrfam=adrfam,
            traddr=traddr,
            trsvcid=args.trsvcid,
            secure=args.secure,
        )

        try:
@@ -960,14 +961,15 @@ def listener_list(self, args):
            for l in listeners_info.listeners:
                adrfam = GatewayEnumUtils.get_key_from_value(pb2.AddressFamily, l.adrfam)
                adrfam = self.format_adrfam(adrfam)
                listeners_list.append([l.host_name, l.trtype, adrfam, f"{l.traddr}:{l.trsvcid}"])
                secure = "Yes" if l.secure else "No"
                listeners_list.append([l.host_name, l.trtype, adrfam, f"{l.traddr}:{l.trsvcid}", secure])
            if len(listeners_list) > 0:
                if args.format == "text":
                    table_format = "fancy_grid"
                else:
                    table_format = "plain"
                listeners_out = tabulate(listeners_list,
                                         headers = ["Host", "Transport", "Address Family", "Address"],
                                         headers = ["Host", "Transport", "Address Family", "Address", "Secure"],
                                         tablefmt=table_format)
                out_func(f"Listeners for {args.subsystem}:\n{listeners_out}")
            else:
@@ -1000,6 +1002,7 @@ def listener_list(self, args):
        argument("--traddr", "-a", help="NVMe host IP", required=True),
        argument("--trsvcid", "-s", help="Port number", type=int, required=False),
        argument("--adrfam", "-f", help="Address family", default="", choices=get_enum_keys_list(pb2.AddressFamily)),
        argument("--secure", help="Use secure channel", action='store_true', required=False),
    ]
    listener_del_args = listener_common_args + [
        argument("--host-name", "-t", help="Host name", required=True),
@@ -1033,7 +1036,10 @@ def host_add(self, args):
        out_func, err_func = self.get_output_functions(args)
        if not args.host:
            self.cli.parser.error("--host argument is mandatory for add command")
        req = pb2.add_host_req(subsystem_nqn=args.subsystem, host_nqn=args.host)
        if args.host == "*" and args.psk:
            self.cli.parser.error("PSK is only allowed for specific hosts")

        req = pb2.add_host_req(subsystem_nqn=args.subsystem, host_nqn=args.host, psk=args.psk)
        try:
            ret = self.stub.add_host(req)
        except Exception as ex:
@@ -1127,16 +1133,17 @@ def host_list(self, args):
        if hosts_info.status == 0:
            hosts_list = []
            if hosts_info.allow_any_host:
                hosts_list.append(["Any host"])
                hosts_list.append(["Any host", "n/a"])
            for h in hosts_info.hosts:
                hosts_list.append([h.nqn])
                use_psk = "Yes" if h.use_psk else "No"
                hosts_list.append([h.nqn, use_psk])
            if len(hosts_list) > 0:
                if args.format == "text":
                    table_format = "fancy_grid"
                else:
                    table_format = "plain"
                hosts_out = tabulate(hosts_list,
                                     headers = [f"Host NQN"],
                                     headers = ["Host NQN", "Uses PSK"],
                                     tablefmt=table_format, stralign="center")
                out_func(f"Hosts allowed to access {args.subsystem}:\n{hosts_out}")
            else:
@@ -1166,6 +1173,7 @@ def host_list(self, args):
    ]
    host_add_args = host_common_args + [
        argument("--host", "-t", help="Host NQN", required=True),
        argument("--psk", help="Host's PSK key", required=False),
    ]
    host_del_args = host_common_args + [
        argument("--host", "-t", help="Host NQN", required=True),
@@ -1204,18 +1212,24 @@ def connection_list(self, args):
        if connections_info.status == 0:
            connections_list = []
            for conn in connections_info.connections:
                conn_secure = "<n/a>"
                conn_psk = "Yes" if conn.use_psk else "No"
                if conn.connected:
                    conn_secure = "Yes" if conn.secure else "No"
                connections_list.append([conn.nqn,
                                         f"{conn.traddr}:{conn.trsvcid}" if conn.connected else "<n/a>",
                                         "Yes" if conn.connected else "No",
                                         conn.qpairs_count if conn.connected else "<n/a>",
                                         conn.controller_id if conn.connected else "<n/a>"])
                                         conn.controller_id if conn.connected else "<n/a>",
                                         conn_secure,
                                         conn_psk])
            if len(connections_list) > 0:
                if args.format == "text":
                    table_format = "fancy_grid"
                else:
                    table_format = "plain"
                connections_out = tabulate(connections_list,
                                           headers = ["Host NQN", "Address", "Connected", "QPairs Count", "Controller ID"],
                                           headers = ["Host NQN", "Address", "Connected", "QPairs Count", "Controller ID", "Secure", "PSK"],
                                           tablefmt=table_format)
                out_func(f"Connections for {args.subsystem}:\n{connections_out}")
            else:
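Taken together, the cli.py changes add a --secure flag to listener add, a --psk option to host add, and new Secure / Uses PSK / PSK columns to the listener, host, and connection listings. A hedged usage sketch of the new flags follows; only --secure and --psk are taken from this diff, while the NQNs, gateway host name, address, and port are made-up examples and other required flags may differ between CLI versions.

# Assumes the cephnvmf alias from 'make alias' is in effect.
NQN="nqn.2016-06.io.spdk:cnode1"
cephnvmf listener add --subsystem $NQN --host-name GW1 \
  --traddr 192.168.13.3 --trsvcid 4420 --secure        # TLS-enabled listener
cephnvmf host add --subsystem $NQN --host "${NQN}host" \
  --psk "NVMeTLSkey-1:01:YzrPElk4OYy1uUERriPwiiyEJE/+J5ckYpLB+5NHMsR2iBuT:"
cephnvmf listener list --subsystem $NQN     # shows the new "Secure" column
cephnvmf host list --subsystem $NQN         # shows the new "Uses PSK" column
cephnvmf connection list --subsystem $NQN   # shows the new "Secure" and "PSK" columns

Note that host_add now rejects a PSK combined with the wildcard host ("*"), matching the "PSK is only allowed for specific hosts" error added above.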