# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2022-2023 Dell Inc, or its subsidiaries.
---
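# This compose file stands up a local OPI storage demo: an SPDK NVMe-oF
# target, the opi-spdk-bridge gRPC/HTTP server in front of it, Redis and
# Jaeger as supporting services, and two one-shot test clients.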
version: "3.7"
services:
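  # SPDK storage target. Runs privileged with host hugepage/shm mounts
  # because SPDK uses DPDK-style hugepage memory; /var/tmp is shared so
  # other containers can reach the /var/tmp/spdk.sock JSON-RPC socket.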
  spdk:
    image: ghcr.io/opiproject/spdk:main@sha256:aec0957cf166afc52b9b6b5b1e30c2f58758be599f6226b94e74999e555c96aa
    volumes:
      - /dev/hugepages:/dev/hugepages
      - /dev/shm:/dev/shm
      - /proc:/proc
      - /var/tmp:/var/tmp
    ports:
      - "9009:9009"
      - "4444:4444"
      - "5555:5555"
      - "7777:7777"
      - "8888:8888"
    privileged: true
    networks:
      - opi
    working_dir: /usr/libexec/spdk/scripts
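    # Startup script: reserve hugepages, write a TLS PSK for the secure
    # listener, launch spdk_tgt in the background, wait for its JSON-RPC
    # to answer, then provision three Malloc bdevs and an NVMe-oF
    # subsystem (plain TCP listener on 4444, TLS listener on 5555) before
    # exposing the JSON-RPC API over HTTP on port 9009.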
    command: |
      sh -x -c 'sync; echo 1 > /proc/sys/vm/drop_caches && \
      mkdir -p /mnt/huge && \
      grep hugetlbfs /proc/mounts || mount -t hugetlbfs nodev /mnt/huge && \
      echo 1024 > /proc/sys/vm/nr_hugepages && \
      grep "" /sys/kernel/mm/hugepages/hugepages-*/nr_hugepages && \
      dd if=/dev/zero of=/tmp/aio_bdev_file bs=512 count=64 && \
      echo -n NVMeTLSkey-1:01:MDAxMTIyMzM0NDU1NjY3Nzg4OTlhYWJiY2NkZGVlZmZwJEiQ: > /tmp/opikey.txt && \
      chmod 0600 /tmp/opikey.txt && \
      /usr/local/bin/spdk_tgt -m 0x1 -s 512 --no-pci -S /var/tmp |& tee /tmp/spdk.log & \
      for i in `seq 1 10`; do ./rpc.py spdk_get_version && break || sleep 1; done && \
      ./rpc.py bdev_malloc_create -b Malloc0 64 512 && \
      ./rpc.py bdev_malloc_create -b Malloc1 64 512 && \
      ./rpc.py bdev_malloc_create -b Malloc2 64 512 && \
      ./rpc.py nvmf_create_transport -t TCP -u 8192 -m 4 -c 0 && \
      ./rpc.py nvmf_create_transport -t VFIOUSER && \
      ./rpc.py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -d SPDK_Controller1 && \
      ./rpc.py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1 --disable && \
      ./rpc.py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t tcp -a `hostname -i` -f ipv4 -s 4444 && \
      ./rpc.py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t tcp -a `hostname -i` -f ipv4 -s 5555 --secure-channel && \
      ./rpc.py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode1 nqn.2014-08.org.nvmexpress:uuid:feb98abe-d51f-40c8-b348-2753f3571d3c --psk /tmp/opikey.txt && \
      ./rpc.py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc2 -n 3 --nguid ABCDEF0123456789ABCDEF0123456789 && \
      ./rpc_http_proxy.py 0.0.0.0 9009 spdkuser spdkpass'
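    # Healthy once the SPDK JSON-RPC socket answers spdk_get_version;
    # dependent services gate on this via depends_on.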
    healthcheck:
      test: ["CMD-SHELL", "python3 /usr/libexec/spdk/scripts/rpc.py spdk_get_version || exit 1"]
      interval: 6s
      retries: 5
      start_period: 20s
      timeout: 10s
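  # The OPI storage bridge: serves OPI gRPC on 50051 and its HTTP gateway
  # on 8082, translating calls into SPDK JSON-RPC over the shared socket.
  # Traces are exported to Jaeger via OTLP.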
  opi-spdk-server:
    build:
      context: .
    volumes_from:
      - spdk
    environment:
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4317
    ports:
      - "8082:8082"
      - "50051:50051"
    networks:
      - opi
    depends_on:
      spdk:
        condition: service_healthy
      redis:
        condition: service_healthy
    command: sh -c "/opi-spdk-bridge -grpc_port=50051 -http_port=8082 -spdk_addr=/var/tmp/spdk.sock"
    healthcheck:
      test: grpcurl -plaintext localhost:50051 list || exit 1
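  # Key-value store the bridge waits on (see depends_on above); gated by
  # a simple redis-cli healthcheck.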
  redis:
    image: redis:7.2.1-alpine3.18
    networks:
      - opi
    healthcheck:
      test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
  jaeger:
    image: jaegertracing/all-in-one:1.50.0
    ports:
      - "16686:16686"
      - "4317:4317"
      - "4318:4318"
    environment:
      - COLLECTOR_OTLP_ENABLED=true
    networks:
      - opi
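  # One-shot smoke test of the HTTP gateway: fetches an inventory
  # resource once the bridge reports healthy.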
  opi-gw-test:
    image: curlimages/curl:8.3.0
    networks:
      - opi
    depends_on:
      opi-spdk-server:
        condition: service_healthy
    command: curl -qkL http://opi-spdk-server:8082/v1/inventory/1/inventory/2
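  # One-shot gRPC client test: runs the godpu storage test suite against
  # the bridge.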
  opi-spdk-client:
    image: ghcr.io/opiproject/godpu:main@sha256:36ba77c6a6fa62419428e7c8455a728447df8bbd075918c460dbbd1808d43fb6
    networks:
      - opi
    depends_on:
      opi-spdk-server:
        condition: service_healthy
    command: storage test --addr=opi-spdk-server:50051
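# All services share a single user-defined network (default driver).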
networks:
  opi:
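# Typical usage, a sketch assuming grpcurl is installed on the host; the
# ports and paths below mirror the services defined above:
#   docker compose up --build -d
#   docker compose ps                        # wait for spdk and opi-spdk-server to be healthy
#   grpcurl -plaintext localhost:50051 list  # same call the bridge healthcheck makes
#   curl http://localhost:8082/v1/inventory/1/inventory/2
#   docker compose down --volumes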