diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5f22399..b60551a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -7,7 +7,7 @@ on: push: branches: - 'master' - - 'dev_security' + - 'dev_stats' tags: - 'v*' diff --git a/Dockerfile b/Dockerfile index d77763e..3814132 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,12 +11,12 @@ RUN yarn install --production # elm doesn't work under alpine 6 or 8 FROM node:10.16.0-buster-slim AS elm-build -WORKDIR /home/node/app RUN npm install --unsafe-perm -g elm@latest-0.18.0 --silent RUN apt-get update; apt-get install -y netbase -COPY elm-package.json ./ +WORKDIR /home/node/app/elm-client +COPY ./elm-client/elm-package.json . RUN elm package install -y -COPY . . +COPY ./elm-client/ /home/node/app/elm-client/ RUN elm make Main.elm --output=client/index.js FROM base AS release @@ -28,11 +28,12 @@ RUN wget -O lego_v4.14.2_linux_amd64.tar.gz https://github.com/go-acme/lego/rele ENV LEGO_PATH=/lego-files COPY --from=dependencies /home/node/app/node_modules node_modules -COPY --from=elm-build /home/node/app/client/ client +COPY --from=elm-build /home/node/app/elm-client/client/ client COPY server server COPY server.sh server.sh COPY crontab /var/spool/cron/crontabs/root +# ENV PORT=8080 # HEALTHCHECK --interval=5s --timeout=3s \ # CMD curl --fail http://localhost:$PORT/_health || exit 1 # HEALTHCHECK --interval=5s --timeout=3s \ diff --git a/README.md b/README.md index 28e4a2e..89412ee 100644 --- a/README.md +++ b/README.md @@ -4,24 +4,15 @@ A simple monitoring dashboard for Docker in Swarm Mode. ![Example Dashboard](./swarm.gif) -## About +Swarm Dashboard shows you all the tasks running on a Docker Swarm organized +by service and node. It provides a space-efficient visualization +and works well at a glance. You can use it as a simple live dashboard of the state of your Swarm. -Swarm dashboard shows you all the tasks running on a Docker Swarm organized -by service and node. It provides a visualization that's space-efficient -and works well at a glance. +It also shows the CPU/Memory/Disk usage of your swarm node and containers. -You can use it as a simple live dashboard of the state of your Swarm. +## Usage -The Dashboard has a node.js server which streams swarm updates to an Elm client -over a WebSocket. - -### Prior art - -* Heavily inspired by [Docker Swarm Visualiser](https://github.com/dockersamples/docker-swarm-visualizer) - -## Running - -At the moment, the dashboard needs to be deployed on one of the swarm managers. +The dashboard needs to be deployed on one of the swarm managers. 
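+
+This is because the server reads the cluster state directly from the Docker Engine API over the mounted `/var/run/docker.sock` (see the compose file below), and the swarm endpoints it uses (`/nodes`, `/services`, `/tasks`) are only served by manager nodes. A minimal sketch of that call, mirroring the `dockerAPIRequest` helper in `server/index.js`:
+
+```js
+const http = require('http');
+
+// Query the local Docker Engine through the unix socket mounted into the container.
+// The swarm endpoints (/nodes, /services, /tasks) only answer on a manager node.
+const dockerAPIRequest = path =>
+  new Promise((resolve, reject) => {
+    let buffer = '';
+    const request = http.request({ socketPath: '/var/run/docker.sock', path, method: 'GET' }, response => {
+      response.on('data', chunk => (buffer += chunk));
+      response.on('end', () => resolve(buffer));
+    });
+    request.on('error', reject);
+    request.end();
+  });
+
+dockerAPIRequest('/nodes')
+  .then(body => console.log(JSON.parse(body).map(node => node.Description.Hostname)))
+  .catch(console.error);
+```
+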
You can configure it with the following Docker compose file: ```yml @@ -29,43 +20,79 @@ You can configure it with the following Docker compose file: version: "3" services: - dashboard: - image: mohsenasm/swarm-dashboard + swarm-dashboard: + image: mohsenasm/swarm-dashboard:dev_stats volumes: - - "/var/run/docker.sock:/var/run/docker.sock" - - lego-files:/lego-files + - /var/run/docker.sock:/var/run/docker.sock ports: - - 8081:8081 + - 8080:8080 environment: - PORT: 8081 + PORT: 8080 ENABLE_AUTHENTICATION: "false" - # ENABLE_AUTHENTICATION: "true" - # AUTHENTICATION_REALM: "KuW2i9GdLIkql" - # USERNAME: "admin" - # PASSWORD: "supersecret" ENABLE_HTTPS: "false" - # ENABLE_HTTPS: "true" - # HTTPS_HOSTNAME: "example.com" - # LEGO_NEW_COMMAND_ARGS: "--accept-tos --email=you@example.com --domains=example.com --dns cloudflare run" - # LEGO_RENEW_COMMAND_ARGS: "--accept-tos --email=you@example.com --domains=example.com --dns cloudflare renew" - # CLOUDFLARE_EMAIL: "you@example.com" - # CLOUDFLARE_API_KEY: "yourprivatecloudflareapikey" + NODE_EXPORTER_SERVICE_NAME_REGEX: "node-exporter" + CADVISOR_SERVICE_NAME_REGEX: "cadvisor" deploy: - replicas: 1 placement: constraints: - node.role == manager + + node-exporter: + image: quay.io/prometheus/node-exporter:v1.6.1 + volumes: + - '/:/host:ro' + command: + - '--path.rootfs=/host' + deploy: + mode: global -volumes: - lego-files: + cadvisor: + image: gcr.io/cadvisor/cadvisor + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + - /dev/disk/:/dev/disk:ro + deploy: + mode: global ``` and deploy with ``` -$ docker stack deploy -c compose.yml svc +$ docker stack deploy -c compose.yml sd ```
+Note that `node-exporter` and `cadvisor` are optional; they are used to fetch node CPU/Memory/Disk usage and containers' CPU/Memory usage, respectively. If you don't need this feature, remove the `NODE_EXPORTER_SERVICE_NAME_REGEX` and `CADVISOR_SERVICE_NAME_REGEX` environment variables.
+
+## Advanced Usage
+
+A list of environment variables for further customization:
+
+| Environment Variable | Example | Considerations |
|--------------------------------------|--------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| PORT | 8080 | HTTP / HTTPS port |
| ENABLE_AUTHENTICATION | true | false by default |
| AUTHENTICATION_REALM | MyRealm | Use this env if ENABLE_AUTHENTICATION is `true`. |
| USERNAME | admin | Use this env if ENABLE_AUTHENTICATION is `true`. |
| PASSWORD | supersecret | Use this env if ENABLE_AUTHENTICATION is `true`. |
| ENABLE_HTTPS | true | false by default |
| LEGO_PATH | /lego-files | Use this env if ENABLE_HTTPS is `true`. Lego is used to create the SSL certificates. Create a named volume for this path to avoid the creation of a new certificate on each run. |
| HTTPS_HOSTNAME | swarm-dashboard.example.com | Use this env if ENABLE_HTTPS is `true`. |
| LEGO_NEW_COMMAND_ARGS | --accept-tos --email=you@swarm-dashboard.example.com --domains=swarm-dashboard.example.com --dns cloudflare run | Use this env if ENABLE_HTTPS is `true`. |
| LEGO_RENEW_COMMAND_ARGS | --accept-tos --email=you@swarm-dashboard.example.com --domains=swarm-dashboard.example.com --dns cloudflare renew | Use this env if ENABLE_HTTPS is `true`. 
| +| CLOUDFLARE_EMAIL | you@example.com | You can use any [DNS provider that Lego supports](https://go-acme.github.io/lego/dns/). | +| CLOUDFLARE_API_KEY | yourprivatecloudflareapikey | You can use any [DNS provider that Lego supports](https://go-acme.github.io/lego/dns/). | +| DOCKER_UPDATE_INTERVAL | 1000 | Refresh interval in ms. | +| METRICS_UPDATE_INTERVAL | 5000 | Refresh interval in ms. | +| NODE_EXPORTER_SERVICE_NAME_REGEX | node-exporter | Use this env to enable `node-exporter` integration. | +| NODE_EXPORTER_INTERESTED_MOUNT_POINT | /rootfs | You may need this config if you have not specified `--path.rootfs` for `node-exporter`. | +| NODE_EXPORTER_PORT | 9100 | | +| CADVISOR_SERVICE_NAME_REGEX | cadvisor | Use this env to enable `cadvisor` integration. | +| CADVISOR_PORT | 8080 | | + + ## Security + We redact docker event data before sending them to the client. The previous version was sending the whole docker event data, including environment variables (someone might have stored some passwords in them, by mistake!). So, please consider using the newer version. @@ -93,12 +120,15 @@ There are two considerations for any serious deployment of the dashboard: * Show more service details (published port, image name, and version) * Node / Service / Task details panel -* Show node / task resources (CPU & Memory) -* Improve security for potential production use -Both feature requests and pull requests are welcome +Both feature requests and pull requests are welcome. + +### Prior art + +* Heavily inspired by [Docker Swarm Visualiser](https://github.com/dockersamples/docker-swarm-visualizer) ## Contributors -* Viktor Charypar (owner, BDFL) - code, docs +* Mohammad-Mohsen Aseman-Manzar (current maintainer) - code, docs +* Viktor Charypar (previous repo owner) - code, docs * Clementine Brown - design diff --git a/compose.yml b/compose.yml index 8f45d73..b17ccea 100644 --- a/compose.yml +++ b/compose.yml @@ -23,6 +23,7 @@ services: # LEGO_RENEW_COMMAND_ARGS: "--accept-tos --email=you@example.com --domains=example.com --dns cloudflare renew" # CLOUDFLARE_EMAIL: "you@example.com" # CLOUDFLARE_API_KEY: "yourprivatecloudflareapikey" + NODE_EXPORTER_SERVICE_NAME_REGEX: "node-exporter" deploy: replicas: 1 update_config: diff --git a/Components.elm b/elm-client/Components.elm similarity index 66% rename from Components.elm rename to elm-client/Components.elm index 96f2b4d..d2d343c 100644 --- a/Components.elm +++ b/elm-client/Components.elm @@ -17,7 +17,7 @@ statusString state desiredState = task : Service -> AssignedTask -> Html msg -task service { status, desiredState, containerSpec, slot } = +task service { status, desiredState, containerSpec, slot, info } = let classes = [ ( status.state, True ) @@ -32,12 +32,39 @@ task service { status, desiredState, containerSpec, slot } = Nothing -> "" + + cpuInfo = + case info.cpu of + Just s -> + [ + div [ class "tag left" ] [ text s ] + ] + + Nothing -> + [] + + memoryInfo = + case info.memory of + Just s -> + [ + div [ class "tag right" ] [ text s ] + ] + + Nothing -> + [] in li [ classList classes ] - [ text (service.name ++ slotLabel slot) - , br [] [] - , text (statusString status.state desiredState) - ] + (List.concat [ + cpuInfo + , (List.concat [ + memoryInfo + , [ text (service.name ++ slotLabel slot) + , br [] [] + , text (statusString status.state desiredState) + ] + ]) + ]) + serviceNode : Service -> TaskIndex -> Node -> Html msg @@ -77,14 +104,29 @@ node node = nodeRole = String.join " " [ node.role, iff leader "(leader)" "" ] + + info = + case 
node.info of + Just s -> + [ + br [] [] + , text (s) + ] + + Nothing -> + [] in th [ classList classes ] - [ strong [] [ text node.name ] - , br [] [] - , text nodeRole - , br [] [] - , text node.status.address - ] + (List.concat [ + [ + strong [] [ text node.name ] + , br [] [] + , text nodeRole + , br [] [] + , text node.status.address + ] + , info + ]) swarmHeader : List Node -> List Network -> Html msg diff --git a/Components/NetworkConnections.elm b/elm-client/Components/NetworkConnections.elm similarity index 100% rename from Components/NetworkConnections.elm rename to elm-client/Components/NetworkConnections.elm diff --git a/Components/Networks.elm b/elm-client/Components/Networks.elm similarity index 68% rename from Components/Networks.elm rename to elm-client/Components/Networks.elm index a1b152e..84de17a 100644 --- a/Components/Networks.elm +++ b/elm-client/Components/Networks.elm @@ -62,8 +62,8 @@ columnStart i = -- SVG shorthand -svgLine : ( Float, Float ) -> ( Float, Float ) -> Float -> String -> Svg msg -svgLine ( ox, oy ) ( dx, dy ) width colour = +svgLine : ( Float, Float ) -> ( Float, Float ) -> Float -> String -> String -> Svg msg +svgLine ( ox, oy ) ( dx, dy ) width colour name = line [ x1 (toString ox) , y1 (toString oy) @@ -72,48 +72,52 @@ svgLine ( ox, oy ) ( dx, dy ) width colour = , strokeWidth (toString width) , stroke colour ] - [] + [ + Svg.title [] [ text name ] + ] -svgCircle : ( Float, Float ) -> Float -> String -> Svg msg -svgCircle ( cenx, ceny ) rad colour = +svgCircle : ( Float, Float ) -> Float -> String -> String -> Svg msg +svgCircle ( cenx, ceny ) rad colour name = circle [ cx (toString cenx) , cy (toString ceny) , r (toString rad) , fill colour ] - [] + [ + Svg.title [] [ text name ] + ] -- Symbol pieces -topLine : Int -> Color -> Svg msg -topLine i color = - svgLine ( columnCenter i, 0 ) ( columnCenter i, 31 ) 2 color +topLine : Int -> Color -> String -> Svg msg +topLine i color name = + svgLine ( columnCenter i, 0 ) ( columnCenter i, 31 ) 2 color name -bottomLine : Int -> Color -> Svg msg -bottomLine i color = - svgLine ( columnCenter i, 31 ) ( columnCenter i, 62 ) 2 color +bottomLine : Int -> Color -> String -> Svg msg +bottomLine i color name = + svgLine ( columnCenter i, 31 ) ( columnCenter i, 62 ) 2 color name -dot : Int -> Color -> Svg msg -dot i color = - svgCircle ( columnCenter i, 31 ) (widthStep / 3) color +dot : Int -> Color -> String -> Svg msg +dot i color name = + svgCircle ( columnCenter i, 31 ) (widthStep / 3) color name -fullLine : Int -> Color -> Svg msg -fullLine i color = - svgLine ( columnCenter i, 0 ) ( columnCenter i, 1 ) 2 color +fullLine : Int -> Color -> String -> Svg msg +fullLine i color name = + svgLine ( columnCenter i, 0 ) ( columnCenter i, 1 ) 2 color name -tcap : Int -> Color -> List (Svg msg) -tcap i color = - [ (svgLine ( (columnStart i) + widthStep / 6, 0 ) ( (columnStart i) + widthStep * 5 / 6, 0 ) 4 color) - , svgLine ( columnCenter i, 0 ) ( columnCenter i, widthStep ) 2 color +tcap : Int -> Color -> String -> List (Svg msg) +tcap i color name = + [ (svgLine ( (columnStart i) + widthStep / 6, 0 ) ( (columnStart i) + widthStep * 5 / 6, 0 ) 4 color name) + , svgLine ( columnCenter i, 0 ) ( columnCenter i, widthStep ) 2 color name ] @@ -126,7 +130,7 @@ head networks = let cap i network = if network.ingress then - tcap i "white" + tcap i "white" network.name else [] in @@ -138,30 +142,33 @@ head networks = (networks |> List.indexedMap cap >> List.concat) -attachments : List Connection -> Array Color -> Svg msg 
-attachments connections colors = +attachments : List Connection -> Array Color -> Array String -> Svg msg +attachments connections colors names = let symbol : Int -> Connection -> List (Svg msg) symbol i connection = let color = Maybe.withDefault "white" (Array.get i colors) + + name = + Maybe.withDefault "" (Array.get i names) in case connection of Through -> - [ topLine i color, bottomLine i color ] + [ topLine i color name, bottomLine i color name ] Start -> - [ dot i color, bottomLine i color ] + [ dot i color name, bottomLine i color name ] Middle -> - [ topLine i color, dot i color, bottomLine i color ] + [ topLine i color name, dot i color name, bottomLine i color name ] End -> - [ topLine i color, dot i color ] + [ topLine i color name, dot i color name ] Only -> - [ dot i color ] + [ dot i color name ] None -> [] @@ -171,16 +178,19 @@ attachments connections colors = (connections |> List.indexedMap symbol >> List.concat) -tails : List Connection -> Array Color -> Svg msg -tails connections colors = +tails : List Connection -> Array Color -> Array String -> Svg msg +tails connections colors names = let symbol i connection = let color = Maybe.withDefault "white" (Array.get i colors) + + name = + Maybe.withDefault "" (Array.get i names) in if List.member connection [ Start, Middle, Through ] then - [ fullLine i color ] + [ fullLine i color name ] else [] in @@ -215,8 +225,11 @@ connections service networkConnections = colors = networkConnections.networks |> Array.fromList << List.indexedMap (\i n -> iff n.ingress "white" (networkColor i)) + + names = + networkConnections.networks |> Array.fromList << List.indexedMap (\i n -> n.name) in H.td [ class "networks" ] - [ attachments connections colors - , H.div [] [ tails connections colors ] + [ attachments connections colors names + , H.div [] [ tails connections colors names ] ] diff --git a/Docker.elm b/elm-client/Docker.elm similarity index 100% rename from Docker.elm rename to elm-client/Docker.elm diff --git a/Docker/Json.elm b/elm-client/Docker/Json.elm similarity index 89% rename from Docker/Json.elm rename to elm-client/Docker/Json.elm index d8ab2ed..c55a345 100644 --- a/Docker/Json.elm +++ b/elm-client/Docker/Json.elm @@ -27,12 +27,13 @@ managerStatus = node : Json.Decoder Node node = - Json.map5 Node + Json.map6 Node (Json.at [ "ID" ] Json.string) (Json.at [ "Description", "Hostname" ] Json.string) (Json.at [ "Spec", "Role" ] Json.string) (Json.at [ "Status" ] nodeStatus) (Json.maybe (Json.at [ "ManagerStatus" ] managerStatus)) + (Json.maybe (Json.at [ "info" ] Json.string)) network : Json.Decoder Network @@ -73,9 +74,16 @@ taskStatus = (Json.at [ "State" ] Json.string) +taskInfo : Json.Decoder TaskInfo +taskInfo = + Json.map2 TaskInfo + (Json.maybe (Json.at [ "cpu" ] Json.string)) + (Json.maybe (Json.at [ "memory" ] Json.string)) + + task : Json.Decoder Task task = - Json.map7 Task + Json.map8 Task (Json.at [ "ID" ] Json.string) (Json.at [ "ServiceID" ] Json.string) (Json.maybe (Json.at [ "NodeID" ] Json.string)) @@ -83,6 +91,7 @@ task = (Json.at [ "Status" ] taskStatus) (Json.at [ "DesiredState" ] Json.string) (Json.at [ "Spec", "ContainerSpec" ] containerSpec) + (Json.at [ "info" ] taskInfo) dockerApi : Json.Decoder DockerApiData diff --git a/Docker/Types.elm b/elm-client/Docker/Types.elm similarity index 92% rename from Docker/Types.elm rename to elm-client/Docker/Types.elm index bfbf683..61210a9 100644 --- a/Docker/Types.elm +++ b/elm-client/Docker/Types.elm @@ -38,6 +38,7 @@ type alias Node = , role : String , status : 
NodeStatus , managerStatus : Maybe ManagerStatus + , info : Maybe String } @@ -70,6 +71,12 @@ type alias TaskStatus = } +type alias TaskInfo = + { cpu : Maybe String + , memory : Maybe String + } + + type alias Task = { id : String , serviceId : String @@ -78,6 +85,7 @@ type alias Task = , status : TaskStatus , desiredState : String , containerSpec : ContainerSpec + , info : TaskInfo } @@ -104,12 +112,13 @@ type alias AssignedTask = , status : TaskStatus , desiredState : String , containerSpec : ContainerSpec + , info : TaskInfo } assignedTask : Task -> AssignedTask -assignedTask { id, serviceId, nodeId, slot, status, desiredState, containerSpec } = - AssignedTask id serviceId (Maybe.withDefault "" nodeId) slot status desiredState containerSpec +assignedTask { id, serviceId, nodeId, slot, status, desiredState, containerSpec, info } = + AssignedTask id serviceId (Maybe.withDefault "" nodeId) slot status desiredState containerSpec info type alias Docker = diff --git a/Main.elm b/elm-client/Main.elm similarity index 100% rename from Main.elm rename to elm-client/Main.elm diff --git a/Util.elm b/elm-client/Util.elm similarity index 100% rename from Util.elm rename to elm-client/Util.elm diff --git a/client/docker_logo.svg b/elm-client/client/docker_logo.svg similarity index 100% rename from client/docker_logo.svg rename to elm-client/client/docker_logo.svg diff --git a/client/index.html b/elm-client/client/index.html similarity index 93% rename from client/index.html rename to elm-client/client/index.html index e230188..a0383e0 100644 --- a/client/index.html +++ b/elm-client/client/index.html @@ -163,6 +163,20 @@ border-color: rgb(228, 0, 52); background-color: rgba(228, 0, 52, 0.6); } + + .tag.left { + float: left; + } + .tag.right { + float: right; + } + .tag { + border: 0.05rem solid; + border-radius: 0.4rem; + padding: 0.1rem 0.2rem; + border-color: white; + font-size: 0.7rem; + } diff --git a/elm-package.json b/elm-client/elm-package.json similarity index 100% rename from elm-package.json rename to elm-client/elm-package.json diff --git a/package-lock.json b/package-lock.json index 981b4cd..aa24b37 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,6 +11,7 @@ "dependencies": { "express": "^4.15.2", "express-basic-auth": "^1.2.1", + "parse-prometheus-text-format": "^1.1.1", "ramda": "^0.24.1", "uuid": "^9.0.1", "ws": "^8.14.2" @@ -1632,6 +1633,14 @@ "node": ">=0.10.0" } }, + "node_modules/parse-prometheus-text-format": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/parse-prometheus-text-format/-/parse-prometheus-text-format-1.1.1.tgz", + "integrity": "sha512-dBlhYVACjRdSqLMFe4/Q1l/Gd3UmXm8ruvsTi7J6ul3ih45AkzkVpI5XHV4aZ37juGZW5+3dGU5lwk+QLM9XJA==", + "dependencies": { + "shallow-equal": "^1.2.0" + } + }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", @@ -1961,6 +1970,11 @@ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" }, + "node_modules/shallow-equal": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/shallow-equal/-/shallow-equal-1.2.1.tgz", + "integrity": "sha512-S4vJDjHHMBaiZuT9NPb616CSmLf618jawtv3sufLl6ivK8WocjAo58cXwbRV1cgqxH0Qbv+iUt6m05eqEa2IRA==" + }, "node_modules/shebang-command": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", diff --git a/package.json 
b/package.json index 99aa21e..31de52f 100644 --- a/package.json +++ b/package.json @@ -6,6 +6,7 @@ "dependencies": { "express": "^4.15.2", "express-basic-auth": "^1.2.1", + "parse-prometheus-text-format": "^1.1.1", "ramda": "^0.24.1", "uuid": "^9.0.1", "ws": "^8.14.2" diff --git a/rebuild.sh b/rebuild.sh deleted file mode 100755 index a5413bc..0000000 --- a/rebuild.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env sh - -docker-compose -f compose.yml build -docker-compose -f compose.yml push - -docker stack deploy -c compose.yml svc diff --git a/server/index.js b/server/index.js index 915e579..962d627 100644 --- a/server/index.js +++ b/server/index.js @@ -2,6 +2,7 @@ var fs = require('fs'); var http = require('http'); var https = require('https'); const { createHash } = require('crypto'); +const parsePrometheusTextFormat = require('parse-prometheus-text-format'); const ws = require('ws'); const express = require('express'); @@ -15,25 +16,52 @@ const realm = process.env.AUTHENTICATION_REALM || "KuW2i9GdLIkql"; const enableAuthentication = process.env.ENABLE_AUTHENTICATION === "true" const username = process.env.USERNAME || "admin"; const password = process.env.PASSWORD || "supersecret"; -const enableHTTPS = process.env.ENABLE_HTTPS === "true" -const legoPath = process.env.LEGO_PATH -const httpsHostname = process.env.HTTPS_HOSTNAME +const enableHTTPS = process.env.ENABLE_HTTPS === "true"; +const legoPath = process.env.LEGO_PATH || "/lego-files"; +const httpsHostname = process.env.HTTPS_HOSTNAME; +const dockerUpdateInterval = parseInt(process.env.DOCKER_UPDATE_INTERVAL || "1000"); +const metricsUpdateInterval = parseInt(process.env.METRICS_UPDATE_INTERVAL|| "5000"); + +const _nodeExporterServiceNameRegex = process.env.NODE_EXPORTER_SERVICE_NAME_REGEX || ""; +const useNodeExporter = _nodeExporterServiceNameRegex !== ""; +const nodeExporterServiceNameRegex = new RegExp(_nodeExporterServiceNameRegex); +const nodeExporterInterestedMountPoint = process.env.NODE_EXPORTER_INTERESTED_MOUNT_POINT || "/"; +const nodeExporterPort = process.env.NODE_EXPORTER_PORT || "9100"; + +const _cadvisorServiceNameRegex = process.env.CADVISOR_SERVICE_NAME_REGEX || ""; +const useCadvisor = _cadvisorServiceNameRegex !== ""; +const cadvisorServiceNameRegex = new RegExp(_cadvisorServiceNameRegex); +const cadvisorPort = process.env.CADVISOR_PORT || "8080"; -const baseOptions = { - method: 'GET', - socketPath: '/var/run/docker.sock', -}; const sha1OfData = data => createHash('sha1').update(JSON.stringify(data)).digest('hex'); +const sum = (arr) => { + var res = 0; for (let i = 0; i < arr.length; i++) { res += arr[i]; } return res; +} + +function formatBytes(bytes, decimals = 0) { + if (!+bytes) return '0 Bytes' + const k = 1000 + const dm = decimals < 0 ? 
0 : decimals + const sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return `${parseFloat((bytes / Math.pow(k, i)).toFixed(dm))}${sizes[i]}` +} + // Docker API integration +const dockerRequestBaseOptions = { + method: 'GET', + socketPath: '/var/run/docker.sock', +}; + const dockerAPIRequest = path => { return new Promise((res, rej) => { let buffer = ''; - const r = http.request({ ...baseOptions, path }, response => { + const r = http.request({ ...dockerRequestBaseOptions, path }, response => { response.on('data', chunk => (buffer = buffer + chunk)); response.on('end', () => res(buffer)); }); @@ -44,7 +72,7 @@ const dockerAPIRequest = path => { }); }; -const fetchData = () => +const fetchDockerData = () => Promise.all([ dockerAPIRequest('/nodes').then(JSON.parse), dockerAPIRequest('/services').then(JSON.parse), @@ -57,18 +85,49 @@ const fetchData = () => tasks, })); +// Fetch metrics + +const metricRequest = (url) => { + return new Promise((res, rej) => { + let buffer = ''; + + const r = http.request(url, response => { + response.on('data', chunk => (buffer = buffer + chunk)); + response.on('end', () => res(buffer)); + }); + + r.on('error', rej); + + r.end(); + }); +}; + +const fetchMetrics = (addresses) => { + let promises = []; + for (let i = 0; i < addresses.length; i++) { + promises.push(metricRequest(addresses[i]).then(parsePrometheusTextFormat)); + } + return Promise.all(promises); +} + // Docker API returns networks in an undefined order, this // stabilizes the order for effective caching const stabilize = data => { return { ...data, networks: sortBy(prop('Id'), data.networks) }; }; -const redact = data => { +const parseAndRedactDockerData = data => { let nodes = []; let networks = []; let services = []; let tasks = []; + let nodeExporterServiceIDs = []; + let runningNodeExportes = []; + let cadvisorServiceIDs = []; + let runningCadvisors = []; + let runningTasksID = []; + for (let i = 0; i < data.nodes.length; i++) { const baseNode = data.nodes[i]; let node = { @@ -132,6 +191,17 @@ const redact = data => { } } services.push(service); + + if (useNodeExporter) { + if (nodeExporterServiceNameRegex.test(baseService["Spec"]["Name"])) { + nodeExporterServiceIDs.push(baseService["ID"]); + } + } + if (useCadvisor) { + if (cadvisorServiceNameRegex.test(baseService["Spec"]["Name"])) { + cadvisorServiceIDs.push(baseService["ID"]); + } + } } for (let i = 0; i < data.tasks.length; i++) { @@ -149,17 +219,275 @@ const redact = data => { "Image": baseTask["Spec"]["ContainerSpec"]["Image"] } }, + "info": {} // for cpu and memory }; if (baseTask["NodeID"] !== undefined) task["NodeID"] = baseTask["NodeID"] if (baseTask["Slot"] !== undefined) task["Slot"] = baseTask["Slot"] tasks.push(task); + + // get addresses for metrics + if (nodeExporterServiceIDs.length > 0) { + if ((nodeExporterServiceIDs.includes(baseTask["ServiceID"])) && + (baseTask["Status"]["State"] === "running") && + (baseTask["NodeID"] !== undefined) && + (baseTask["NetworksAttachments"] !== undefined)) { + let ipList = []; + // TODO: we use ip of the accessible network instead of ipList[0] + for (let j = 0; j < baseTask["NetworksAttachments"].length; j++) { + for (let k = 0; k < baseTask["NetworksAttachments"][j]["Addresses"].length; k++) { + let ip = baseTask["NetworksAttachments"][j]["Addresses"][k]; + ipList.push(ip.split("/")[0]); + } + } + runningNodeExportes.push({ nodeID: baseTask["NodeID"], address: ipList[0] }); + } + } + if (cadvisorServiceIDs.length > 0) { 
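+          // Collect the overlay-network IP of each running cadvisor task so its
+          // Prometheus /metrics endpoint can be scraped later by fetchTasksMetrics.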
+ if ((cadvisorServiceIDs.includes(baseTask["ServiceID"])) && + (baseTask["Status"]["State"] === "running") && + (baseTask["NetworksAttachments"] !== undefined)) { + let ipList = []; + // TODO: we use ip of the accessible network instead of ipList[0] + for (let j = 0; j < baseTask["NetworksAttachments"].length; j++) { + for (let k = 0; k < baseTask["NetworksAttachments"][j]["Addresses"].length; k++) { + let ip = baseTask["NetworksAttachments"][j]["Addresses"][k]; + ipList.push(ip.split("/")[0]); + } + } + runningCadvisors.push({ address: ipList[0] }); + } + } + if (baseTask["Status"]["State"] === "running") { + runningTasksID.push(baseTask["ID"]); + } } - return { nodes, networks, services, tasks }; + return { + data: { nodes, networks, services, tasks }, + runningNodeExportes, runningCadvisors, runningTasksID + }; }; +const findMetricValue = (metrics, name, searchLabels) => { + let values = findAllMetricValue(metrics, name, searchLabels); + if (values.length > 0) { + return values[0] + } + return undefined; +} + + +const findAllMetricValue = (metrics, name, searchLabels) => { + let results = []; + for (let i = 0; i < metrics.length; i++) { + const metricsParent = metrics[i]; + if (metricsParent.name === name) { + for (let j = 0; j < metricsParent.metrics.length; j++) { + const metric = metricsParent.metrics[j]; + let allLabelsExists = true; + if (metric.labels !== undefined) { + for (let k = 0; k < searchLabels.length; k++) { + const label = searchLabels[k]; + if (label.value !== undefined) { + if (metric.labels[label.name] !== label.value) { + allLabelsExists = false; + } + } else if (label.notValue !== undefined) { + if (metric.labels[label.name] === label.notValue) { + allLabelsExists = false; + } + } + } + } + if (allLabelsExists) { + results.push(parseFloat(metric.value)); + } + } + } + } + return results; +} + + +const currentTime = () => Math.floor(Date.now() / 1000); + +const fetchNodeMetrics = ({ lastData, lastRunningNodeExportes, lastNodeMetrics }, callback) => { + let nodeMetrics = []; + if (lastRunningNodeExportes.length > 0) { // should fetch metrics + fetchMetrics(lastRunningNodeExportes.map(({ address }) => `http://${address}:${nodeExporterPort}/metrics`)) + .then(metricsList => { + for (let i = 0; i < lastData.nodes.length; i++) { + let node = lastData.nodes[i]; + for (let j = 0; j < lastRunningNodeExportes.length; j++) { + const nodeExporterTask = lastRunningNodeExportes[j]; + if (node["ID"] === nodeExporterTask.nodeID) { + const metricsOfThisNode = metricsList[j]; + const metricToSave = { nodeID: node["ID"], fetchTime: currentTime() }; + + // last metrics + let lastMetricsOfThisNode = {}; + let timeDiffFromLastMetrics = 0; + for (let k = 0; k < lastNodeMetrics.length; k++) { + if (lastNodeMetrics[k].nodeID === node["ID"]) { + lastMetricsOfThisNode = lastNodeMetrics[k]; + timeDiffFromLastMetrics = metricToSave.fetchTime - lastMetricsOfThisNode.fetchTime + break; + } + } + + // disk + let freeDisk = findMetricValue(metricsOfThisNode, "node_filesystem_avail_bytes", [{ name: "mountpoint", value: nodeExporterInterestedMountPoint }]); + let totalDisk = findMetricValue(metricsOfThisNode, "node_filesystem_size_bytes", [{ name: "mountpoint", value: nodeExporterInterestedMountPoint }]); + if ((freeDisk !== undefined) && (totalDisk !== undefined)) { + metricToSave.diskFullness = Math.round((totalDisk - freeDisk) * 100 / totalDisk); + } + + // cpu + metricToSave.cpuSecondsTotal = sum(findAllMetricValue(metricsOfThisNode, "node_cpu_seconds_total", [{ name: "mode", notValue: "idle" 
}])); + if ( + (metricToSave.cpuSecondsTotal !== undefined) && + (lastMetricsOfThisNode.cpuSecondsTotal !== undefined) && + (timeDiffFromLastMetrics > 0) + ) { + metricToSave.cpuPercent = Math.round((metricToSave.cpuSecondsTotal - lastMetricsOfThisNode.cpuSecondsTotal) * 100 / timeDiffFromLastMetrics); + } + + // memory + let node_memory_MemFree_bytes = findMetricValue(metricsOfThisNode, "node_memory_MemFree_bytes", []); + let node_memory_Cached_bytes = findMetricValue(metricsOfThisNode, "node_memory_Cached_bytes", []); + let node_memory_Buffers_bytes = findMetricValue(metricsOfThisNode, "node_memory_Buffers_bytes", []); + let node_memory_MemTotal_bytes = findMetricValue(metricsOfThisNode, "node_memory_MemTotal_bytes", []); + if ( + (node_memory_MemFree_bytes !== undefined) && + (node_memory_Cached_bytes !== undefined) && + (node_memory_Buffers_bytes !== undefined) && + (node_memory_MemTotal_bytes !== undefined) && + (node_memory_MemTotal_bytes > 0) + ) { + metricToSave.memoryPercent = Math.round( + 100 * (1 - ((node_memory_MemFree_bytes + node_memory_Cached_bytes + node_memory_Buffers_bytes) / node_memory_MemTotal_bytes)) + ); + } + + nodeMetrics.push(metricToSave); + } + } + } + callback(nodeMetrics); + }) + .catch(e => { + console.error('Could not fetch node metrics', e) + callback(nodeMetrics); + }); + } else { + callback(nodeMetrics); + } +} + +const fetchTasksMetrics = ({ lastRunningCadvisors, lastRunningTasksMetrics, lastRunningTasksID }, callback) => { + let runningTasksMetrics = []; + if (lastRunningCadvisors.length > 0) { // should fetch metrics + fetchMetrics(lastRunningCadvisors.map(({ address }) => `http://${address}:${cadvisorPort}/metrics`)) + .then(metricsList => { + let allMetrics = []; + for (let i = 0; i < metricsList.length; i++) { + allMetrics = allMetrics.concat(metricsList[i]); + } + for (let i = 0; i < lastRunningTasksID.length; i++) { + let taskID = lastRunningTasksID[i]; + const metricToSave = { taskID, fetchTime: currentTime() }; + + // last metrics + let lastMetricsOfThisTask = {}; + let timeDiffFromLastMetrics = 0; + for (let k = 0; k < lastRunningTasksMetrics.length; k++) { + if (lastRunningTasksMetrics[k].taskID === taskID) { + lastMetricsOfThisTask = lastRunningTasksMetrics[k]; + timeDiffFromLastMetrics = metricToSave.fetchTime - lastMetricsOfThisTask.fetchTime + break; + } + } + + // cpu + metricToSave.cpuSecondsTotal = sum(findAllMetricValue(allMetrics, "container_cpu_usage_seconds_total", [{ name: "container_label_com_docker_swarm_task_id", value: taskID }])); + if ( + (lastMetricsOfThisTask.cpuSecondsTotal !== undefined) && + (timeDiffFromLastMetrics > 0) + ) { + metricToSave.cpuPercent = Math.round((metricToSave.cpuSecondsTotal - lastMetricsOfThisTask.cpuSecondsTotal) * 100 / timeDiffFromLastMetrics); + } + + // memory + metricToSave.memoryBytes = findMetricValue(allMetrics, "container_memory_rss", [{ name: "container_label_com_docker_swarm_task_id", value: taskID }]); + // let memoryUsage = findMetricValue(allMetrics, "container_memory_usage_bytes", [{ name: "container_label_com_docker_swarm_task_id", value: taskID }]); + // let memoryCache = findMetricValue(allMetrics, "container_memory_cache", [{ name: "container_label_com_docker_swarm_task_id", value: taskID }]); + // console.log(memoryUsage, memoryCache); + // if ( + // (memoryUsage !== undefined) && + // (memoryCache !== undefined) + // ) { + // metricToSave.memoryBytes = memoryUsage - memoryCache + // } + + runningTasksMetrics.push(metricToSave); + } + callback(runningTasksMetrics); + }) + 
.catch(e => { + console.error('Could not fetch tasks metrics', e) + callback(runningTasksMetrics); + }); + } else { + callback(runningTasksMetrics); + } +} + +const addNodeMetricsToData = (data, lastNodeMetrics) => { + for (let i = 0; i < data.nodes.length; i++) { + const node = data.nodes[i]; + for (let j = 0; j < lastNodeMetrics.length; j++) { + const nodeMetric = lastNodeMetrics[j]; + if (nodeMetric.nodeID === node["ID"]) { + let info = ""; + if (nodeMetric.diskFullness !== undefined) { + info += `disk: ${nodeMetric.diskFullness}%`; + } + if (nodeMetric.cpuPercent !== undefined) { + if (info) + info += " | " + info += `cpu: ${nodeMetric.cpuPercent}%`; + } + if (nodeMetric.memoryPercent !== undefined) { + if (info) + info += " | " + info += `mem: ${nodeMetric.memoryPercent}%`; + } + if (info) { + node.info = info; + } + } + } + } +} +const addTaskMetricsToData = (data, lastRunningTasksMetrics) => { + for (let i = 0; i < data.tasks.length; i++) { + const task = data.tasks[i]; + for (let j = 0; j < lastRunningTasksMetrics.length; j++) { + const taskMetric = lastRunningTasksMetrics[j]; + if (taskMetric.taskID === task["ID"]) { + if (taskMetric.cpuPercent !== undefined) { + task.info.cpu = `cpu: ${taskMetric.cpuPercent}%`; + } + if (taskMetric.memoryBytes !== undefined) { + task.info.memory = `mem: ${formatBytes(taskMetric.memoryBytes)}`; + } + } + } + } +} + // WebSocket pub-sub const publish = (listeners, data) => { @@ -212,32 +540,71 @@ if (enableAuthentication) { }); } -// app.get('/data', (req, res) => { -// fetchData().then(it => res.send(redact(it))).catch(e => res.send(e.toString())); +// app.get('/debug-docker-data', (req, res) => { +// fetchDockerData().then(it => res.send(it)).catch(e => res.send(e.toString())); +// }); + +// app.get('/debug-metrics', (req, res) => { +// fetchMetrics(lastRunningNodeExportes.map(({ address }) => `http://${address}:9100/metrics`)).then(it => res.send(it)).catch(e => res.send(e.toString())); +// }); + +// app.get('/debug-log', (req, res) => { +// // console.log("lastRunningNodeExportes", lastRunningNodeExportes); +// // console.log("lastNodeMetrics", lastNodeMetrics); +// // console.log("lastRunningCadvisors", lastRunningCadvisors); +// console.log("lastRunningTasksID", lastRunningTasksID); +// // console.log("lastRunningTasksMetrics", lastRunningTasksMetrics); +// console.log("---------------"); +// res.send("logged.") // }); // start the polling +let lastRunningNodeExportes = []; +let lastNodeMetrics = []; +let lastRunningCadvisors = []; +let lastRunningTasksID = []; +let lastRunningTasksMetrics = []; + let listeners = []; let lastData = {}; let lastSha = ''; -setInterval(() => { - fetchData() +setInterval(() => { // update docker data + fetchDockerData() .then(it => { - listeners = dropClosed(listeners); + let { data, runningNodeExportes, runningCadvisors, runningTasksID } = parseAndRedactDockerData(it); + addNodeMetricsToData(data, lastNodeMetrics); // it makes fetching of main data and node metrics independent. + addTaskMetricsToData(data, lastRunningTasksMetrics); // it makes fetching of main data and node metrics independent. 
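+      // The stabilize + SHA-1 comparison below makes sure connected clients are only
+      // sent a new payload when the (metrics-enriched) data actually changed.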
- const data = stabilize(redact(it)); + data = stabilize(data); const sha = sha1OfData(data); if (sha == lastSha) return; lastSha = sha; lastData = data; + lastRunningNodeExportes = runningNodeExportes; + lastRunningCadvisors = runningCadvisors; + lastRunningTasksID = runningTasksID; + + listeners = dropClosed(listeners); publish(listeners, data); }) .catch(e => console.error('Could not publish', e)); // eslint-disable-line no-console -}, 500); +}, dockerUpdateInterval); // refreshes every dockerUpdateInterval ms (1s by default) + +setInterval(() => { // update node metrics + fetchNodeMetrics({ lastData, lastRunningNodeExportes, lastNodeMetrics }, (nodeMetrics) => { + lastNodeMetrics = nodeMetrics; + }) +}, metricsUpdateInterval); // refreshes every metricsUpdateInterval ms (5s by default) + +setInterval(() => { // update task metrics + fetchTasksMetrics({ lastRunningCadvisors, lastRunningTasksMetrics, lastRunningTasksID }, (runningTasksMetrics) => { + lastRunningTasksMetrics = runningTasksMetrics; + }) +}, metricsUpdateInterval); // refreshes every metricsUpdateInterval ms (5s by default) function onWSConnection(ws, req) { let params = undefined; diff --git a/swarm.gif b/swarm.gif index d402634..0436cc5 100644 Binary files a/swarm.gif and b/swarm.gif differ diff --git a/test-cluster/.gitignore b/test-cluster/.gitignore new file mode 100644 index 0000000..c22bcb2 --- /dev/null +++ b/test-cluster/.gitignore @@ -0,0 +1,2 @@ +data +.vagrant \ No newline at end of file diff --git a/test-cluster/Vagrantfile b/test-cluster/Vagrantfile new file mode 100644 index 0000000..db53e2d --- /dev/null +++ b/test-cluster/Vagrantfile @@ -0,0 +1,57 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + config.vm.define "manager1" do |node| + node.vm.box = "gusztavvargadr/docker-community-ubuntu-server" + node.vm.network :private_network, ip: "10.0.0.10" + node.vm.hostname = "manager1" + + node.vm.network "forwarded_port", guest: 8080, host: 8080 + + node.vm.synced_folder "..", "/vagrant_parent" + node.vm.synced_folder "./data", "/vagrant_data" + node.vm.provision "shell", path: "wait-for-docker.sh" + node.vm.provision "shell", inline: <<-SHELL + docker swarm init --advertise-addr 10.0.0.10 + docker swarm join-token manager -q > /vagrant_data/swarm-manager-token + docker swarm join-token worker -q > /vagrant_data/swarm-worker-token + SHELL + end + + config.vm.define "manager2" do |node| + node.vm.box = "gusztavvargadr/docker-community-ubuntu-server" + node.vm.network :private_network, ip: "10.0.0.11" + node.vm.hostname = "manager2" + + node.vm.synced_folder "./data", "/vagrant_data" + node.vm.provision "shell", path: "wait-for-docker.sh" + node.vm.provision "shell", inline: <<-SHELL + docker swarm join --token $(cat /vagrant_data/swarm-manager-token) 10.0.0.10:2377 + SHELL + end + + config.vm.define "worker1" do |node| + node.vm.box = "gusztavvargadr/docker-community-ubuntu-server" + node.vm.network :private_network, ip: "10.0.0.21" + node.vm.hostname = "worker1" + + node.vm.synced_folder "./data", "/vagrant_data" + node.vm.provision "shell", path: "wait-for-docker.sh" + node.vm.provision "shell", inline: <<-SHELL + docker swarm join --token $(cat /vagrant_data/swarm-worker-token) 10.0.0.10:2377 + SHELL + end + + config.vm.define "worker2" do |node| + node.vm.box = "gusztavvargadr/docker-community-ubuntu-server" + node.vm.network :private_network, ip: "10.0.0.22" + node.vm.hostname = "worker2" + + node.vm.synced_folder "./data", "/vagrant_data" + node.vm.provision "shell", path: "wait-for-docker.sh" + node.vm.provision "shell", inline: <<-SHELL + docker swarm join --token $(cat 
/vagrant_data/swarm-worker-token) 10.0.0.10:2377 + SHELL + end +end diff --git a/test-cluster/commands.md b/test-cluster/commands.md new file mode 100644 index 0000000..7365bc2 --- /dev/null +++ b/test-cluster/commands.md @@ -0,0 +1,17 @@ +# Up + vagrant up + +# Run swarm-dashboard (image from Docker Hub) + vagrant ssh manager1 + docker stack deploy -c /vagrant/compose-all.yml sd + +# Run swarm-dashboard (build locally) + vagrant ssh manager1 + docker stack deploy -c /vagrant_parent/test-cluster/compose-metrics.yml sd + docker-compose -f /vagrant_parent/test-cluster/compose-dashboard.yml up --build + +# Shutdown + vagrant halt + +# Destroy + vagrant destroy -f \ No newline at end of file diff --git a/test-cluster/compose-all.yml b/test-cluster/compose-all.yml new file mode 100644 index 0000000..2305ec1 --- /dev/null +++ b/test-cluster/compose-all.yml @@ -0,0 +1,39 @@ +version: "3" + +services: + swarm-dashboard: + image: mohsenasm/swarm-dashboard:dev_stats + volumes: + - /var/run/docker.sock:/var/run/docker.sock + ports: + - 8080:8080 + environment: + PORT: 8080 + ENABLE_AUTHENTICATION: "false" + ENABLE_HTTPS: "false" + NODE_EXPORTER_SERVICE_NAME_REGEX: "node-exporter" + CADVISOR_SERVICE_NAME_REGEX: "cadvisor" + deploy: + placement: + constraints: + - node.role == manager + + node-exporter: + image: quay.io/prometheus/node-exporter:v1.6.1 + volumes: + - '/:/host:ro' + command: + - '--path.rootfs=/host' + deploy: + mode: global + + cadvisor: + image: gcr.io/cadvisor/cadvisor + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + - /dev/disk/:/dev/disk:ro + deploy: + mode: global diff --git a/test-cluster/compose-dashboard.yml b/test-cluster/compose-dashboard.yml new file mode 100644 index 0000000..2310098 --- /dev/null +++ b/test-cluster/compose-dashboard.yml @@ -0,0 +1,22 @@ +version: "3.2" + +services: + swarm-dashboard: + build: .. 
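+    # Build the dashboard image from the repository root (one level above test-cluster/), where the project's Dockerfile lives.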
+ volumes: + - /var/run/docker.sock:/var/run/docker.sock + ports: + - 8080:8080 + environment: + PORT: 8080 + ENABLE_AUTHENTICATION: "false" + ENABLE_HTTPS: "false" + NODE_EXPORTER_SERVICE_NAME_REGEX: "node-exporter" + CADVISOR_SERVICE_NAME_REGEX: "cadvisor" + networks: + - monitoring_net + +networks: + monitoring_net: + external: + name: sd_monitoring_net \ No newline at end of file diff --git a/test-cluster/compose-metrics.yml b/test-cluster/compose-metrics.yml new file mode 100644 index 0000000..1443ee1 --- /dev/null +++ b/test-cluster/compose-metrics.yml @@ -0,0 +1,31 @@ +version: "3.2" + +services: + node-exporter: + image: quay.io/prometheus/node-exporter:v1.6.1 + volumes: + - "/:/host:ro" + command: + - "--path.rootfs=/host" + deploy: + mode: global + networks: + - monitoring_net + + cadvisor: + image: gcr.io/cadvisor/cadvisor + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + - /dev/disk/:/dev/disk:ro + deploy: + mode: global + networks: + - monitoring_net + +networks: + monitoring_net: + driver: overlay + attachable: true diff --git a/test-cluster/wait-for-docker.sh b/test-cluster/wait-for-docker.sh new file mode 100755 index 0000000..364acdf --- /dev/null +++ b/test-cluster/wait-for-docker.sh @@ -0,0 +1,5 @@ +until docker info > /dev/null +do + echo "waiting for docker info" + sleep 1 +done \ No newline at end of file diff --git a/yarn.lock b/yarn.lock index ddf50a1..28b8695 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1040,6 +1040,13 @@ os-tmpdir@~1.0.1: resolved "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz" integrity sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g== +parse-prometheus-text-format@^1.1.1: + version "1.1.1" + resolved "https://registry.npmjs.org/parse-prometheus-text-format/-/parse-prometheus-text-format-1.1.1.tgz" + integrity sha512-dBlhYVACjRdSqLMFe4/Q1l/Gd3UmXm8ruvsTi7J6ul3ih45AkzkVpI5XHV4aZ37juGZW5+3dGU5lwk+QLM9XJA== + dependencies: + shallow-equal "^1.2.0" + parseurl@~1.3.3: version "1.3.3" resolved "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz" @@ -1266,6 +1273,11 @@ setprototypeof@1.2.0: resolved "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz" integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== +shallow-equal@^1.2.0: + version "1.2.1" + resolved "https://registry.npmjs.org/shallow-equal/-/shallow-equal-1.2.1.tgz" + integrity sha512-S4vJDjHHMBaiZuT9NPb616CSmLf618jawtv3sufLl6ivK8WocjAo58cXwbRV1cgqxH0Qbv+iUt6m05eqEa2IRA== + shebang-command@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz"
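
A note on the metrics math introduced in `server/index.js` above: `node_cpu_seconds_total` (node-exporter) and `container_cpu_usage_seconds_total` (cadvisor) are monotonically increasing counters, so `fetchNodeMetrics` and `fetchTasksMetrics` derive a CPU percentage from the difference between two successive scrapes. A minimal standalone sketch of that calculation (the `cpuPercent` helper name is illustrative, not part of the codebase):

```js
// Each sample is { fetchTime: seconds, cpuSecondsTotal: summed Prometheus counter value }.
const cpuPercent = (previous, current) => {
  const elapsed = current.fetchTime - previous.fetchTime;           // seconds between scrapes
  if (previous.cpuSecondsTotal === undefined || elapsed <= 0) {
    return undefined;                                               // first scrape has no baseline yet
  }
  const used = current.cpuSecondsTotal - previous.cpuSecondsTotal;  // CPU seconds burned since the last scrape
  return Math.round((used * 100) / elapsed);
};

// 1.5 CPU-seconds used over a 5-second window => 30%
console.log(cpuPercent(
  { fetchTime: 100, cpuSecondsTotal: 40.0 },
  { fetchTime: 105, cpuSecondsTotal: 41.5 }
));
```

The same two-sample pattern is why the first metrics poll after the server starts shows no CPU value: `timeDiffFromLastMetrics` is still zero at that point.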