diff --git a/.gitignore b/.gitignore index 0739eb8a..ff7161e5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,8 @@ *.out node_modules .DS_Store -.vscode \ No newline at end of file +.vscode + +# draw.io temp files +.$*.bkp +.$*.dtmp \ No newline at end of file diff --git a/cloudant-change-listener/Dockerfile b/cloudant-change-listener/Dockerfile index 865c194a..d2a4f865 100644 --- a/cloudant-change-listener/Dockerfile +++ b/cloudant-change-listener/Dockerfile @@ -1,10 +1,10 @@ -FROM registry.access.redhat.com/ubi9/nodejs-20:latest AS build-env +FROM registry.access.redhat.com/ubi9/nodejs-22:latest AS build-env WORKDIR /opt/app-root/src COPY --chown=default:root job/* . RUN npm install # Use a small distroless image for as runtime image -FROM gcr.io/distroless/nodejs20-debian12 +FROM gcr.io/distroless/nodejs22 COPY --from=build-env /opt/app-root/src /app WORKDIR /app ENTRYPOINT ["job.mjs"] \ No newline at end of file diff --git a/cloudant-change-listener/job/package.json b/cloudant-change-listener/job/package.json index 1d8230ca..0c7c5d14 100644 --- a/cloudant-change-listener/job/package.json +++ b/cloudant-change-listener/job/package.json @@ -4,8 +4,8 @@ "description": "Change event listener for cloudant DB", "main": "job.mjs", "engines": { - "node": ">=20", - "npm": "^10" + "node": "^22", + "npm": "^11" }, "scripts": { "test": "echo \"Error: no test specified\" && exit 1" diff --git a/gallery/README.md b/gallery/README.md index 4a340b28..7635d46c 100644 --- a/gallery/README.md +++ b/gallery/README.md @@ -53,10 +53,15 @@ OK Read the project guid from the CLI output and store it in a local variable. We'll need it later on to configure the bucket. 
``` -> export CE_PROJECT_GUID=$(ibmcloud ce project current --output json|jq -r '.guid') -> echo "CE_PROJECT_GUID: $CE_PROJECT_GUID" +$ export CE_PROJECT_GUID=$(ibmcloud ce project current --output json|jq -r '.guid') +$ echo "CE_PROJECT_GUID: $CE_PROJECT_GUID" CE_PROJECT_GUID: 91efff97-1001-4144-997a-744ec8009303 + +$ export CE_PROJECT_CRN=$(ibmcloud ce project get --name gallery --output json|jq -r '.crn') +$ echo "CE_PROJECT_CRN: $CE_PROJECT_CRN" + +CE_PROJECT_CRN: crn:v1:bluemix:public:codeengine:eu-de:a/7658687ea07db8386963ebe2b8f1897a:91efff97-1001-4144-997a-744ec8009303:: ``` Once the project has become active, you are good to proceed with the next step. @@ -103,7 +108,7 @@ OK Service instance gallery-cos was created. Name: gallery-cos -ID: crn:v1:bluemix:public:cloud-object-storage:global:a/7658687ea07db8396963ebe2b8e1897d:c0f324be-33fd-4989-a4af-376a13abb316:: +ID: crn:v1:bluemix:public:cloud-object-storage:global:a/7658687ea07db8386963ebe2b8f1897a:c0f324be-33fd-4989-a4af-376a13abb316:: GUID: c0f324be-33fd-4989-a4af-376a13abb316 Location: global State: active @@ -233,10 +238,14 @@ Utilize local build capabilities, which is able to take your local source code a ``` $ ibmcloud ce fn create --name change-color \ --build-source . \ - --runtime nodejs-20 \ + --runtime nodejs-22 \ --memory 4G \ --cpu 1 \ - --env BUCKET=$BUCKET + --env TRUSTED_PROFILE_NAME=ce-gallery-to-cos \ + --env COS_BUCKET=$BUCKET \ + --env COS_REGION=$REGION \ + --trusted-profiles-enabled \ + --visibility project Preparing function 'change-color' for build push... Creating function 'change-color'... @@ -255,33 +264,14 @@ Run 'ibmcloud ce function get -n change-color' to see more details. https://change-color.172utxcdky5l.eu-de.codeengine.appdomain.cloud ``` -In order to allow the function to read and write to the bucket, we'll need to create a binding between the COS instance and the function to expose the Object Storage credentials to the functions code. 
As we already created such credentials for the application, we'll want to make sure to re-use it, as opposed to create new ones. - -List all service credentials of the Object Storage instance: -``` -$ ibmcloud resource service-keys --instance-id $COS_INSTANCE_ID +In order to allow the function to read and write to the bucket, we'll need to create an IAM trusted profile between the COS instance and the function to expose the Object Storage credentials to the functions code. -Retrieving all service keys in resource group default under account John Does's Account as abc@ibm.com... -OK -Name State Created At -gallery-ce-service-binding-prw1t active Fri Sep 8 07:56:19 UTC 2023 ``` +$ ibmcloud iam trusted-profile-create ce-gallery-to-cos -Extract the name of the service access secret, that has been created for the app -``` -$ export COS_SERVICE_CREDENTIAL=$(ibmcloud resource service-keys --instance-id $COS_INSTANCE_ID --output json|jq -r '.[0].name') -$ echo "COS_SERVICE_CREDENTIAL: $COS_SERVICE_CREDENTIAL" -``` +$ ibmcloud iam trusted-profile-link-create ce-gallery-to-cos --name ce-fn-change-color --cr-type CE --link-crn ${CE_PROJECT_CRN} --link-component-type function --link-component-name change-color -Finally expose the COS credentials to the function by binding the service access secret to the function -``` -$ ibmcloud ce function bind --name change-color \ - --service-instance gallery-cos \ - --service-credential $COS_SERVICE_CREDENTIAL - -Binding service instance... -Status: Done -OK +$ ibmcloud iam trusted-profile-policy-create ce-gallery-to-cos --roles "Writer" --service-name cloud-object-storage --service-instance ${COS_INSTANCE_ID} --resource-type bucket --resource ${BUCKET} ``` In order to complete this step, we'll update the app and make it aware that there is a function that allows to change the colors of individual images. 
diff --git a/gallery/app/app.js b/gallery/app/app.js index 5bc5dda4..8f133a0b 100644 --- a/gallery/app/app.js +++ b/gallery/app/app.js @@ -6,13 +6,20 @@ const { open, readFile, writeFile, readdir, unlink } = require("fs/promises"); const basePath = __dirname; // serving files from here -const GALLERY_PATH = process.env.MOUNT_LOCATION || "/app/tmp"; +let GALLERY_PATH = "/app/tmp"; + +// if the optional env var 'MOUNT_LOCATION' is not set, but a bucket has been mounted to /mnt/bucket assume it is a COS mount +let isCosEnabled = false; +if (process.env.MOUNT_LOCATION || existsSync("/mnt/bucket")) { + isCosEnabled = true; + GALLERY_PATH = process.env.MOUNT_LOCATION || "/mnt/bucket"; +} function getFunctionEndpoint() { if (!process.env.COLORIZER) { return undefined; } - return `https://${process.env.COLORIZER}.${process.env.CE_SUBDOMAIN}.${process.env.CE_DOMAIN}`; + return `http://${process.env.COLORIZER}.${process.env.CE_SUBDOMAIN}.function.cluster.local`; } async function invokeColorizeFunction(imageId) { @@ -67,7 +74,7 @@ async function handleHttpReq(req, res) { if (existsSync(GALLERY_PATH)) { enabledFeatures.fs = { - cos: !!process.env.MOUNT_LOCATION, + cos: isCosEnabled, }; } if (process.env.COLORIZER) { @@ -183,7 +190,6 @@ async function handleHttpReq(req, res) { console.log(`Error deleting gallery content: ${err}`); res.statusCode = 503; res.end(`Error deleting gallery content: ${err}`); - } return; } diff --git a/gallery/function/cos-service.js b/gallery/function/cos-service.js index e467f3a0..81eeaef6 100644 --- a/gallery/function/cos-service.js +++ b/gallery/function/cos-service.js @@ -6,23 +6,38 @@ * disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
******************************************************************************/ -const ibm = require("ibm-cos-sdk"); +const { ContainerAuthenticator } = require("ibm-cloud-sdk-core"); +const { Readable } = require('node:stream'); +const responseToReadable = (response) => { + const reader = response.body.getReader(); + const rs = new Readable(); + rs._read = async () => { + const result = await reader.read(); + if (!result.done) { + rs.push(Buffer.from(result.value)); + } else { + rs.push(null); + return; + } + }; + return rs; +}; class CosService { - cos; config; + authenticator; constructor(config) { const fn = "constructor"; this.config = config; - this.cos = new ibm.S3(config); - console.debug( - `${fn}- initialized! instance: '${config.serviceInstanceId}'` - ); - } - getServiceInstanceId() { - return this.config.serviceInstanceId; + // create an authenticator based on a trusted profile + this.authenticator = new ContainerAuthenticator({ + iamProfileName: config.trustedProfileName, + }); + console.log( + `CosService init - region: '${this.config.cosRegion}', bucket: ${this.config.cosBucket}, trustedProfileName: '${this.config.trustedProfileName}'` + ); } getContentTypeFromFileName(fileName) { @@ -60,61 +75,64 @@ class CosService { /** * https://ibm.github.io/ibm-cos-sdk-js/AWS/S3.html#putObject-property */ - createObject(bucket, id, dataToUpload, mimeType, contentLength) { + async createObject(id, dataToUpload, mimeType, contentLength) { const fn = "createObject "; console.debug(`${fn}> id: '${id}', mimeType: '${mimeType}', contentLength: '${contentLength}'`); - return this.cos - .putObject({ - Bucket: bucket, - Key: id, - Body: dataToUpload, - ContentType: mimeType, - ContentLength: contentLength, - }) - .promise() - .then((obj) => { - console.debug(`${fn}< done`); - return true; - }) - .catch((err) => { - console.error(err); - console.debug(`${fn}< failed`); - throw err; - }); - } + // prepare the request to create the object files in the bucket + const 
requestOptions = { + method: "PUT", + body: dataToUpload, + headers: { + "Content-Type": mimeType, + "Content-Length": contentLength, + }, + }; - /** - * https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-node#node-examples-list-objects - */ - getBucketContents(bucketName, prefix) { - const fn = "getBucketContents "; - console.debug(`${fn}> bucket: '${bucketName}', prefix: '${prefix}'`); - return this.cos - .listObjects({ Bucket: bucketName, Prefix: prefix }) - .promise() - .then((data) => { - console.debug(`${fn}< done`); - if (data != null && data.Contents != null) { - return data.Contents; - } - }) - .catch((err) => { - console.error(err); - console.debug(`${fn}< failed`); - return undefined; - }); + // authenticate the request + await this.authenticator.authenticate(requestOptions); + + // perform the request + const response = await fetch( + `https://s3.direct.${this.config.cosRegion}.cloud-object-storage.appdomain.cloud/${this.config.cosBucket}/${id}`, + requestOptions + ); + + if (response.status !== 200) { + console.error(`Unexpected status code: ${response.status}`); + throw new Error(`Failed to upload image: '${response.status}'`); + } + return; } /** * https://ibm.github.io/ibm-cos-sdk-js/AWS/S3.html#getObject-property * @param id */ - getObjectAsStream(bucket, id) { + async getObjectAsStream(id) { const fn = "getObjectAsStream "; console.debug(`${fn}> id: '${id}'`); - return this.cos.getObject({ Bucket: bucket, Key: id }).createReadStream(); + // prepare the request to list the files in the bucket + const requestOptions = { + method: "GET", + }; + + // authenticate the request + await this.authenticator.authenticate(requestOptions); + + // perform the request + return fetch( + `https://s3.direct.${this.config.cosRegion}.cloud-object-storage.appdomain.cloud/${this.config.cosBucket}/${id}`, + requestOptions + ).then((response) => { + if (!response.ok) { + console.error(`${fn}< HTTP error, status = ${response.status}`); + throw new 
Error(`HTTP error, status = ${response.status}`); + } + console.debug(`${fn}< receiving response as readable stream`); + return responseToReadable(response); + }); } } diff --git a/gallery/function/function.js b/gallery/function/function.js index 05b9426e..8cbda901 100644 --- a/gallery/function/function.js +++ b/gallery/function/function.js @@ -3,24 +3,23 @@ const { changeColors } = require("./colorizer"); const { CosService } = require("./cos-service"); +// helper function to craft a proper function result object +function sendJSONResponse(statusCode, responseBody) { + return { + statusCode: statusCode, + headers: { + "Content-Type": "application/json", + }, + body: responseBody, + }; +} + // helper function to craft a proper COS config const getCosConfig = () => { - const endpoint = - process.env.CLOUD_OBJECT_STORAGE_ENDPOINT || - "s3.eu-de.cloud-object-storage.appdomain.cloud"; - const serviceInstanceId = - process.env.CLOUD_OBJECT_STORAGE_RESOURCE_INSTANCE_ID; - const apiKeyId = process.env.CLOUD_OBJECT_STORAGE_APIKEY; - console.log( - `getCosConfig - endpoint: '${endpoint}', serviceInstanceId: ${serviceInstanceId}, apiKeyId: '${ - apiKeyId && "*****" - }'` - ); - return { - endpoint, - apiKeyId, - serviceInstanceId, + cosBucket: process.env.COS_BUCKET, + cosRegion: process.env.COS_REGION || process.env.CE_REGION, + trustedProfileName: process.env.TRUSTED_PROFILE_NAME, }; }; @@ -40,66 +39,52 @@ const streamToBuffer = (inputStream) => { // initialize the COS service let cosService; -if (process.env.CLOUD_OBJECT_STORAGE_APIKEY) { +if (process.env.COS_BUCKET && process.env.TRUSTED_PROFILE_NAME) { cosService = new CosService(getCosConfig()); console.log(`Initialized COS Service`); } -const bucket = process.env.BUCKET; -console.log(`Target bucket: '${bucket}'`); async function main(args) { // Check whether COS has been configured properly - if (!cosService || !bucket) { + if (!cosService) { console.log( - `Aborting. COS has not been configured properly. 
Either the binding with the prefix 'CLOUD_OBJECT_STORAGE_' or the env var 'BUCKET' are missing.` + `Aborting. COS has not been configured properly. The env variables 'COS_BUCKET' and 'TRUSTED_PROFILE_NAME' must be set properly.` + ); + return sendJSONResponse( + 401, + `{"error":"COS has not been configured properly. The env variables 'COS_BUCKET' and 'TRUSTED_PROFILE_NAME' must be set properly"}` ); - return { - statusCode: 401, - headers: { - "Content-Type": "application/json", - }, - body: `{"error":"Target IBM Cloud Object Storage instance has not been bound properly"}`, - }; } // Obtain the COS ID of the image that should be transformed const imageId = args.imageId; if (!imageId) { - console.log( - `Aborting. Payload parameter imageId is not set properly` - ); - return { - statusCode: 400, - headers: { - "Content-Type": "application/json", - }, - body: `{"error":"Payload parameter imageId is not set properly"}`, - }; + console.log(`Aborting. Payload parameter imageId is not set properly`); + return sendJSONResponse(400, `{"error":"Payload parameter imageId is not set properly"}`); } console.log(`Changing colors of '${imageId}'`); try { + // // Fetch the object that should get transformed - const fileStream = await cosService.getObjectAsStream(bucket, imageId); + const fileStream = await cosService.getObjectAsStream(imageId); console.log(`Downloaded '${imageId}'`); + // // Convert the image stream to a buffer const imageBuf = await streamToBuffer(fileStream); console.log( `Converted to a buffer of size ${(imageBuf.length / 1024).toFixed(1)} KB` ); + // // Change the color tokens of the image const updatedImageBuf = await changeColors(imageBuf); - console.log( - `Adjusted colors of '${imageId}' - new size ${( - updatedImageBuf.length / 1024 - ).toFixed(1)} KB` - ); + console.log(`Adjusted colors of '${imageId}' - new size ${(updatedImageBuf.length / 1024).toFixed(1)} KB`); - // SIXTH upload the adjusted image back into the COS bucket + // + // Upload the 
adjusted image back into the COS bucket await cosService.createObject( - bucket, imageId, updatedImageBuf, cosService.getContentTypeFromFileName(imageId), @@ -107,22 +92,10 @@ async function main(args) { ); console.log(`Uploaded updated '${imageId}'`); - return { - statusCode: 200, - headers: { - "Content-Type": "application/json", - }, - body: `{"success": "true"}`, - }; + return sendJSONResponse(200, `{"success": "true"}`); } catch (reason) { console.error(`Error changing colors of ${imageId}`, reason); - return { - statusCode: 503, - headers: { - "Content-Type": "application/json", - }, - body: `{"error":"Error changing colors: ${reason}"}`, - }; + return sendJSONResponse(503, `{"error":"Error changing colors: ${reason}"}`); } } diff --git a/gallery/function/package.json b/gallery/function/package.json index 07fa9bdc..289a5873 100644 --- a/gallery/function/package.json +++ b/gallery/function/package.json @@ -9,6 +9,6 @@ "author": "", "license": "MIT", "dependencies": { - "ibm-cos-sdk": "^1.13.1" + "ibm-cloud-sdk-core": "^5.4.2" } } diff --git a/gallery/job/Dockerfile b/gallery/job/Dockerfile index 8e3eba4d..a6e8c129 100644 --- a/gallery/job/Dockerfile +++ b/gallery/job/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi9/nodejs-20:latest AS build-env +FROM registry.access.redhat.com/ubi9/nodejs-22:latest AS build-env WORKDIR /app # Define which files should be copied into the container image @@ -9,7 +9,7 @@ COPY package.json . 
RUN npm install # Use a small distroless image for as runtime image -FROM gcr.io/distroless/nodejs20-debian12 +FROM gcr.io/distroless/nodejs22 COPY --from=build-env /app /app WORKDIR /app CMD ["job.js"] diff --git a/hello/Dockerfile b/hello/Dockerfile index 71207c02..e0951421 100644 --- a/hello/Dockerfile +++ b/hello/Dockerfile @@ -1,10 +1,10 @@ -FROM registry.access.redhat.com/ubi9/nodejs-20:latest AS build-env +FROM registry.access.redhat.com/ubi9/nodejs-22:latest AS build-env WORKDIR /app RUN npm init -f && npm install COPY server.js . # Use a small distroless image for as runtime image -FROM gcr.io/distroless/nodejs20-debian12 +FROM gcr.io/distroless/nodejs22 COPY --from=build-env /app /app WORKDIR /app EXPOSE 8080 diff --git a/private-path-to-vpc-vsi/README.md b/private-path-to-vpc-vsi/README.md new file mode 100644 index 00000000..b996dba1 --- /dev/null +++ b/private-path-to-vpc-vsi/README.md @@ -0,0 +1,57 @@ +# Connect your Code Engine workload to IBM VPC infrastructure using Private Path + +The script provided in this folder installs an end-to-end working solution, which showcases how workload deployed on IBM Cloud Code Engine can connect to backends (e.g. databases, nginx, docling-serve) hosted on IBM VPC infrastructure. + +Mainly, this sample covers the following typical use case: +* As a user I want to access my database backend on TCP port XYZ from workload deployed on Code Engine, without exposing my VPC to the public internet. + +**Use case: Database backend** + +To simulate the database use case the script deploys a PostgreSQL database on the origin server VSI, a Code Engine app (see [Golang source code](./ce-app/main.go)), and a Code Engine job (see [Node.js source code](./ce-job/job.mjs)). Each job run instance will issue a single SQL insert statement storing a random greeting message in the database. The purpose of the application is to query all existing records and expose them as a JSON object through its HTTPS endpoint. 
The request flows look as depicted in the diagram below: + +![Database request flow](./docs/code-engine-private-path---database-flow.png) + +_Please note: The custom domain mentioned in the flow is not part of this scripted sample, but is a worthful addition in real-world solutions. Custom domains can be easily configured through Code Engine domain mappings (see https://cloud.ibm.com/docs/codeengine?topic=codeengine-app-domainmapping for further information)_ + +Following diagram depicts the component overview of the sample solution: +![Component diagram](./docs/code-engine-private-path---component-diagram.png) + +To learn more about Private Path services, please refer to https://cloud.ibm.com/docs/vpc?topic=vpc-private-path-service-intro + +## Lets get started + +To run this end-to-end sample, open a terminal, [login into your IBM Cloud account using the IBM Cloud CLI](https://cloud.ibm.com/docs/codeengine?topic=codeengine-install-cli) and execute the following command: +``` +./run +``` + +The script deletes all created resources right after the sample scenario has been verified by the script. However, for playing around with the setup, the cleanup can be skipped using the environment variable `CLEANUP_ON_SUCCESS`: +``` +CLEANUP_ON_SUCCESS=false ./run +``` + +Per default, the script will tear up all resources in the IBM Cloud location Washington (`eu-es`). 
To change the region, utilize the environment variable `REGION` +``` +REGION=eu-de ./run +``` + +To adjust naming of all IBM Cloud resources, the following environment variable `NAME_PREFIX`can be overriden (default: `ce-to-private-path`): +``` +NAME_PREFIX=my-prefix ./run +``` + +To analyze issues that may appear in your account, it could be useful to skip the deletion of IBM Cloud resources by setting environment variable `CLEANUP_ON_ERROR` to `false`: +``` +CLEANUP_ON_ERROR=false ./run +``` + +To clean up all IBM Cloud resources, that have been created as part of the provided script, run: +``` +./run clean +``` + +In order to connect to the VSI via ssh, you can specify the name of an VPC SSH key using the env variable `VPC_SSH_KEY` to configured enroll it on the created VSI. Furthermore, you'll need to set the env variable `DEBUG=true`, which will make sure that the VPC Floating IP will remain attached to the originserver VSI. + +``` +VPC_SSH_KEY= DEBUG=true CLEANUP_ON_SUCCESS=false ./run +``` diff --git a/private-path-to-vpc-vsi/build b/private-path-to-vpc-vsi/build new file mode 100755 index 00000000..5e505b69 --- /dev/null +++ b/private-path-to-vpc-vsi/build @@ -0,0 +1,23 @@ +#!/bin/bash + +# Env Vars: +# REGISTRY: name of the image registry/namespace to store the images +# +# NOTE: to run this you MUST set the REGISTRY environment variable to +# your own image registry/namespace otherwise the `docker push` commands +# will fail due to an auth failure. Which means, you also need to be logged +# into that registry before you run it. + +set -ex +export REGISTRY=${REGISTRY:-icr.io/codeengine} + +# Build and push the image +cd ce-app/ +KO_DOCKER_REPO="${REGISTRY}/ce-to-private-path/app" ko build . --bare --image-user 1001 --platform linux/amd64 --sbom=none +cd .. + +# Build and push the job +cd ce-job/ +docker build ${NOCACHE} -t ${REGISTRY}/ce-to-private-path/job . --platform linux/amd64 +docker push ${REGISTRY}/ce-to-private-path/job +cd .. 
\ No newline at end of file diff --git a/private-path-to-vpc-vsi/ce-app/Dockerfile b/private-path-to-vpc-vsi/ce-app/Dockerfile new file mode 100644 index 00000000..7ae1e082 --- /dev/null +++ b/private-path-to-vpc-vsi/ce-app/Dockerfile @@ -0,0 +1,10 @@ +FROM quay.io/projectquay/golang:1.23 AS build-env +WORKDIR /go/src/app +COPY . . + +RUN CGO_ENABLED=0 go build -o /go/bin/app . + +# Copy the exe into a smaller base image +FROM gcr.io/distroless/static-debian12 +COPY --from=build-env /go/bin/app / +ENTRYPOINT ["/app"] diff --git a/private-path-to-vpc-vsi/ce-app/go.mod b/private-path-to-vpc-vsi/ce-app/go.mod new file mode 100644 index 00000000..3f6dc51b --- /dev/null +++ b/private-path-to-vpc-vsi/ce-app/go.mod @@ -0,0 +1,5 @@ +module github.com/IBM/CodeEngine/ce-satellite-connector + +go 1.23.0 + +require github.com/lib/pq v1.10.9 diff --git a/private-path-to-vpc-vsi/ce-app/go.sum b/private-path-to-vpc-vsi/ce-app/go.sum new file mode 100644 index 00000000..aeddeae3 --- /dev/null +++ b/private-path-to-vpc-vsi/ce-app/go.sum @@ -0,0 +1,2 @@ +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= diff --git a/private-path-to-vpc-vsi/ce-app/main.go b/private-path-to-vpc-vsi/ce-app/main.go new file mode 100644 index 00000000..5c08a58a --- /dev/null +++ b/private-path-to-vpc-vsi/ce-app/main.go @@ -0,0 +1,103 @@ +package main + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + _ "github.com/lib/pq" +) + +var dbClient = connectToDb() + +type Friendship struct { + Name string `json:"name"` + Created int64 `json:"created"` + Greeting string `json:"greeting"` +} + +func Debug(format string, args ...interface{}) { + format = time.Now().Format("2006-01-02 15:04:05 ") + format + "\n" + fmt.Fprintf(os.Stderr, format, args...) 
+} + +func connectToDb() *sql.DB { + Debug("Connecting to PostgreSQL instance ...") + dbClient, err := sql.Open("postgres", "postgres://"+os.Getenv("PGUSER")+":"+os.Getenv("PGPASSWORD")+"@"+os.Getenv("PGHOST")+":"+os.Getenv("PGPORT")+"/"+os.Getenv("PGDATABASE")+"?sslmode=disable") + if err != nil { + log.Panicf("Cannot open connection to database: %v", err) + } + Debug("Connecting to PostgreSQL instance [done]") + return dbClient +} + +// This func will handle all incoming HTTP requests +func HandleHTTP(w http.ResponseWriter, r *http.Request) { + + friendships := []Friendship{} + var ( + name string + created_at int64 + greeting string + ) + Debug("Fetching all friendship records ...") + rows, sqlErr := dbClient.Query("SELECT name, created_at, greeting FROM myfriendships") + if sqlErr != nil { + log.Printf("Retrieving friendship records failed - err: " + sqlErr.Error()) + w.WriteHeader(500) + } + defer rows.Close() + for rows.Next() { + err := rows.Scan(&name, &created_at, &greeting) + if err != nil { + log.Printf("Scanning friendship record failed - err: " + err.Error()) + w.WriteHeader(500) + } + log.Println("Retrieved friendship records", name, created_at, greeting) + friendships = append(friendships, Friendship{Name: name, Created: created_at, Greeting: greeting}) + } + + Debug("Fetched %d friendship records", len(friendships)) + bytes, err := json.Marshal(&friendships) + if err != nil { + log.Printf("Failed to marshal response - err: " + err.Error()) + w.WriteHeader(500) + } + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, "%s", string(bytes)) + +} + +func main() { + ctx := context.Background() + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt, syscall.SIGTERM) + + srv := &http.Server{Addr: ":8080"} + + // Debug the http handler for all requests + http.HandleFunc("/", HandleHTTP) + + go func() { + Debug("Listening on port 8080") + + if err := srv.ListenAndServe(); err != http.ErrServerClosed { + 
log.Fatalf("failed to start server: %v", err) + } + }() + + <-signals + Debug("shutting down server") + if err := srv.Shutdown(ctx); err != nil { + log.Fatalf("failed to shutdown server: %v", err) + } + Debug("shutdown done") +} diff --git a/private-path-to-vpc-vsi/ce-job/Dockerfile b/private-path-to-vpc-vsi/ce-job/Dockerfile new file mode 100644 index 00000000..dbe44d0a --- /dev/null +++ b/private-path-to-vpc-vsi/ce-job/Dockerfile @@ -0,0 +1,14 @@ +FROM registry.access.redhat.com/ubi9/nodejs-22:latest AS build-env +WORKDIR /job + +# Define which files should be copied into the container image +COPY --chown=default:root *.mjs *.json . + +# Load all dependencies +RUN npm install + +# Use a small distroless image for as runtime image +FROM gcr.io/distroless/nodejs22 +COPY --from=build-env /job /job +WORKDIR /job +CMD ["job.mjs"] diff --git a/private-path-to-vpc-vsi/ce-job/job.mjs b/private-path-to-vpc-vsi/ce-job/job.mjs new file mode 100644 index 00000000..a5bcec4e --- /dev/null +++ b/private-path-to-vpc-vsi/ce-job/job.mjs @@ -0,0 +1,32 @@ +import pkg from "pg"; +import { LoremIpsum } from "lorem-ipsum"; + +const { Client } = pkg; + +console.log("Connecting to PostgreSQL instance..."); +try { + const client = new Client({ + user: process.env.PGUSER, + password: process.env.PGPASSWORD, + host: process.env.PGHOST, + database: process.env.PGDATABASE, + port: process.env.PGPORT, + }); + await client.connect(); + + console.log("Creating myfriendships table if it does not exist..."); + await client.query("CREATE TABLE IF NOT EXISTS myfriendships (id SERIAL PRIMARY KEY, name varchar(256) NOT NULL, created_at bigint NOT NULL, greeting text);"); + + console.log("Writing into myfriendships table..."); + await client.query("INSERT INTO myfriendships (name,created_at,greeting) VALUES ($1,$2,$3);", [ + process.env.HOSTNAME, + Date.now(), + new LoremIpsum().generateWords(5), + ]); + + await client.end(); + console.log("Done!"); +} catch (err) { + console.error("Failed to connect 
to PostgreSQL instance", err); + process.exit(1); +} diff --git a/private-path-to-vpc-vsi/ce-job/package-lock.json b/private-path-to-vpc-vsi/ce-job/package-lock.json new file mode 100644 index 00000000..f03ee770 --- /dev/null +++ b/private-path-to-vpc-vsi/ce-job/package-lock.json @@ -0,0 +1,176 @@ +{ + "name": "ce-job", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "ce-job", + "version": "1.0.0", + "license": "Apache-2.0", + "dependencies": { + "lorem-ipsum": "^2.0.8", + "pg": "^8.11.5" + }, + "engines": { + "node": "^20.*", + "npm": "^10.*" + } + }, + "node_modules/commander": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", + "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", + "engines": { + "node": "^12.20.0 || >=14" + } + }, + "node_modules/lorem-ipsum": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/lorem-ipsum/-/lorem-ipsum-2.0.8.tgz", + "integrity": "sha512-5RIwHuCb979RASgCJH0VKERn9cQo/+NcAi2BMe9ddj+gp7hujl6BI+qdOG4nVsLDpwWEJwTVYXNKP6BGgbcoGA==", + "dependencies": { + "commander": "^9.3.0" + }, + "bin": { + "lorem-ipsum": "dist/bin/lorem-ipsum.bin.js" + }, + "engines": { + "node": ">= 8.x", + "npm": ">= 5.x" + } + }, + "node_modules/pg": { + "version": "8.11.5", + "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.5.tgz", + "integrity": "sha512-jqgNHSKL5cbDjFlHyYsCXmQDrfIX/3RsNwYqpd4N0Kt8niLuNoRNH+aazv6cOd43gPh9Y4DjQCtb+X0MH0Hvnw==", + "dependencies": { + "pg-connection-string": "^2.6.4", + "pg-pool": "^3.6.2", + "pg-protocol": "^1.6.1", + "pg-types": "^2.1.0", + "pgpass": "1.x" + }, + "engines": { + "node": ">= 8.0.0" + }, + "optionalDependencies": { + "pg-cloudflare": "^1.1.1" + }, + "peerDependencies": { + "pg-native": ">=3.0.1" + }, + "peerDependenciesMeta": { + "pg-native": { + "optional": true + } + } + }, + "node_modules/pg-cloudflare": { + "version": "1.1.1", + 
"resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz", + "integrity": "sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==", + "optional": true + }, + "node_modules/pg-connection-string": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.4.tgz", + "integrity": "sha512-v+Z7W/0EO707aNMaAEfiGnGL9sxxumwLl2fJvCQtMn9Fxsg+lPpPkdcyBSv/KFgpGdYkMfn+EI1Or2EHjpgLCA==" + }, + "node_modules/pg-int8": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz", + "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/pg-pool": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.6.2.tgz", + "integrity": "sha512-Htjbg8BlwXqSBQ9V8Vjtc+vzf/6fVUuak/3/XXKA9oxZprwW3IMDQTGHP+KDmVL7rtd+R1QjbnCFPuTHm3G4hg==", + "peerDependencies": { + "pg": ">=8.0" + } + }, + "node_modules/pg-protocol": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.6.1.tgz", + "integrity": "sha512-jPIlvgoD63hrEuihvIg+tJhoGjUsLPn6poJY9N5CnlPd91c2T18T/9zBtLxZSb1EhYxBRoZJtzScCaWlYLtktg==" + }, + "node_modules/pg-types": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz", + "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==", + "dependencies": { + "pg-int8": "1.0.1", + "postgres-array": "~2.0.0", + "postgres-bytea": "~1.0.0", + "postgres-date": "~1.0.4", + "postgres-interval": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pgpass": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz", + "integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==", + 
"dependencies": { + "split2": "^4.1.0" + } + }, + "node_modules/postgres-array": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz", + "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postgres-bytea": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz", + "integrity": "sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-date": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz", + "integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-interval": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz", + "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==", + "dependencies": { + "xtend": "^4.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "engines": { + "node": ">= 10.x" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + } + } +} diff --git a/private-path-to-vpc-vsi/ce-job/package.json b/private-path-to-vpc-vsi/ce-job/package.json new file mode 100644 index 
00000000..85d6caf1 --- /dev/null +++ b/private-path-to-vpc-vsi/ce-job/package.json @@ -0,0 +1,18 @@ +{ + "name": "ce-job", + "version": "1.0.0", + "description": "Code Engine job written in Node.js that connects to a PostgreSQL instance", + "scripts": { + "start": "node ./job.mjs" + }, + "author": "IBM", + "license": "Apache-2.0", + "dependencies": { + "lorem-ipsum": "^2.0.8", + "pg": "^8.16.3" + }, + "engines": { + "node": "^22.*", + "npm": "^11.*" + } +} diff --git a/private-path-to-vpc-vsi/docs/code-engine-private-path---component-diagram.drawio b/private-path-to-vpc-vsi/docs/code-engine-private-path---component-diagram.drawio new file mode 100644 index 00000000..241ee66b --- /dev/null +++ b/private-path-to-vpc-vsi/docs/code-engine-private-path---component-diagram.drawio @@ -0,0 +1,286 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/private-path-to-vpc-vsi/docs/code-engine-private-path---component-diagram.png b/private-path-to-vpc-vsi/docs/code-engine-private-path---component-diagram.png new file mode 100644 index 00000000..f1c7801b Binary files /dev/null and b/private-path-to-vpc-vsi/docs/code-engine-private-path---component-diagram.png differ diff --git a/private-path-to-vpc-vsi/docs/code-engine-private-path---database-flow.png b/private-path-to-vpc-vsi/docs/code-engine-private-path---database-flow.png new file mode 100644 index 00000000..783b1834 Binary files /dev/null and 
b/private-path-to-vpc-vsi/docs/code-engine-private-path---database-flow.png differ diff --git a/private-path-to-vpc-vsi/run b/private-path-to-vpc-vsi/run new file mode 100755 index 00000000..31124779 --- /dev/null +++ b/private-path-to-vpc-vsi/run @@ -0,0 +1,496 @@ +#!/bin/bash + +# Env vars +CLEANUP_ON_ERROR=${CLEANUP_ON_ERROR:=false} +CLEANUP_ON_SUCCESS=${CLEANUP_ON_SUCCESS:=false} +REGION="${REGION:=eu-es}" +NAME_PREFIX="${NAME_PREFIX:=ce-to-private-path}" +VPC_SSH_KEY="${VPC_SSH_KEY:=}" +DEBUG_MODE="${DEBUG_MODE:=false}" + +# Dependent variables +resource_group_name="${NAME_PREFIX}--rg" +ce_project_name="${NAME_PREFIX}--ce-project" +ce_job_name="friendship-book-writer" +ce_app_name="friendship-book-api" +ce_db_credentials="db-credentials" +vpc_name="${NAME_PREFIX}--is-vpc" +vsi_originserver_name="${NAME_PREFIX}--is-vsi-originserver" + +# ============================== +# COMMON FUNCTIONS +# ============================== +RED="\033[31m" +BLUE="\033[94m" +GREEN="\033[32m" +ENDCOLOR="\033[0m" + +function print_error { + echo -e "${RED}\n==========================================${ENDCOLOR}" + echo -e "${RED} FAILED${ENDCOLOR}" + echo -e "${RED}==========================================\n${ENDCOLOR}" + echo -e "${RED}$1${ENDCOLOR}" + echo "" +} +function print_msg { + echo -e "${BLUE}$1${ENDCOLOR}" +} +function print_success { + echo -e "${GREEN}$1${ENDCOLOR}" +} + +# Helper function to check whether prerequisites are installed +function check_prerequisites { + # Ensure that jq tool is installed + if ! command -v jq &>/dev/null; then + print_error "'jq' tool is not installed" + exit 1 + fi +} + +# helper function to check whether IBM Cloud CLI plugins should get updated, or not +function ensure_plugin_is_up_to_date() { + echo "Checking $1 ..." + # check whether plugin is installed + if ! 
ibmcloud plugin show $1 -q >/dev/null; then + # install it + ibmcloud plugin install $1 -f --quiet + else + # check whether there is an update available + ibmcloud plugin update $1 -f --quiet + fi +} + + +# Clean up previous run +function clean() { + ( + rm -f userdata-vsi-agent.sh + + ibmcloud is floating-ip-release $vsi_originserver_name-ip --force 2>/dev/null + ibmcloud is instance-delete $vsi_originserver_name --force 2>/dev/null + while [ $? == 0 ]; do + sleep 2 + ibmcloud is instance $vsi_originserver_name >/dev/null 2>&1 + done + ibmcloud is private-path-service-gateway-delete $vpc_name-pps --force + ibmcloud is load-balancer-delete $vpc_name-ppnlb --force + ibmcloud is subnet-delete $vpc_name-subnet --force + ibmcloud is network-acl-delete $vpc_name-acl --force + ibmcloud is public-gateway-delete $vpc_name-gateway --force + ibmcloud is security-group-delete $vpc_name-group --force + ibmcloud is vpc-delete $vpc_name --force + while [ $? == 0 ]; do + sleep 2 + ibmcloud is vpc $vpc_name + done + + ibmcloud ce project select --name $ce_project_name --quiet 2>/dev/null + if [ $? == 0 ]; then + ibmcloud ce project delete --name $ce_project_name --force --hard --no-wait + fi + + ibmcloud resource group $resource_group_name --quiet 2>/dev/null + if [[ $? == 0 ]]; then + COUNTER=0 + # some resources (e.g. boot volumes) are deleted with some delay. Hence, the script waits before exiting with an error + while (( "$(ibmcloud resource service-instances --type all -g $resource_group_name --output json | jq -r '. | length')" > 0 )); do + sleep 5 + COUNTER=$((COUNTER + 1)) + if ((COUNTER > 30)); then + print_error "Cleanup failed! Please make sure to delete remaining resources manually to avoid unwanted charges." 
+ ibmcloud resource service-instances --type all -g $resource_group_name + exit 1 + fi + done + fi + + ibmcloud resource group-delete $resource_group_name --force 2>/dev/null + ) +} + +function abortScript() { + if [[ "${CLEANUP_ON_ERROR}" == true ]]; then + clean + else + print_msg "\nSkipping deletion of the created IBM Cloud resources. Please be aware that the created resources will incur costs in your account." + echo "$ ibmcloud resource service-instances --type all -g $resource_group_name" + ibmcloud resource service-instances --type all -g $resource_group_name + fi + exit 1 +} + +# ============================== +# MAIN SCRIPT FLOW +# ============================== + +print_msg "\n======================================================" +print_msg " Setting up \"Code Engine -> private backend\" sample" +print_msg "======================================================\n" + +echo "" +echo "Please note: This script will install various IBM Cloud resources within the resource group '$resource_group_name'." + +print_msg "\nChecking prerequisites ..." +check_prerequisites + +# Ensure that the latest version of the IBM Cloud CLI is installed +print_msg "\nPulling latest IBM Cloud CLI release ..." +ibmcloud update --force + +# Ensure that latest versions of used IBM Cloud CLI plugins are installed +print_msg "\nInstalling required IBM Cloud CLI plugins ..." +ensure_plugin_is_up_to_date code-engine +ensure_plugin_is_up_to_date vpc-infrastructure + +print_msg "\nCleaning up the remains of previous executions ..." +clean +[[ "$1" == "clean" ]] && print_success "\n==========================================\n DONE\n==========================================\n" && exit 0 + +print_msg "\nTargeting IBM Cloud region '$REGION' ..." +ibmcloud target -r $REGION + +# +# Create the resource group, if it does not exist +ibmcloud resource group $resource_group_name --quiet +if [ $? != 0 ]; then + print_msg "\nCreating resource group '$resource_group_name' ..."
+ ibmcloud resource group-create $resource_group_name +fi +print_msg "\nTargeting resource group '$resource_group_name' ..." +ibmcloud target -g $resource_group_name + +# +# Create the VPC +print_msg "Creating the VPC '$vpc_name' ..." +ibmcloud is vpc-create $vpc_name --resource-group-name $resource_group_name +if [ $? -ne 0 ]; then + print_error "VPC creation failed!" + abortScript +fi + +# +# Wait for the VPC to become available +print_msg "\nWaiting for the VPC $vpc_name to become available ..." +COUNTER=0 +while ! [[ $(ibmcloud is vpc $vpc_name --output json | jq -r '.status') == "available" ]]; do + sleep 2 + COUNTER=$((COUNTER + 1)) + if ((COUNTER > 10)); then + echo $(ibmcloud is vpc $vpc_name) + print_error "The VPC did not become ready as expected.\nRun 'ibmcloud is vpc $vpc_name' for further insights" + abortScript + fi +done +echo "VPC '$vpc_name' is available now!" + +# +# Create the Public gateway +print_msg "\nCreating the VPC Public gateway '$vpc_name-gateway' ..." +ibmcloud is public-gateway-create $vpc_name-gateway $vpc_name $REGION-1 --resource-group-name $resource_group_name +if [ $? -ne 0 ]; then + print_error "VPC Public gateway creation failed!" + abortScript +fi + +# +# Create the Network ACL +print_msg "\nCreating the VPC Network ACL '$vpc_name-acl' ..." +ibmcloud is network-acl-create $vpc_name-acl $vpc_name --rules '[{ "name": "egress", "action": "allow", "destination": "0.0.0.0/0", "direction": "outbound", "source": "0.0.0.0/0", "protocol": "all" }, { "name": "ingress", "action": "allow", "destination": "0.0.0.0/0", "direction": "inbound", "source": "0.0.0.0/0", "protocol": "all" }]' +if [ $? -ne 0 ]; then + print_error "VPC Network ACL creation failed!" + abortScript +fi + +# +# Create the VPC subnet +print_msg "\nCreating the VPC Subnet '$vpc_name-subnet' ..."
+ibmcloud is subnet-create $vpc_name-subnet $vpc_name --zone $REGION-1 --resource-group-name $resource_group_name --ipv4-address-count 16 --pgw $vpc_name-gateway --acl $vpc_name-acl +if [ $? -ne 0 ]; then + print_error "VPC Subnet creation failed!" + abortScript +fi + +# Create the security group and its rules +print_msg "\nCreating the VPC Security group '$vpc_name-group' ..." +ibmcloud is security-group-create $vpc_name-group $vpc_name +if [ $? -ne 0 ]; then + print_error "VPC Security group creation failed!" + abortScript +fi + +print_msg "\nCreating required VPC Security group rules ..." +ibmcloud is security-group-rule-add $vpc_name-group outbound tcp --port-min 443 --port-max 443 --vpc $vpc_name >/dev/null +ibmcloud is security-group-rule-add $vpc_name-group outbound udp --port-min 53 --port-max 53 --vpc $vpc_name >/dev/null +ibmcloud is security-group-rule-add $vpc_name-group outbound tcp --port-min 22 --port-max 22 --vpc $vpc_name >/dev/null +ibmcloud is security-group-rule-add $vpc_name-group outbound icmp --icmp-type 8 --vpc $vpc_name >/dev/null +ibmcloud is security-group-rule-add $vpc_name-group outbound all --remote 166.9.0.0/16 --vpc $vpc_name >/dev/null +# from https://cloud.ibm.com/docs/vpc?topic=vpc-service-endpoints-for-vpc +ibmcloud is security-group-rule-add $vpc_name-group outbound all --remote 161.26.0.0/16 --vpc $vpc_name >/dev/null +ibmcloud is security-group-rule-add $vpc_name-group inbound tcp --port-min 22 --port-max 22 --vpc $vpc_name >/dev/null +ibmcloud is security-group-rule-add $vpc_name-group inbound tcp --port-min 80 --port-max 80 --vpc $vpc_name >/dev/null +ibmcloud is security-group-rule-add $vpc_name-group inbound icmp --icmp-type 8 --vpc $vpc_name >/dev/null +echo "Done" + +print_msg "\nPrinting the VPC Security group '$vpc_name-group' ..." +ibmcloud is security-group $vpc_name-group + +# +# Create the origin server VSI +print_msg "\nCreating the VPC VSI '$vsi_originserver_name', which acts as the origin server ..." 
+ibmcloud is instance-create $vsi_originserver_name $vpc_name $REGION-1 cx2-2x4 $vpc_name-subnet \ + --image ibm-centos-stream-9-amd64-6 \ + --boot-volume "{\"name\": \"boot-vol-attachment-name\", \"volume\": {\"name\": \"$vsi_originserver_name-boot-vol\", \"capacity\": 100, \"profile\": {\"name\": \"general-purpose\"}}, \"delete_volume_on_instance_delete\": true}" \ + --resource-group-name $resource_group_name \ + --host-failure-policy restart \ + --primary-network-interface "{\"name\": \"eth0\", \"allow_ip_spoofing\": false, \"auto_delete\": true, \"subnet\": {\"name\":\"${vpc_name}-subnet\"}, \"primary_ip\": {\"auto_delete\": true}, \"security_groups\": [{\"name\": \"${vpc_name}-group\"}]}" \ + --user-data @userdata-vsi-originserver.sh \ + --keys "$VPC_SSH_KEY" +if [ $? -ne 0 ]; then + print_error "VPC VSI creation failed!" + abortScript +fi + +print_msg "\nWaiting for the VSI '$vsi_originserver_name' to start ..." +COUNTER=0 +while ! [[ $(ibmcloud is instance $vsi_originserver_name --output json | jq -r '.status') == "running" ]]; do + sleep 2 + COUNTER=$((COUNTER + 1)) + if ((COUNTER > 10)); then + print_error "The VSI did not become ready as expected. Perform 'ibmcloud is instance $vsi_originserver_name' for further details." + abortScript + fi +done +echo "VSI '$vsi_originserver_name' is running now!" + +# +# Assign the floating IP +print_msg "\nAssigning a VPC Floating IP to the primary network interface of VSI '$vsi_originserver_name' ..." +ibmcloud is floating-ip-reserve $vsi_originserver_name-ip --nic eth0 --in $vsi_originserver_name +if [ $? -ne 0 ]; then + print_error "VPC Floating IP assignment failed!"
+ abortScript +fi +public_ip_address=$(ibmcloud is instance $vsi_originserver_name --output json | jq -r '.primary_network_interface|.floating_ips|.[0]|.address') +private_ip_address=$(ibmcloud is instance $vsi_originserver_name --output json | jq -r '.primary_network_interface|.primary_ip|.address') + +# +# Verify that the originserver VSI exposes a HTTP server +print_msg "\nWaiting for the VSI '$vsi_originserver_name' to be fully initialized (This can take several minutes) ..." +COUNTER=0 +while ! [[ $(curl -s -o /dev/null -w "%{http_code}" http://$public_ip_address:80) == "200" ]]; do + sleep 10 + COUNTER=$((COUNTER + 1)) + if ((COUNTER > 50)); then + print_error "The VSI does not serve any HTTP traffic on port 80" + abortScript + fi + echo "Checking curl http://$public_ip_address:80 ..." +done +echo "VSI $vsi_originserver_name is fully initialized, now!" + +print_msg "\nVSI serves following payload on endpoint 'http://$public_ip_address:80':" +curl http://$public_ip_address:80 + +if [[ "${DEBUG_MODE}" != true ]]; then + # + # Detaching floating ip address + print_msg "\nDetaching VPC Floating IP '$vsi_originserver_name-ip' from the VSI '$vsi_originserver_name' ..." + ibmcloud is floating-ip-release $vsi_originserver_name-ip --force +fi + + +# +# Create the Private Path Service, the Private Path Load balancer and configure the origin pool +# + +# Create Private Path network load balancer +# see: https://cloud.ibm.com/docs/vpc?topic=vpc-ppnlb-ui-creating-private-path-network-load-balancer&interface=cli +print_msg "\nCreating the VPC Private Path network load balancer '$vpc_name-ppnlb' ..." +ibmcloud is load-balancer-create $vpc_name-ppnlb private-path --family network --subnet $vpc_name-subnet +if [ $? -ne 0 ]; then + print_error "VPC Private Path network load balancer creation failed!" + abortScript +fi + +# Create a LB pool +print_msg "\nCreating the VPC Network load balancer pool '$vpc_name-ppnlb-pg-pool' ..." 
+ibmcloud is load-balancer-pool-create $vpc_name-ppnlb-pg-pool $vpc_name-ppnlb weighted_round_robin tcp 10 2 5 tcp +if [ $? -ne 0 ]; then + print_error "VPC Network load balancer pool creation failed!" + abortScript +fi + +# Create a LB member +print_msg "\nAdd the VSI '$vsi_originserver_name' as a member to the load balancer pool '$vpc_name-ppnlb-pg-pool' ..." +ibmcloud is load-balancer-pool-member-create $vpc_name-ppnlb $vpc_name-ppnlb-pg-pool 5432 $vsi_originserver_name --weight 70 +if [ $? -ne 0 ]; then + print_error "Adding the VSI '$vsi_originserver_name' as a member to the load balancer pool failed!" + abortScript +fi + +# Obtain the ID of the default backend pool +print_msg "\nObtaining the ID of the default backend pool '$vpc_name-ppnlb-pg-pool' ..." +ppnlb_pg_pool=$(ibmcloud is load-balancer-pool $vpc_name-ppnlb $vpc_name-ppnlb-pg-pool --output JSON) +ppnlb_pg_pool_id=$(echo "$ppnlb_pg_pool" | jq -r '.id') +echo "ppnlb_pg_pool_id: '$ppnlb_pg_pool_id'" + +# Create a LB listener +print_msg "\nCreating the listener for VPC Network load balancer '$vpc_name-ppnlb' ..." +ibmcloud is load-balancer-listener-create $vpc_name-ppnlb --port-min 5432 --port-max 5432 --protocol tcp --default-pool $ppnlb_pg_pool_id +if [ $? -ne 0 ]; then + print_error "VPC Network load balancer front-end listener creation for port 5432 failed!" + abortScript +fi + +# Create the Private Path service +# see: https://cloud.ibm.com/docs/vpc?topic=vpc-private-path-service-about&interface=cli +random_chars=$(openssl rand -hex 6) +print_msg "\nCreating the VPC Private Path service '$vpc_name-pps' for the service endpoint 'api.ce-$random_chars.intra' ..." +pps_service_endpoint="api.ce-$random_chars.intra" +ibmcloud is private-path-service-gateway-create --name $vpc_name-pps --default-access-policy permit --zonal-affinity true --service-endpoints $pps_service_endpoint --load-balancer $vpc_name-ppnlb +if [ $? -ne 0 ]; then + print_error "VPC Private Path service creation failed!"
+ abortScript +fi + +# Obtain the Private Path service CRN +pps_instance=$(ibmcloud is private-path-service-gateway $vpc_name-pps --output JSON) +pps_instance_crn=$(echo "$pps_instance" | jq -r '.crn') +pps_instance_id=$(echo "$pps_instance" | jq -r '.id') +echo "pps_instance_crn: '$pps_instance_crn', pps_instance_id: '$pps_instance_id'" + +# Publish the Private Path service +# see: https://cloud.ibm.com/docs/vpc?topic=vpc-pps-activating&interface=cli +print_msg "\nPublish VPC Private Path service '$vpc_name-pps' so that it can be accessed from outside of the current account ..." +ibmcloud is private-path-service-gateway-publish $vpc_name-pps +if [ $? -ne 0 ]; then + print_error "Publishing the Private Path service '$vpc_name-pps' failed!" + abortScript +fi + + +# +# Create the Code Engine project +print_msg "\nCreating the Code Engine project '$ce_project_name' ..." +ibmcloud ce project create --name $ce_project_name +if [ $? -ne 0 ]; then + print_error "Code Engine project creation failed!" + abortScript +fi +project_guid=$(ibmcloud ce project current --output json | jq -r '.guid') + +# +# Obtain the kube context of the current project +print_msg "\nObtain the kube context of the Code Engine project '$ce_project_name' ..." +ibmcloud ce project select --name $ce_project_name --kubecfg + +# +# Create the private path integration +ce_vpegatewayconnection_name=friendship-book-api-integration +kubectl apply -f - < 30)); then + kubectl get vpegatewayconnection $ce_vpegatewayconnection_name -o YAML + print_error "The Private Path integration did not become ready as expected. Perform 'kubectl get vpegatewayconnection $ce_vpegatewayconnection_name -o yaml' for further details." + abortScript + fi +done +echo "Private Path integration '$ce_vpegatewayconnection_name' is ready now!" + +# +# Creating a secret that contains the PostgreSQL credentials +print_msg "\nCreating a Code Engine secret '$ce_db_credentials' to store the database credentials ..."
+ibmcloud ce secret create --name $ce_db_credentials --format generic \ + --from-literal PGHOST=$pps_service_endpoint \ + --from-literal PGPORT=5432 \ + --from-literal PGUSER=dbuser \ + --from-literal PGPASSWORD=myPassw0rd! \ + --from-literal PGDATABASE=friendshipdb +if [ $? -ne 0 ]; then + print_error "Code Engine secret creation failed!" + abortScript +fi + +print_msg "\nCreating a Code Engine job '$ce_job_name' that will connect to the database ..." +ibmcloud ce job create --name $ce_job_name \ + --build-source ./ce-job \ + --env-from-secret $ce_db_credentials \ + --memory 0.5G \ + --cpu 0.25 \ + --wait +if [ $? -ne 0 ]; then + print_error "Code Engine job creation failed!" + abortScript +fi + +print_msg "\nPrinting source code of the deployed job:" +cat ce-job/job.mjs +echo "" + +print_msg "\nSubmitting a single job run that starts 10 instances, to store some records in the database ..." +ibmcloud ce jobrun submit --job $ce_job_name --array-size 10 --wait + +print_msg "\nListing submitted job runs..." +ibmcloud ce jobrun list + +print_msg "\nCreating a Code Engine app '$ce_app_name' that retrieve records from the database ..." +ibmcloud ce app create --name $ce_app_name \ + --build-source ./ce-app \ + --env-from-secret $ce_db_credentials \ + --memory 0.5G \ + --cpu 0.25 +if [ $? -ne 0 ]; then + print_error "Code Engine app creation failed!" + abortScript +fi + +ce_app_endpoint=$(ibmcloud ce app get --name $ce_app_name -o url) + +# +# Verifying the end-to-end flow +print_msg "\nInvoking the Code Engine app by using 'curl $ce_app_endpoint'." +print_msg "The app will perform a SQL query towards the database hosted on the origin server and passthrough the result as JSON response payload ..." +curl --silent $ce_app_endpoint | jq + +if [[ $(curl -s -o /dev/null -w "%{http_code}" $ce_app_endpoint) != "200" ]]; then + print_error "Code Engine app could not get invoked properly!" 
+ abortScript +fi + +print_msg "\nBefore cleaning up, this end-to-end sample created the following set of IBM Cloud resources:" +ibmcloud resource service-instances --type all -g $resource_group_name + +if [[ "${CLEANUP_ON_SUCCESS}" == true ]]; then + print_msg "\nCleaning up the created IBM Cloud resources ..." + clean +else + print_msg "\nSkipping deletion of the created IBM Cloud resources. Please be aware that the created resources will incur costs in your account." + echo "$ ibmcloud resource service-instances --type all -g $resource_group_name" + ibmcloud resource service-instances --type all -g $resource_group_name + + print_msg "\nFollowing commands can be used to further play around with the sample setup:" + echo "1. Submit another job run: 'ibmcloud ce jobrun submit --job $ce_job_name --array-size 10'" + echo "2. Invoke the app: 'curl $ce_app_endpoint'" + echo "3. Private Path service configuration https://cloud.ibm.com/infrastructure/network/privatePathServices/${REGION}~${pps_instance_id}/overview" + echo "4. Inspect the Code Engine project setup https://cloud.ibm.com/codeengine/project/$REGION/$project_guid" + echo "5.
Tear down the sample setup: './run clean'" +fi + +print_success "\n==========================================" +print_success " SUCCESS" +print_success "==========================================\n" diff --git a/private-path-to-vpc-vsi/userdata-vsi-originserver.sh b/private-path-to-vpc-vsi/userdata-vsi-originserver.sh new file mode 100755 index 00000000..40e254c7 --- /dev/null +++ b/private-path-to-vpc-vsi/userdata-vsi-originserver.sh @@ -0,0 +1,38 @@ +#!/bin/bash +touch /tmp/init_started +# ========================== +# PostgreSQL installation +# ========================== +yum update -y +yum install postgresql-server postgresql-contrib -y +postgresql-setup initdb +systemctl start postgresql +systemctl enable postgresql +echo "host all all 0.0.0.0/0 md5" >> /var/lib/pgsql/data/pg_hba.conf +echo "listen_addresses = '*'" >> /var/lib/pgsql/data/postgresql.conf +sudo systemctl restart postgresql +# ========================== +# PostgreSQL init +# ========================== +useradd dbuser +sudo -i -u postgres bash << EOF +createuser dbuser +createdb friendshipdb -O dbuser +psql -c "ALTER USER dbuser PASSWORD 'myPassw0rd!';" +EOF +touch /tmp/postgresql_done + +# ========================== +# nginx installation +# ========================== +dnf -y update +dnf -y install nginx +rm -f /usr/share/nginx/html/index.html +echo "Hello world from `hostname`" > /usr/share/nginx/html/index.html +chmod go+r /usr/share/nginx/html/index.html +systemctl enable nginx +systemctl start nginx +systemctl status nginx +touch /tmp/nginx_done + +touch /tmp/init_done \ No newline at end of file diff --git a/satellite-connector-to-vpc-vsi/ce-job/Dockerfile b/satellite-connector-to-vpc-vsi/ce-job/Dockerfile index 07d58588..dbe44d0a 100644 --- a/satellite-connector-to-vpc-vsi/ce-job/Dockerfile +++ b/satellite-connector-to-vpc-vsi/ce-job/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi9/nodejs-20:latest AS build-env +FROM registry.access.redhat.com/ubi9/nodejs-22:latest AS 
build-env WORKDIR /job # Define which files should be copied into the container image @@ -8,7 +8,7 @@ COPY --chown=default:root *.mjs *.json . RUN npm install # Use a small distroless image for as runtime image -FROM gcr.io/distroless/nodejs20-debian12 +FROM gcr.io/distroless/nodejs22 COPY --from=build-env /job /job WORKDIR /job CMD ["job.mjs"] diff --git a/satellite-connector-to-vpc-vsi/ce-job/package.json b/satellite-connector-to-vpc-vsi/ce-job/package.json index 96845d63..85d6caf1 100644 --- a/satellite-connector-to-vpc-vsi/ce-job/package.json +++ b/satellite-connector-to-vpc-vsi/ce-job/package.json @@ -9,10 +9,10 @@ "license": "Apache-2.0", "dependencies": { "lorem-ipsum": "^2.0.8", - "pg": "^8.11.5" + "pg": "^8.16.3" }, "engines": { - "node": "^20.*", - "npm": "^10.*" + "node": "^22.*", + "npm": "^11.*" } }