diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/domain-resources/domain.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/domain-resources/domain.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/OAM.json b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/OAM.json
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/oam.properties b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/oam.properties
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/resource.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/resource.yaml
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/topology.yaml b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/wdt-artifacts/topology.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/domain-resources/domain.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/domain-resources/domain.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/OIG.json b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/OIG.json
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/agl_jdbc.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/domainInfo.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/oig.properties b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/oig.properties
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/resource.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/resource.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/topology.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/wdt-artifacts/topology.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/oud-storageclass-config.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/oud-storageclass-config.yaml
old mode 100644
new mode 100755
diff --git a/docs/23.4.1/404.html b/docs/23.4.1/404.html
new file mode 100644
index 000000000..1b3f11ce6
--- /dev/null
+++ b/docs/23.4.1/404.html
@@ -0,0 +1,57 @@
+The Enterprise Deployment Automation scripts allow you to deploy the entire Oracle Identity and Access Management suite in a production environment. You can use the scripts to:
+The Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster is a step-by-step guide that describes how to deploy the entire Oracle Identity and Access Management Suite in a production environment. It incorporates best practices learned over many years to ensure that your Identity and Access Management deployment maintains the highest levels of Availability and Security.
+It includes:
+Additionally, as described in Enterprise Deployment Automation, all of the above can be automated using open source scripts.
+The entire Oracle Identity and Access Management Suite can be deployed in a production environment. See the following sections:
+Oracle supports the deployment of the following Oracle Identity Management products on Kubernetes. Click the appropriate document link below to get started with configuring the product.
+Please note the following:
+The individual product guides below for Oracle Access Management, Oracle Identity Governance, Oracle Unified Directory, and Oracle Unified Directory Services Manager are for configuring that product on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For example, if you are deploying Oracle Access Management (OAM) only, then you can follow the Oracle Access Management guide. If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. Note that you can also follow the Enterprise Deployment Guide even if you are only installing one product, such as OAM.
+The individual product guides do not explain how to configure a Kubernetes cluster, because the products can be deployed on any compliant Kubernetes vendor. If you need to understand how to configure a Kubernetes cluster ready for an Oracle Identity Management deployment, follow the Enterprise Deployment Guide in Enterprise Deployments.
+The Enterprise Deployment Automation section also contains details on automation scripts that can:
+The complete Oracle Identity Management suite can be deployed in a production environment.
+The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).
+The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).
+Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management.
+Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management.
+Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory.
+The instructions below explain how to set up NGINX as an ingress for the OAM domain with SSL termination.
+Note: All the steps below should be performed on the master node.
+Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate.
+If you want to use a certificate for testing purposes you can generate a self-signed certificate using openssl:
+$ mkdir <workdir>/ssl
+$ cd <workdir>/ssl
+$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=<nginx-hostname>"
+
For example:
+$ mkdir /scratch/OAMK8S/ssl
+$ cd /scratch/OAMK8S/ssl
+$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com"
+
Note: The CN
should match the host.domain of the master node in order to prevent hostname problems during certificate verification.
The output will look similar to the following:
+Generating a 2048 bit RSA private key
+..........................................+++
+.......................................................................................................+++
+writing new private key to 'tls.key'
+-----
+
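+Optionally, before creating the secret, you can check that the CN in the generated certificate matches the required hostname by inspecting it with openssl (a quick check, assuming tls.crt is in the current directory):
+$ openssl x509 -in tls.crt -noout -subject -dates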
Create a secret for SSL by running the following command:
+$ kubectl -n oamns create secret tls <domain_uid>-tls-cert --key <workdir>/ssl/tls.key --cert <workdir>/ssl/tls.crt
+
For example:
+$ kubectl -n oamns create secret tls accessdomain-tls-cert --key /scratch/OAMK8S/ssl/tls.key --cert /scratch/OAMK8S/ssl/tls.crt
+
The output will look similar to the following:
+secret/accessdomain-tls-cert created
+
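+Optionally, confirm that the secret contains the certificate and key by describing it (using the example namespace and secret name above). The output should list tls.crt and tls.key under Data:
+$ kubectl -n oamns describe secret accessdomain-tls-cert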
Use helm to install NGINX.
+Add the helm chart repository for NGINX using the following command:
+$ helm repo add stable https://kubernetes.github.io/ingress-nginx
+
The output will look similar to the following:
+"stable" has been added to your repositories
+
Update the repository using the following command:
+$ helm repo update
+
The output will look similar to the following:
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+Update Complete. ⎈ Happy Helming!⎈
+
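+Optionally, confirm that the ingress-nginx chart is now available from the repository alias added above:
+$ helm search repo stable/ingress-nginx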
If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort
parameter.
If you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer
parameter. This instructs the Managed Service to set up a Load Balancer to direct traffic to the NGINX ingress.
To install NGINX use the following helm command depending on whether you are using NodePort
or LoadBalancer
:
a) Using NodePort
+$ helm install nginx-ingress -n <domain_namespace> --set controller.extraArgs.default-ssl-certificate=<domain_namespace>/<ssl_secret> --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
For example:
+$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
The output will look similar to the following:
+NAME: nginx-ingress
+LAST DEPLOYED: <DATE>
+
+NAMESPACE: oamns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The nginx-ingress controller has been installed.
+Get the application URL by running these commands:
+ export HTTP_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-controller)
+ export HTTPS_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-controller)
+ export NODE_IP=$(kubectl --namespace oamns get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
+
+ echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ name: example
+ namespace: foo
+ spec:
+ ingressClassName: example-class
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: exampleService
+ port: 80
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
b) Using LoadBalancer
+$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
The output will look similar to the following:
+$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
+NAME: nginx-ingress
+LAST DEPLOYED: <DATE>
+NAMESPACE: oamns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The ingress-nginx controller has been installed.
+It may take a few minutes for the LoadBalancer IP to be available.
+You can watch the status by running 'kubectl --namespace oamns get services -o wide -w nginx-ingress-ingress-nginx-controller'
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ name: example
+ namespace: foo
+ spec:
+ ingressClassName: example-class
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: exampleService
+ port: 80
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
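+Whichever service type you chose, you can verify that the ingress controller pod is running and, in the LoadBalancer case, that an external IP has been assigned (the service and namespace names below assume the examples above):
+$ kubectl get pods -n oamns | grep nginx-ingress
+$ kubectl get svc nginx-ingress-ingress-nginx-controller -n oamns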
Navigate to the following directory:
+$ cd $WORKDIR/kubernetes/charts/ingress-per-domain
+
Edit the values.yaml
and change the domainUID:
parameter to match your domainUID
, for example domainUID: accessdomain
. The file should look as follows:
# Load balancer type. Supported values are: NGINX
+type: NGINX
+
+# Type of Configuration Supported Values are : SSL and NONSSL
+sslType: SSL
+
+# domainType. Supported values are: oam
+domainType: oam
+
+
+#WLS domain as backend to the load balancer
+wlsDomain:
+ domainUID: accessdomain
+ adminServerName: AdminServer
+ adminServerPort: 7001
+ adminServerSSLPort:
+ oamClusterName: oam_cluster
+ oamManagedServerPort: 14100
+ oamManagedServerSSLPort:
+ policyClusterName: policy_cluster
+ policyManagedServerPort: 15100
+ policyManagedServerSSLPort:
+
+# Host specific values
+hostName:
+ enabled: false
+ admin:
+ runtime:
+
Run the following helm command to install the ingress:
+$ cd $WORKDIR
+$ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace <domain_namespace> --values kubernetes/charts/ingress-per-domain/values.yaml
+
For example:
+$ cd $WORKDIR
+$ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml
+
The output will look similar to the following:
+NAME: oam-nginx
+LAST DEPLOYED: <DATE>
+NAMESPACE: oamns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
Run the following command to show the ingress is created successfully:
+$ kubectl get ing -n <domain_namespace>
+
For example:
+$ kubectl get ing -n oamns
+
The output will look similar to the following:
+NAME CLASS HOSTS ADDRESS PORTS AGE
+accessdomain-nginx <none> * 80 5s
+
Find the node port of NGINX using the following command:
+$ kubectl --namespace <domain_namespace> get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller
+
For example:
+$ kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller
+
The output will look similar to the following:
+31051
+
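+If you prefer, you can capture this HTTPS node port in an environment variable for use in the validation commands later in this section (a convenience step reusing the jsonpath query above):
+$ export MASTERNODE_PORT=$(kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller)
+$ echo $MASTERNODE_PORT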
Run the following command to check the ingress:
+$ kubectl describe ing <domainUID>-nginx -n <domain_namespace>
+
For example:
+$ kubectl describe ing accessdomain-nginx -n oamns
+
The output will look similar to the following:
+Name: accessdomain-nginx
+Namespace: oamns
+Address: 10.106.70.55
+Ingress Class: <none>
+Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
+Rules:
+ Host Path Backends
+ ---- ---- --------
+ *
+ /console accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /consolehelp accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /rreg/rreg accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /em accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /oamconsole accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /dms accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /oam/services/rest accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /iam/admin/config accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /iam/admin/diag accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100)
+ /oam/admin/api accessdomain-adminserver:7001 (10.244.1.18:7001)
+ /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100)
+ /access accessdomain-cluster-policy-cluster:15100 (10.244.1.19:15100,10.244.2.12:15100)
+ / accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100)
+Annotations: kubernetes.io/ingress.class: nginx
+ meta.helm.sh/release-name: oam-nginx
+ meta.helm.sh/release-namespace: oamns
+ nginx.ingress.kubernetes.io/configuration-snippet:
+ more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
+ more_set_input_headers "X-Forwarded-Proto: https";
+ more_set_input_headers "WL-Proxy-SSL: true";
+ nginx.ingress.kubernetes.io/enable-access-log: false
+ nginx.ingress.kubernetes.io/ingress.allow-http: false
+ nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Sync 14m (x2 over 15m) nginx-ingress-controller Scheduled for sync
+
To confirm that the new ingress is successfully routing to the domain’s server pods, run the following command to send a request to the URL for the ‘WebLogic ReadyApp framework’:
+$ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready
+
For example:
+a) For NodePort
+$ curl -v -k https://masternode.example.com:31051/weblogic/ready
+
b) For LoadBalancer:
+$ curl -v -k https://loadbalancer.example.com/weblogic/ready
+
The output will look similar to the following:
+* Trying 12.345.67.89...
+* Connected to 12.345.67.89 (12.345.67.89) port 31051 (#0)
+* Initializing NSS with certpath: sql:/etc/pki/nssdb
+* skipping SSL peer certificate verification
+* SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+* Server certificate:
+* subject: CN=masternode.example.com
+* start date: <DATE>
+* expire date: <DATE>
+* common name: masternode.example.com
+* issuer: CN=masternode.example.com
+> GET /weblogic/ready HTTP/1.1
+> User-Agent: curl/7.29.0
+> Host: masternode.example.com:31051
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Date: Mon, 12 Jul 2021 15:06:12 GMT
+< Content-Length: 0
+< Connection: keep-alive
+< Strict-Transport-Security: max-age=15724800; includeSubDomains
+<
+* Connection #0 to host 12.345.67.89 left intact
+
After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31051) as per Validate Domain URLs
+The OAM deployment scripts demonstrate the creation of an OAM domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.
+Before you begin, perform the following steps:
+The sample scripts for Oracle Access Management domain deployment are available at $WORKDIR/kubernetes/create-access-domain
.
Make a copy of the create-domain-inputs.yaml
file:
$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv
+$ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig
+
Edit the create-domain-inputs.yaml
and modify the following parameters. Save the file when complete:
domainUID: <domain_uid>
+domainHome: /u01/oracle/user_projects/domains/<domain_uid>
+image: <image_name>:<tag>
+imagePullSecretName: <container_registry_secret>
+weblogicCredentialsSecretName: <kubernetes_domain_secret>
+logHome: /u01/oracle/user_projects/domains/logs/<domain_uid>
+namespace: <domain_namespace>
+persistentVolumeClaimName: <pvc_name>
+rcuSchemaPrefix: <rcu_prefix>
+rcuDatabaseURL: <rcu_db_host>:<rcu_db_port>/<rcu_db_service_name>
+rcuCredentialsSecret: <kubernetes_rcu_secret>
+
For example:
+domainUID: accessdomain
+domainHome: /u01/oracle/user_projects/domains/accessdomain
+image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October`23>
+imagePullSecretName: orclcred
+weblogicCredentialsSecretName: accessdomain-credentials
+logHome: /u01/oracle/user_projects/domains/logs/accessdomain
+namespace: oamns
+persistentVolumeClaimName: accessdomain-domain-pvc
+rcuSchemaPrefix: OAMK8S
+rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com
+rcuCredentialsSecret: accessdomain-rcu-credentials
+
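+Before running the create domain script, it is worth checking that the persistent volume claim and secrets referenced in the inputs file already exist in the domain namespace (an optional check using the example values above):
+$ kubectl get pvc accessdomain-domain-pvc -n oamns
+$ kubectl get secret accessdomain-credentials accessdomain-rcu-credentials -n oamns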
A full list of parameters in the create-domain-inputs.yaml
file is shown below:
Parameter | +Definition | +Default | +
---|---|---|
adminPort |
+Port number for the Administration Server inside the Kubernetes cluster. | +7001 |
+
adminNodePort |
+Port number of the Administration Server outside the Kubernetes cluster. | +30701 |
+
adminServerName |
+Name of the Administration Server. | +AdminServer |
+
clusterName |
+Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oam_cluster for the OAM domain. |
+oam_cluster |
+
configuredManagedServerCount |
+Number of Managed Server instances to generate for the domain. | +5 |
+
createDomainFilesDir |
+Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst , and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt , and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath , so that the Kubernetes pod can use the scripts and supporting files to create a domain home. |
+wlst |
+
createDomainScriptsMountPath |
+Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. |
+/u01/weblogic |
+
createDomainScriptName |
+Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run. |
+create-domain-job.sh |
+
domainHome |
+Home directory of the OAM domain. If not specified, the value is derived from the domainUID as /shared/domains/<domainUID> . |
+/u01/oracle/user_projects/domains/accessdomain |
+
domainPVMountPath |
+Mount path of the domain persistent volume. | +/u01/oracle/user_projects/domains |
+
domainUID |
+Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. | +accessdomain |
+
domainType |
+Type of the domain. Mandatory input for OAM domains. You must provide one of the supported domain type value: oam (deploys an OAM domain) |
+oam |
+
exposeAdminNodePort |
+Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. | +false |
+
exposeAdminT3Channel |
+Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. | +true |
+
image |
+OAM container image. The operator requires OAM 12.2.1.4. Refer to Obtain the OAM container image for details on how to obtain or create the image. | +oracle/oam:12.2.1.4.0 |
+
imagePullPolicy |
+WebLogic container image pull policy. Legal values are IfNotPresent , Always , or Never |
+IfNotPresent |
+
imagePullSecretName |
+Name of the Kubernetes secret to access the container registry to pull the OAM container image. The presence of the secret will be validated when this parameter is specified. | ++ |
includeServerOutInPodLog |
+Boolean indicating whether to include the server .out to the pod’s stdout. | +true |
+
initialManagedServerReplicas |
+Number of Managed Servers to initially start for the domain. | +2 |
+
javaOptions |
+Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME) , $(DOMAIN_HOME) , $(ADMIN_NAME) , $(ADMIN_PORT) , and $(SERVER_NAME) . |
+-Dweblogic.StdoutDebugEnabled=false |
+
logHome |
+The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/<domainUID> . |
+/u01/oracle/user_projects/domains/logs/accessdomain |
+
managedServerNameBase |
+Base string used to generate Managed Server names. | +oam_server |
+
managedServerPort |
+Port number for each Managed Server. | +8001 |
+
namespace |
+Kubernetes namespace in which to create the domain. | +accessns |
+
persistentVolumeClaimName |
+Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as <domainUID>-weblogic-sample-pvc . |
+accessdomain-domain-pvc |
+
productionModeEnabled |
+Boolean indicating if production mode is enabled for the domain. | +true |
+
serverStartPolicy |
+Determines which WebLogic Server instances will be started. Legal values are Never , IfNeeded , AdminOnly . |
+IfNeeded |
+
t3ChannelPort |
+Port for the T3 channel of the NetworkAccessPoint. | +30012 |
+
t3PublicAddress |
+Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. | +If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster | +
weblogicCredentialsSecretName |
+Name of the Kubernetes secret for the Administration Server’s user name and password. If not specified, then the value is derived from the domainUID as <domainUID>-weblogic-credentials . |
+accessdomain-domain-credentials |
+
weblogicImagePullSecretName |
+Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. | ++ |
serverPodCpuRequest , serverPodMemoryRequest , serverPodCpuLimit , serverPodMemoryLimit |
+The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. |
+Resource requests and resource limits are not specified. | +
rcuSchemaPrefix |
+The schema prefix to use in the database, for example OAM1 . You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. |
+OAM1 |
+
rcuDatabaseURL |
+The database URL. | +oracle-db.default.svc.cluster.local:1521/devpdb.k8s |
+
rcuCredentialsSecret |
+The Kubernetes secret containing the database credentials. | +accessdomain-rcu-credentials |
+
datasourceType |
+Type of JDBC datasource applicable for the OAM domain. Legal values are agl and generic . Choose agl for Active GridLink datasource and generic for Generic datasource. For enterprise deployments, Oracle recommends that you use GridLink data sources to connect to Oracle RAC databases. See the Enterprise Deployment Guide for further details. |
+generic |
+
Note that the names of the Kubernetes resources in the generated YAML files may be formed with the
+value of some of the properties specified in the create-domain-inputs.yaml file. Those properties include the adminServerName, clusterName, and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore ("_") is converted to a hyphen ("-").
The sample demonstrates how to create an OAM domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.
+Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:
+$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv
+$ ./create-domain.sh -i create-domain-inputs.yaml -o /<path to output-directory>
+
For example:
+$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv
+$ ./create-domain.sh -i create-domain-inputs.yaml -o output
+
The output will look similar to the following:
+Input parameters being used
+export version="create-weblogic-sample-domain-inputs-v1"
+export adminPort="7001"
+export adminServerName="AdminServer"
+export domainUID="accessdomain"
+export domainType="oam"
+export domainHome="/u01/oracle/user_projects/domains/accessdomain"
+export serverStartPolicy="IfNeeded"
+export clusterName="oam_cluster"
+export configuredManagedServerCount="5"
+export initialManagedServerReplicas="2"
+export managedServerNameBase="oam_server"
+export managedServerPort="14100"
+export image="container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October`23>"
+export imagePullPolicy="IfNotPresent"
+export imagePullSecretName="orclcred"
+export productionModeEnabled="true"
+export weblogicCredentialsSecretName="accessdomain-credentials"
+export includeServerOutInPodLog="true"
+export logHome="/u01/oracle/user_projects/domains/logs/accessdomain"
+export httpAccessLogInLogHome="true"
+export t3ChannelPort="30012"
+export exposeAdminT3Channel="false"
+export adminNodePort="30701"
+export exposeAdminNodePort="false"
+export namespace="oamns"
+javaOptions=-Dweblogic.StdoutDebugEnabled=false
+export persistentVolumeClaimName="accessdomain-domain-pvc"
+export domainPVMountPath="/u01/oracle/user_projects/domains"
+export createDomainScriptsMountPath="/u01/weblogic"
+export createDomainScriptName="create-domain-job.sh"
+export createDomainFilesDir="wlst"
+export rcuSchemaPrefix="OAMK8S"
+export rcuDatabaseURL="mydatabasehost.example.com:1521/orcl.example.com"
+export rcuCredentialsSecret="accessdomain-rcu-credentials"
+export datasourceType="generic"
+
+validateWlsDomainName called with accessdomain
+createFiles - valuesInputFile is create-domain-inputs.yaml
+createDomainScriptName is create-domain-job.sh
+Generating output/weblogic-domains/accessdomain/create-domain-job.yaml
+Generating output/weblogic-domains/accessdomain/delete-domain-job.yaml
+Generating output/weblogic-domains/accessdomain/domain.yaml
+Checking to see if the secret accessdomain-credentials exists in namespace oamns
+configmap/accessdomain-create-oam-infra-domain-job-cm created
+Checking the configmap accessdomain-create-oam-infra-domain-job-cm was created
+configmap/accessdomain-create-oam-infra-domain-job-cm labeled
+Checking if object type job with name accessdomain-create-oam-infra-domain-job exists
+No resources found in oamns namespace.
+Creating the domain by creating the job output/weblogic-domains/accessdomain/create-domain-job.yaml
+job.batch/accessdomain-create-oam-infra-domain-job created
+Waiting for the job to complete...
+status on iteration 1 of 20
+pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
+status on iteration 2 of 20
+pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
+status on iteration 3 of 20
+pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
+status on iteration 4 of 20
+pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
+status on iteration 5 of 20
+pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running
+status on iteration 6 of 20
+pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Completed
+
+Domain accessdomain was created and will be started by the WebLogic Kubernetes Operator
+
+The following files were generated:
+ output/weblogic-domains/accessdomain/create-domain-inputs.yaml
+ output/weblogic-domains/accessdomain/create-domain-job.yaml
+ output/weblogic-domains/accessdomain/domain.yaml
+
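+If you want to review the domain creation job output in more detail, you can view the logs of the job (assuming the example domainUID and namespace above):
+$ kubectl logs job/accessdomain-create-oam-infra-domain-job -n oamns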
Note: If the domain creation fails, refer to the Troubleshooting section.
+The command creates a domain.yaml
file required for domain creation.
By default, the Java memory parameters assigned to the oam_server cluster are very small. The minimum recommended values are -Xms4096m -Xmx8192m. However, Oracle recommends setting these to -Xms8192m -Xmx8192m in a production environment.
Navigate to the /output/weblogic-domains/<domain_uid>
directory:
$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>
+
For example:
+$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain
+
Edit the domain.yaml
file and inside name: accessdomain-oam-cluster
, add the memory setting as below:
serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+ resources:
+ limits:
+ cpu: "2"
+ memory: "8Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+
For example:
+apiVersion: weblogic.oracle/v1
+kind: Cluster
+metadata:
+ name: accessdomain-oam-cluster
+ namespace: oamns
+spec:
+ clusterName: oam_cluster
+ serverService:
+ precreateService: true
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+ resources:
+ limits:
+ cpu: "2"
+ memory: "8Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+ replicas: 1
+
+
Note: The above CPU and memory values are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.
+Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m
” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.
Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.
+Note: If required you can also set the same resources and limits for the accessdomain-policy-cluster
.
In the domain.yaml
locate the section of the file starting with adminServer:
. Under the env:
tag add the following CLASSPATH
entries. This is required for running the idmconfigtool
from the Administration Server.
- name: CLASSPATH
+ value: "/u01/oracle/wlserver/server/lib/weblogic.jar"
+
For example:
+# adminServer is used to configure the desired behavior for starting the administration server.
+adminServer:
+ # adminService:
+ # channels:
+ # The Admin Server's NodePort
+ # - channelName: default
+ # nodePort: 30701
+ # Uncomment to export the T3Channel as a service
+ # - channelName: T3Channel
+ serverPod:
+ # an (optional) list of environment variable to be set on the admin servers
+ env:
+ - name: USER_MEM_ARGS
+ value: "-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m "
+ - name: CLASSPATH
+ value: "/u01/oracle/wlserver/server/lib/weblogic.jar"
+
If required, you can add the optional parameter maxClusterConcurrentStartup
to the spec
section of the domain.yaml
. This parameter specifies the number of managed servers to be started in sequence per cluster. For example if you updated the initialManagedServerReplicas
to 4
in create-domain-inputs.yaml
and only had 2 nodes, then setting maxClusterConcurrentStartup: 1
will start one managed server at a time on each node, rather than starting them all at once. This can be useful to take the strain off individual nodes at startup. Below is an example with the parameter added:
apiVersion: "weblogic.oracle/v9"
+kind: Domain
+metadata:
+ name: accessdomain
+ namespace: oamns
+ labels:
+ weblogic.domainUID: accessdomain
+spec:
+ # The WebLogic Domain Home
+ domainHome: /u01/oracle/user_projects/domains/accessdomain
+ maxClusterConcurrentStartup: 1
+
+ # The domain home source type
+ # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image
+ domainHomeSourceType: PersistentVolume
+ ....
+
Save the changes to domain.yaml
Create the Kubernetes resource using the following command:
+$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>/domain.yaml
+
For example:
+$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml
+
The output will look similar to the following:
+domain.weblogic.oracle/accessdomain created
+cluster.weblogic.oracle/accessdomain-oam-cluster created
+cluster.weblogic.oracle/accessdomain-policy-cluster created
+
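+It can take several minutes for the servers to start. If you want to watch the pods as they come up, you can run the following (press Ctrl+C to stop watching):
+$ kubectl get pods -n oamns -w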
Verify the domain, server pods, and services are created and in the READY state with a status of 1/1 by running the following command:
$ kubectl get all,domains -n <domain_namespace>
+
For example:
+$ kubectl get all,domains -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+pod/accessdomain-adminserver 1/1 Running 0 11m
+pod/accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 18m
+pod/accessdomain-oam-policy-mgr1 1/1 Running 0 3m31s
+pod/accessdomain-oam-server1 1/1 Running 0 3m31s
+pod/helper 1/1 Running 0 33m
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/accessdomain-adminserver ClusterIP None <none> 7001/TCP 11m
+service/accessdomain-cluster-oam-cluster ClusterIP 10.101.59.154 <none> 14100/TCP 3m31s
+service/accessdomain-cluster-policy-cluster ClusterIP 10.98.236.51 <none> 15100/TCP 3m31s
+service/accessdomain-oam-policy-mgr1 ClusterIP None <none> 15100/TCP 3m31s
+service/accessdomain-oam-policy-mgr2 ClusterIP 10.104.92.12 <none> 15100/TCP 3m31s
+service/accessdomain-oam-policy-mgr3 ClusterIP 10.96.244.37 <none> 15100/TCP 3m31s
+service/accessdomain-oam-policy-mgr4 ClusterIP 10.105.201.23 <none> 15100/TCP 3m31s
+service/accessdomain-oam-policy-mgr5 ClusterIP 10.110.12.227 <none> 15100/TCP 3m31s
+service/accessdomain-oam-server1 ClusterIP None <none> 14100/TCP 3m31s
+service/accessdomain-oam-server2 ClusterIP 10.96.137.33 <none> 14100/TCP 3m31s
+service/accessdomain-oam-server3 ClusterIP 10.103.178.35 <none> 14100/TCP 3m31s
+service/accessdomain-oam-server4 ClusterIP 10.97.254.78 <none> 14100/TCP 3m31s
+service/accessdomain-oam-server5 ClusterIP 10.105.65.104 <none> 14100/TCP 3m31s
+
+NAME COMPLETIONS DURATION AGE
+job.batch/accessdomain-create-oam-infra-domain-job 1/1 2m6s 18m
+
+NAME AGE
+domain.weblogic.oracle/accessdomain 12m
+
+NAME AGE
+cluster.weblogic.oracle/accessdomain-oam-cluster 11m
+cluster.weblogic.oracle/accessdomain-policy-cluster 11m
+
Note: It will take several minutes before all the services listed above show. When a pod shows 0/1 in the READY column, the pod has started but the OAM server associated with it is still starting. While the pods are starting you can check the startup status in the pod logs, by running the following commands:
$ kubectl logs accessdomain-adminserver -n oamns
+$ kubectl logs accessdomain-oam-policy-mgr1 -n oamns
+$ kubectl logs accessdomain-oam-server1 -n oamns
+etc..
+
The default domain created by the script has the following characteristics:
+- An Administration Server named AdminServer listening on port 7001.
+- A configured cluster named oam_cluster of size 5.
+- A configured cluster named policy_cluster of size 5.
+- One started Managed Server, named oam_server1, listening on port 14100.
+- One started Managed Server, named oam_policy_mgr1, listening on port 15100.
+- Log files that are located in <persistent_volume>/logs/<domainUID>.
Run the following command to describe the domain:
+$ kubectl describe domain <domain_uid> -n <domain_namespace>
+
For example:
+$ kubectl describe domain accessdomain -n oamns
+
The output will look similar to the following:
+Name: accessdomain
+Namespace: oamns
+Labels: weblogic.domainUID=accessdomain
+Annotations: <none>
+API Version: weblogic.oracle/v9
+Kind: Domain
+Metadata:
+ Creation Timestamp: <DATE>
+ Generation: 1
+ Managed Fields:
+ API Version: weblogic.oracle/v9
+ Fields Type: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ .:
+ f:kubectl.kubernetes.io/last-applied-configuration:
+ f:labels:
+ .:
+ f:weblogic.domainUID:
+ f:spec:
+ .:
+ f:adminServer:
+ .:
+ f:adminChannelPortForwardingEnabled:
+ f:serverPod:
+ .:
+ f:env:
+ f:serverStartPolicy:
+ f:clusters:
+ f:dataHome:
+ f:domainHome:
+ f:domainHomeSourceType:
+ f:failureRetryIntervalSeconds:
+ f:failureRetryLimitMinutes:
+ f:httpAccessLogInLogHome:
+ f:image:
+ f:imagePullPolicy:
+ f:imagePullSecrets:
+ f:includeServerOutInPodLog:
+ f:logHome:
+ f:logHomeEnabled:
+ f:logHomeLayout:
+ f:maxClusterConcurrentShutdown:
+ f:maxClusterConcurrentStartup:
+ f:maxClusterUnavailable:
+ f:replicas:
+ f:serverPod:
+ .:
+ f:env:
+ f:volumeMounts:
+ f:volumes:
+ f:serverStartPolicy:
+ f:webLogicCredentialsSecret:
+ .:
+ f:name:
+ Manager: kubectl-client-side-apply
+ Operation: Update
+ Time: <DATE>
+ API Version: weblogic.oracle/v9
+ Fields Type: FieldsV1
+ fieldsV1:
+ f:status:
+ .:
+ f:clusters:
+ f:conditions:
+ f:observedGeneration:
+ f:servers:
+ f:startTime:
+ Manager: Kubernetes Java Client
+ Operation: Update
+ Subresource: status
+ Time: <DATE>
+ Resource Version: 2074089
+ UID: e194d483-7383-4359-adb9-bf97de36518b
+Spec:
+ Admin Server:
+ Admin Channel Port Forwarding Enabled: true
+ Server Pod:
+ Env:
+ Name: USER_MEM_ARGS
+ Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m
+ Name: CLASSPATH
+ Value: /u01/oracle/wlserver/server/lib/weblogic.jar
+ Server Start Policy: IfNeeded
+ Clusters:
+ Name: accessdomain-oam-cluster
+ Name: accessdomain-policy-cluster
+ Data Home:
+ Domain Home: /u01/oracle/user_projects/domains/accessdomain
+ Domain Home Source Type: PersistentVolume
+ Failure Retry Interval Seconds: 120
+ Failure Retry Limit Minutes: 1440
+ Http Access Log In Log Home: true
+ Image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October'23>
+ Image Pull Policy: IfNotPresent
+ Image Pull Secrets:
+ Name: orclcred
+ Include Server Out In Pod Log: true
+ Log Home: /u01/oracle/user_projects/domains/logs/accessdomain
+ Log Home Enabled: true
+ Log Home Layout: ByServers
+ Max Cluster Concurrent Shutdown: 1
+ Max Cluster Concurrent Startup: 0
+ Max Cluster Unavailable: 1
+ Replicas: 1
+ Server Pod:
+ Env:
+ Name: JAVA_OPTIONS
+ Value: -Dweblogic.StdoutDebugEnabled=false
+ Name: USER_MEM_ARGS
+ Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m
+ Volume Mounts:
+ Mount Path: /u01/oracle/user_projects/domains
+ Name: weblogic-domain-storage-volume
+ Volumes:
+ Name: weblogic-domain-storage-volume
+ Persistent Volume Claim:
+ Claim Name: accessdomain-domain-pvc
+ Server Start Policy: IfNeeded
+ Web Logic Credentials Secret:
+ Name: accessdomain-credentials
+Status:
+ Clusters:
+ Cluster Name: oam_cluster
+ Conditions:
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Available
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Completed
+ Label Selector: weblogic.domainUID=accessdomain,weblogic.clusterName=oam_cluster
+ Maximum Replicas: 5
+ Minimum Replicas: 0
+ Observed Generation: 1
+ Ready Replicas: 1
+ Replicas: 1
+ Replicas Goal: 1
+ Cluster Name: policy_cluster
+ Conditions:
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Available
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Completed
+ Label Selector: weblogic.domainUID=accessdomain,weblogic.clusterName=policy_cluster
+ Maximum Replicas: 5
+ Minimum Replicas: 0
+ Observed Generation: 1
+ Ready Replicas: 1
+ Replicas: 1
+ Replicas Goal: 1
+ Conditions:
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Available
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Completed
+ Observed Generation: 1
+ Servers:
+ Health:
+ Activation Time: <DATE>
+ Overall Health: ok
+ Subsystems:
+ Subsystem Name: ServerRuntime
+ Symptoms:
+ Node Name: worker-node2
+ Pod Phase: Running
+ Pod Ready: True
+ Server Name: AdminServer
+ State: RUNNING
+ State Goal: RUNNING
+ Cluster Name: oam_cluster
+ Health:
+ Activation Time: <DATE>
+ Overall Health: ok
+ Subsystems:
+ Subsystem Name: ServerRuntime
+ Symptoms:
+ Node Name: worker-node1
+ Pod Phase: Running
+ Pod Ready: True
+ Server Name: oam_server1
+ State: RUNNING
+ State Goal: RUNNING
+ Cluster Name: oam_cluster
+ Server Name: oam_server2
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: oam_cluster
+ Server Name: oam_server3
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: oam_cluster
+ Server Name: oam_server4
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: oam_cluster
+ Server Name: oam_server5
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: policy_cluster
+ Health:
+ Activation Time: <DATE>
+ Overall Health: ok
+ Subsystems:
+ Subsystem Name: ServerRuntime
+ Symptoms:
+ Node Name: worker-node1
+ Pod Phase: Running
+ Pod Ready: True
+ Server Name: oam_policy_mgr1
+ State: RUNNING
+ State Goal: RUNNING
+ Cluster Name: policy_cluster
+ Server Name: oam_policy_mgr2
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: policy_cluster
+ Server Name: oam_policy_mgr3
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: policy_cluster
+ Server Name: oam_policy_mgr4
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: policy_cluster
+ Server Name: oam_policy_mgr5
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Start Time: <DATE>
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Created 15m weblogic.operator Domain accessdomain was created.
+ Normal Available 2m56s weblogic.operator Domain accessdomain is available: a sufficient number of its servers have reached the ready state.
+ Normal Completed 2m56s weblogic.operator Domain accessdomain is complete because all of the following are true: there is no failure detected, there are no pending server shutdowns, and all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version.
+
In the Status
section of the output, the available servers and clusters are listed.
Run the following command to see the pods running the servers and which nodes they are running on:
+$ kubectl get pods -n <domain_namespace> -o wide
+
For example:
+$ kubectl get pods -n oamns -o wide
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+accessdomain-adminserver 1/1 Running 0 18m 10.244.6.63 10.250.42.252 <none> <none>
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 25m 10.244.6.61 10.250.42.252 <none> <none>
+accessdomain-oam-policy-mgr1 1/1 Running 0 10m 10.244.5.13 10.250.42.255 <none> <none>
+accessdomain-oam-server1 1/1 Running 0 10m 10.244.5.12 10.250.42.255 <none> <none>
+helper 1/1 Running 0 40m 10.244.6.60 10.250.42.252 <none> <none>
+
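+If you want a quick summary of each server's state without reading the full describe output, you can query the domain status directly with jsonpath (a sketch based on the status fields shown in the kubectl describe output above):
+$ kubectl get domain accessdomain -n oamns -o jsonpath='{range .status.servers[*]}{.serverName}{": "}{.state}{"\n"}{end}'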
You are now ready to configure an Ingress to direct traffic for your OAM domain as per Configure an Ingress for an OAM domain.
+As described in Prepare Your Environment, you can create your own OAM container image. If you have access to My Oracle Support (MOS) and need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Access Management image for production deployments.
+Using the WebLogic Image Tool, you can create a new Oracle Access Management image with PSUs and interim patches, or update an existing image with one or more interim patches.
+++Recommendations:
++
+- Use create for creating a new Oracle Access Management image containing the Oracle Access Management binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OAM patches because it optimizes the size of the image.
+- Use update for patching an existing Oracle Access Management image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
+
Verify that your environment meets the following prerequisites:
+To set up the WebLogic Image Tool:
+Create a working directory and change to it:
+$ mkdir <workdir>
+$ cd <workdir>
+
For example:
+$ mkdir /scratch/imagetool-setup
+$ cd /scratch/imagetool-setup
+
Download the latest version of the WebLogic Image Tool from the releases page.
+$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
+
where X.X.X is the latest release referenced on the releases page.
+Unzip the release ZIP file in the imagetool-setup
directory.
$ unzip imagetool.zip
+
Execute the following commands to set up the WebLogic Image Tool:
+$ cd <workdir>/imagetool-setup/imagetool/bin
+$ source setup.sh
+
For example:
+$ cd /scratch/imagetool-setup/imagetool/bin
+$ source setup.sh
+
To validate the setup of the WebLogic Image Tool:
+Enter the following command to retrieve the version of the WebLogic Image Tool:
+$ imagetool --version
+
Enter imagetool
then press the Tab key to display the available imagetool
commands:
$ imagetool <TAB>
+cache create help rebase update
+
The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp
, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR
:
$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache
directory. Under this directory, the lookup information is stored in the .metadata
file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR
:
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
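+At any time you can list what is registered in the cache, which is useful later to confirm that installers and patches were added correctly:
+$ imagetool cache listItems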
Creating an Oracle Access Management container image using the WebLogic Image Tool requires additional container scripts for Oracle Access Management domains.
+Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO
:
$ cd <workdir>/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
For example:
+$ cd /scratch/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
+Note: If you want to create the image, continue with the following steps. Otherwise, to update the image see update an image.
+
After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create
a new Oracle Access Management image.
You must download the required Oracle Access Management installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.
+The installation binaries and patches required are:
+Oracle Identity and Access Management 12.2.1.4.0
+Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0
+OAM and FMW Infrastructure Patches:
+Container Image Download/Patch Details
section, locate the Oracle Access Management (OAM)
table. For the latest PSU click the README
link in the Documentation
column. In the README, locate the “Installed Software” section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support.Oracle JDK v8
+The following files in the code repository location <imagetool-setup-location>/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0
are used for creating the image:
additionalBuildCmds.txt
buildArgs
Edit the <workdir>/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs
file and change %DOCKER_REPO%
, %JDK_VERSION%
and %BUILDTAG%
appropriately.
For example:
+create
+--jdkVersion=8u301
+--type oam
+--version=12.2.1.4.0
+--tag=oam-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts
+
Edit the <workdir>/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file
and under the GENERIC section add the line INSTALL_TYPE="Fusion Middleware Infrastructure". For example:
[GENERIC]
+INSTALL_TYPE="Fusion Middleware Infrastructure"
+DECLINE_SECURITY_UPDATES=true
+SECURITY_UPDATES_VIA_MYORACLESUPPORT=false
+
Add a JDK package to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
+
where XXX
is the JDK version downloaded
Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_infrastructure.jar
+
+$ imagetool cache addInstaller --type OAM --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_idm.jar
+
Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
+
Add the rest of the downloaded product patches to the WebLogic Image Tool cache:
+$ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
+
For example:
+$ imagetool cache addEntry --key 32971905_12.2.1.4.0 --value <download location>/p32971905_122140_Generic.zip
+
+$ imagetool cache addEntry --key 20812896_12.2.1.4.0 --value <download location>/p20812896_122140_Generic.zip
+
+$ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value <download location>/p32880070_122140_Generic.zip
+
+$ imagetool cache addEntry --key 33059296_12.2.1.4.0 --value <download location>/p33059296_122140_Generic.zip
+
+$ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value <download location>/p32905339_122140_Generic.zip
+
+$ imagetool cache addEntry --key 33084721_12.2.1.4.0 --value <download location>/p33084721_122140_Generic.zip
+
+$ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value <download location>/p31544353_122140_Linux-x86-64.zip
+
+$ imagetool cache addEntry --key 32957281_12.2.1.4.0 --value <download location>/p32957281_122140_Generic.zip
+
+$ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value <download location>/p33093748_122140_Generic.zip
+
Edit the <workdir>/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs
file and append the product patches and opatch patch as follows:
--patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0
+--opatchBugNumber=28186730_13.9.4.2.8
+
An example buildArgs
file is now as follows:
create
+--jdkVersion=8u301
+--type oam
+--version=12.2.1.4.0
+--tag=oam-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts
+--patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0
+--opatchBugNumber=28186730_13.9.4.2.8
+
Note: In the buildArgs file:
- The --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk.
- The --version value must match the --version value used in the imagetool cache addInstaller command for --type OAM.
Refer to this page for the complete list of options available with the WebLogic Image Tool create
command.
Create the Oracle Access Management image:
+$ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.
For example:
+$ imagetool @<imagetool-setup-location>/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
Check the created image using the docker images
command:
$ docker images | grep oam
+
The output will look similar to the following:
+oam-latestpsu 12.2.1.4.0 ad732fc7c16b About a minute ago 3.35GB
+
Run the following command to save the container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oam-latestpsu.tar oam-latestpsu:12.2.1.4.0
+
The steps below show how to update an existing Oracle Access Management image with an interim patch.
+The container image to be patched must be loaded in the local docker images repository before attempting these steps.
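If the image was previously saved to a tar file with docker save, a minimal example of loading it back into the local repository (assuming a file named oam.tar) is:
$ docker load -i <path>/oam.tar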
+In the examples below the image oracle/oam:12.2.1.4.0
is updated with an interim patch.
$ docker images
+
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB
+
Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.
+Add the OPatch patch to the WebLogic Image Tool cache, for example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
+
Execute the imagetool cache addEntry
command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p32701831_12214210607_Generic.zip
:
$ imagetool cache addEntry --key=32701831_12.2.1.4.210607 --value <downloaded-patches-location>/p32701831_12214210607_Generic.zip
+
Provide the following arguments to the WebLogic Image Tool update
command:
- --fromImage: Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oam:12.2.1.4.0.
- --patches: Multiple patches can be specified as a comma-separated list.
- --tag: Specify the new tag to be applied for the image being built.

Refer here for the complete list of options available with the WebLogic Image Tool update command.
Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.
+
For example:
+$ imagetool update --fromImage oracle/oam:12.2.1.4.0 --tag=oracle/oam-new:12.2.1.4.0 --patches=32701831_12.2.1.4.210607 --opatchBugNumber=28186730_13.9.4.2.8
+
Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown <userid>:<groupid> to correspond with the values returned in the error.
Check the built image using the docker images
command:
$ docker images | grep oam
+
The output will look similar to the following:
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oam-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 3.8GB
+oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB
+
Run the following command to save the patched container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oam-new.tar oracle/oam-new:12.2.1.4.0
+
Oracle supports the deployment of Oracle Access Management on Kubernetes. See the following sections:
The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).
+In this release, OAM domains are supported using the “domain on a persistent volume” +model only, where the domain home is located in a persistent volume (PV).
+The WebLogic Kubernetes Operator has several key features to assist you with deploying and managing Oracle Access Management domains in a Kubernetes +environment. You can:
+The current production release for the Oracle Access Management domain deployment on Kubernetes is 23.4.1. This release uses the WebLogic Kubernetes Operator version 4.1.2.
+For 4.0.X WebLogic Kubernetes Operator refer to Version 23.3.1
+For 3.4.X WebLogic Kubernetes Operator refer to Version 23.1.1
+See the Release Notes for recent changes and known issues for Oracle Access Management domain deployment on Kubernetes.
+See here for limitations in this release.
+This documentation explains how to configure OAM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.
+If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. +Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OAM and no other Oracle Identity Management products.
+Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Access Management deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:
+To view documentation for an earlier release, see:
Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh
script.
Run the following command to delete the domain:
+$ cd $WORKDIR/kubernetes/delete-domain
+$ ./delete-weblogic-domain-resources.sh -d <domain_uid>
+
For example:
+$ cd $WORKDIR/kubernetes/delete-domain
+$ ./delete-weblogic-domain-resources.sh -d accessdomain
+
Drop the RCU schemas as follows:
+$ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
+[oracle@helper ~]$
+[oracle@helper ~]$ export CONNECTION_STRING=<db_host.domain>:<db_port>/<service_name>
+[oracle@helper ~]$ export RCUPREFIX=<rcu_schema_prefix>
+
+/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
+-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
+-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
+-component WLS -component STB -component OAM -f < /tmp/pwd.txt
+
For example:
+$ kubectl exec -it helper -n oamns -- /bin/bash
+[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com
+[oracle@helper ~]$ export RCUPREFIX=OAMK8S
+/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
+-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
+-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
+-component WLS -component STB -component OAM -f < /tmp/pwd.txt
+
Delete the contents of the persistent volume, for example:
+$ rm -rf <persistent_volume>/accessdomainpv/*
+
For example:
+$ rm -rf /scratch/shared/accessdomainpv/*
+
Delete the WebLogic Kubernetes Operator, by running the following command:
+$ helm delete weblogic-kubernetes-operator -n opns
+
Delete the label from the OAM namespace:
+$ kubectl label namespaces <domain_namespace> weblogic-operator-
+
For example:
+$ kubectl label namespaces oamns weblogic-operator-
+
Delete the service account for the operator:
+$ kubectl delete serviceaccount <sample-kubernetes-operator-sa> -n <domain_namespace>
+
For example:
+$ kubectl delete serviceaccount op-sa -n opns
+
Delete the operator namespace:
+$ kubectl delete namespace <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl delete namespace opns
+
To delete NGINX:
+$ helm delete oam-nginx -n <domain_namespace>
+
For example:
+$ helm delete oam-nginx -n oamns
+
Then run:
+$ helm delete nginx-ingress -n <domain_namespace>
+
For example:
+$ helm delete nginx-ingress -n oamns
+
Delete the OAM namespace:
+$ kubectl delete namespace <domain_namespace>
+
For example:
+$ kubectl delete namespace oamns
+
As OAM domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.
+This document shows the basic operations for starting, stopping and scaling servers in the OAM domain.
+For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.
Do not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.
+Note: The instructions below are for starting, stopping, or scaling servers manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.
+The default OAM deployment starts the Administration Server (AdminServer
), one OAM Managed Server (oam_server1
) and one OAM Policy Manager server (oam_policy_mgr1
).
The deployment also creates, but doesn’t start, four extra OAM Managed Servers (oam_server2 to oam_server5) and four more OAM Policy Manager servers (oam_policy_mgr2
to oam_policy_mgr5
).
All these servers are visible in the WebLogic Server Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
by navigating to Domain Structure > oamcluster > Environment > Servers.
To view the running servers using kubectl, run the following command:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output should look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h29m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h36m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h21m
+accessdomain-oam-server1 1/1 Running 0 3h21m
+helper 1/1 Running 0 3h51m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 55m
+
The number of OAM Managed Servers running is dependent on the replicas
parameter configured for the oam-cluster. To start more OAM Managed Servers perform the following steps:
Run the following kubectl command to edit the oam-cluster:
+$ kubectl edit cluster accessdomain-oam-cluster -n <domain_namespace>
+
For example:
+$ kubectl edit cluster accessdomain-oam-cluster -n oamns
+
Note: This opens an edit session for the oam-cluster where parameters can be changed using standard vi
commands.
In the edit session, search for spec:
, and then look for the replicas
parameter under clusterName: oam_cluster
. By default the replicas parameter is set to “1” hence one OAM Managed Server is started (oam_server1
):
...
+spec:
+ clusterName: oam_cluster
+ replicas: 1
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
+ -Xmx8192m
+...
+
To start more OAM Managed Servers, increase the replicas
value as desired. In the example below, two more managed servers will be started by setting replicas
to “3”:
...
+spec:
+ clusterName: oam_cluster
+ replicas: 3
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
+ -Xmx8192m
+...
+
Save the file and exit (:wq!)
+The output will look similar to the following:
+cluster.weblogic.oracle/accessdomain-oam-cluster edited
+
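Note: As an alternative to the interactive edit session, the same replica change can be applied non-interactively with kubectl patch. The following is only a sketch, using the cluster resource and namespace from the example above; it is not part of the original procedure:
$ kubectl patch cluster accessdomain-oam-cluster -n oamns --type=merge -p '{"spec":{"replicas":3}}'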
Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h33m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h40m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h25m
+accessdomain-oam-server1 1/1 Running 0 3h25m
+accessdomain-oam-server2 0/1 Running 0 3h25m
+accessdomain-oam-server3 0/1 Pending 0 9s
+helper 1/1 Running 0 3h55m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 59m
+
Two new pods (accessdomain-oam-server2
and accessdomain-oam-server3
) are started, but currently have a READY
status of 0/1
. This means oam_server2
and oam_server3
are not currently running but are in the process of starting. The servers will take several minutes to start so keep executing the command until READY
shows 1/1
:
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h37m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h43m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h29m
+accessdomain-oam-server1 1/1 Running 0 3h29m
+accessdomain-oam-server2 1/1 Running 0 3h29m
+accessdomain-oam-server3 1/1 Running 0 3m45s
+helper 1/1 Running 0 3h59m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 63m
+
+
Note: To check what is happening during server startup when READY
is 0/1
, run the following command to view the log of the pod that is starting:
$ kubectl logs <pod> -n <domain_namespace>
+
For example:
+$ kubectl logs accessdomain-oam-server3 -n oamns
+
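In addition to the pod log, the pod events can be inspected with kubectl describe (a supplementary check, not part of the original steps). For example:
$ kubectl describe pod accessdomain-oam-server3 -n oamns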
As mentioned in the previous section, the number of OAM Managed Servers running is dependent on the replicas
parameter configured for the cluster. To stop one or more OAM Managed Servers, perform the following:
Run the following kubectl command to edit the oam-cluster:
+$ kubectl edit cluster accessdomain-oam-cluster -n <domain_namespace>
+
For example:
+$ kubectl edit cluster accessdomain-oam-cluster -n oamns
+
In the edit session, search for spec:
, and then look for the replicas
parameter under clusterName: oam_cluster
. In the example below replicas
is set to “3”, hence three OAM Managed Servers are started (accessdomain-oam-server1 to accessdomain-oam-server3):
...
+spec:
+ clusterName: oam_cluster
+ replicas: 3
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
+ -Xmx8192m
+...
+
To stop OAM Managed Servers, decrease the replicas
value as desired. In the example below, we will stop two managed servers by setting replicas to “1”:
spec:
+ clusterName: oam_cluster
+ replicas: 1
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m
+ -Xmx8192m
+...
+
Save the file and exit (:wq!)
+Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h45m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h51m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h37m
+accessdomain-oam-server1 1/1 Running 0 3h37m
+accessdomain-oam-server2 1/1 Running 0 3h37m
+accessdomain-oam-server3 1/1 Terminating 0 11m
+helper 1/1 Running 0 4h6m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 71m
+
One pod now has a STATUS
of Terminating
(accessdomain-oam-server3
). The server will take a minute or two to stop. Once terminated the other pod (accessdomain-oam-server2
) will move to Terminating
and then stop. Keep executing the command until the pods have disappeared:
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h48m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h54m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h40m
+accessdomain-oam-server1 1/1 Running 0 3h40m
+helper 1/1 Running 0 4h9m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 74m
+
The number of OAM Policy Managed Servers running is dependent on the replicas
parameter configured for the policy-cluster. To start more OAM Policy Managed Servers perform the following steps:
Run the following kubectl command to edit the policy-cluster:
+$ kubectl edit cluster accessdomain-policy-cluster -n <domain_namespace>
+
For example:
+$ kubectl edit cluster accessdomain-policy-cluster -n oamns
+
Note: This opens an edit session for the policy-cluster where parameters can be changed using standard vi
commands.
In the edit session, search for spec:
, and then look for the replicas
parameter under clusterName: policy_cluster
. By default the replicas parameter is set to “1” hence one OAM Policy Managed Server is started (oam_policy_mgr1
):
...
+spec:
+ clusterName: policy_cluster
+ replicas: 1
+ serverService:
+ precreateService: true
+...
+
To start more OAM Policy Managed Servers, increase the replicas
value as desired. In the example below, two more managed servers will be started by setting replicas
to “3”:
...
+spec:
+ clusterName: policy_cluster
+ replicas: 3
+ serverService:
+ precreateService: true
+...
+
Save the file and exit (:wq!)
+The output will look similar to the following:
+cluster.weblogic.oracle/accessdomain-policy-cluster edited
+
After saving the changes two new pods will be started (accessdomain-oam-policy-mgr2
and accessdomain-oam-policy-mgr3
). After a few minutes they will have a READY
status of 1/1
. In the example below accessdomain-oam-policy-mgr2
and accessdomain-oam-policy-mgr3
are started:
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h43m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h49m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h35m
+accessdomain-oam-policy-mgr2 1/1 Running 0 3h35m
+accessdomain-oam-policy-mgr3 1/1 Running 0 4m18s
+accessdomain-oam-server1 1/1 Running 0 3h35m
+helper 1/1 Running 0 4h4m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 69m
+
As mentioned in the previous section, the number of OAM Policy Managed Servers running is dependent on the replicas
parameter configured for the cluster. To stop one or more OAM Policy Managed Servers, perform the following:
Run the following kubectl command to edit the policy-cluster:
+$ kubectl edit cluster accessdomain-policy-cluster -n <domain_namespace>
+
For example:
+$ kubectl edit cluster accessdomain-policy-cluster -n oamns
+
In the edit session, search for spec:
, and then look for the replicas
parameter under clusterName: policy_cluster
. To stop OAM Policy Managed Servers, decrease the replicas
value as desired. In the example below, we will stop two managed servers by setting replicas to “1”:
...
+spec:
+ clusterName: policy_cluster
+ replicas: 1
+ serverService:
+ precreateService: true
+...
+
After saving the changes one pod will move to a STATUS
of Terminating
(accessdomain-oam-policy-mgr3
).
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h49m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h55m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h41m
+accessdomain-oam-policy-mgr2 1/1 Running 0 3h41m
+accessdomain-oam-policy-mgr3 1/1 Terminating 0 10m
+accessdomain-oam-server1 1/1 Running 0 3h41m
+helper 1/1 Running 0 4h11m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 75m
+
The pods will take a minute or two to stop, so keep executing the command until the pods have disappeared:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 3h50m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h57m
+accessdomain-oam-policy-mgr1 1/1 Running 0 3h42m
+accessdomain-oam-server1 1/1 Running 0 3h42m
+helper 1/1 Running 0 4h12m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 76m
+
To stop all the OAM Managed Servers and the Administration Server in one operation:
+Run the following kubectl command to edit the domain:
+$ kubectl edit domain <domain_uid> -n <domain_namespace>
+
For example:
+$ kubectl edit domain accessdomain -n oamns
+
In the edit session, search for serverStartPolicy: IfNeeded
under the domain spec:
...
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ volumes:
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: accessdomain-domain-pvc
+ serverStartPolicy: IfNeeded
+ webLogicCredentialsSecret:
+ name: accessdomain-credentials
+...
+
Change serverStartPolicy: IfNeeded
to Never
as follows:
...
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ volumes:
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: accessdomain-domain-pvc
+ serverStartPolicy: Never
+ webLogicCredentialsSecret:
+ name: accessdomain-credentials
+...
+
Save the file and exit (:wq!).
+Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Terminating 0 3h52m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h59m
+accessdomain-oam-policy-mgr1 1/1 Terminating 0 3h44m
+accessdomain-oam-server1 1/1 Terminating 0 3h44m
+helper 1/1 Running 0 4h14m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 78m
+
The Administration Server pods and Managed Server pods will move to a STATUS
of Terminating
. After a few minutes, run the command again and the pods should have disappeared:
NAME READY STATUS RESTARTS AGE
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h
+helper 1/1 Running 0 4h15m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 80m
+
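Note: If you prefer not to use an interactive edit session, the same policy change can be made with kubectl patch. This is only a sketch, assuming the domain accessdomain in namespace oamns, and covers both stopping and restarting:
# Stop all servers in the domain:
$ kubectl patch domain accessdomain -n oamns --type=merge -p '{"spec":{"serverStartPolicy":"Never"}}'
# Start the servers again:
$ kubectl patch domain accessdomain -n oamns --type=merge -p '{"spec":{"serverStartPolicy":"IfNeeded"}}'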
To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: Never
to IfNeeded
as follows:
...
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ volumes:
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: accessdomain-domain-pvc
+ serverStartPolicy: IfNeeded
+ webLogicCredentialsSecret:
+ name: accessdomain-credentials
+...
+
Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h1m
+accessdomain-introspector-jwqxw 1/1 Running 0 10s
+helper 1/1 Running 0 4h17m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 81m
+
The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY
status 1/1
:
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 10m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h12m
+accessdomain-oam-policy-mgr1 1/1 Running 0 7m35s
+accessdomain-oam-server1 1/1 Running 0 7m35s
+helper 1/1 Running 0 4h28m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 92m
+
The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.
+Note: Prior to running these scripts, you must have previously created and deployed the domain.
+The scripts are located in the $WORKDIR/kubernetes/domain-lifecycle
directory. For more information, see the README.
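For illustration only, and assuming the script names and options documented in that README (for example startServer.sh and stopServer.sh with the -d, -n and -s options), stopping and restarting an individual Managed Server might look similar to the following:
$ cd $WORKDIR/kubernetes/domain-lifecycle
$ ./stopServer.sh -d accessdomain -n oamns -s oam_server2
$ ./startServer.sh -d accessdomain -n oamns -s oam_server2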
Kubernetes Horizontal Pod Autoscaler (HPA) is supported from WebLogic Kubernetes Operator 4.0.X and later.
HPA allows automatic scaling (up and down) of the OAM Managed Servers. If load increases then extra OAM Managed Servers will be started as required, up to the configuredManagedServerCount value defined when the domain was created (see Prepare the create domain script). Similarly, if load decreases, OAM Managed Servers will be automatically shut down.
For more information on HPA, see Horizontal Pod Autoscaling.
+The instructions below show you how to configure and run an HPA to scale an OAM cluster (accessdomain-oam-cluster
) resource, based on CPU utilization or memory resource metrics. If required, you can also perform the following for the accessdomain-policy-cluster
.
Note: If you enable HPA and then decide you want to start/stop/scale OAM Managed servers manually as per Domain Life Cycle, it is recommended to delete HPA beforehand as per Delete the HPA.
+In order to use HPA, the OAM domain must have been created with the required resources
parameter as per Set the OAM server memory parameters. For example:
serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+ resources:
+ limits:
+ cpu: "2"
+ memory: "8Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+
If you created the OAM domain without setting these parameters, then you can update the domain using the following steps:
+Run the following command to edit the cluster:
+$ kubectl edit cluster accessdomain-oam-cluster -n oamns
+
Note: This opens an edit session for the oam-cluster
where parameters can be changed using standard vi commands.
In the edit session, search for spec:
, and then look for the replicas parameter under clusterName: oam_cluster
. Change the entry so it looks as follows:
spec:
+ clusterName: oam_cluster
+ replicas: 1
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: 1000m
+ memory: 4Gi
+ serverService:
+ precreateService: true
+ ...
+
Save the file and exit (:wq!)
+The output will look similar to the following:
+cluster.weblogic.oracle/accessdomain-oam-cluster edited
+
The OAM Managed Server pods will then automatically be restarted.
+Before deploying HPA you must deploy the Kubernetes Metrics Server.
+Check to see if the Kubernetes Metrics Server is already deployed:
+$ kubectl get pods -n kube-system | grep metric
+
If a row is returned as follows, then Kubernetes Metric Server is deployed and you can move to Deploy HPA.
+metrics-server-d9694457-mf69d 1/1 Running 0 5m13s
+
If no rows are returned by the previous command, then the Kubernetes Metric Server needs to be deployed. Run the following commands to get the components.yaml
:
$ mkdir $WORKDIR/kubernetes/hpa
+$ cd $WORKDIR/kubernetes/hpa
+$ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+
Deploy the Kubernetes Metrics Server by running the following command:
+$ kubectl apply -f components.yaml
+
The output will look similar to the following:
+serviceaccount/metrics-server created
+clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
+clusterrole.rbac.authorization.k8s.io/system:metrics-server created
+rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
+clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
+clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
+service/metrics-server created
+deployment.apps/metrics-server created
+apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
+
Run the following command to check Kubernetes Metric Server is running:
+$ kubectl get pods -n kube-system | grep metric
+
Make sure the pod has a READY
status of 1/1
:
metrics-server-d9694457-mf69d 1/1 Running 0 39s
+
If the Kubernetes Metric Server does not reach the READY 1/1
state, run the following commands:
$ kubectl describe pod <metrics-server-pod> -n kube-system
+$ kubectl logs <metrics-server-pod> -n kube-system
+
If you see errors such as:
+Readiness probe failed: HTTP probe failed with statuscode: 500
+
and:
+E0907 13:07:50.937308 1 scraper.go:140] "Failed to scrape node" err="Get \"https://100.105.18.113:10250/metrics/resource\": x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs" node="worker-node1"
+
then you may need to install a valid cluster certificate for your Kubernetes cluster.
+For testing purposes, you can resolve this issue by:
+Delete the Kubernetes Metrics Server by running the following command:
+$ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml
+
Edit the $WORKDIR/kubernetes/hpa/components.yaml and locate the args: section. Add --kubelet-insecure-tls to the arguments. For example:
spec:
+ containers:
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --kubelet-insecure-tls
+ - --metric-resolution=15s
+ image: registry.k8s.io/metrics-server/metrics-server:v0.6.4
+ ...
+
Deploy the Kubernetes Metrics Server using the command:
+$ kubectl apply -f components.yaml
+
Run the following and make sure the READY status shows 1/1
:
$ kubectl get pods -n kube-system | grep metric
+
The output should look similar to the following:
+metrics-server-d9694457-mf69d 1/1 Running 0 40s
+
The steps below show how to configure and run an HPA to scale the accessdomain-oam-cluster
, based on the CPU or memory utilization resource metrics.
The default OAM deployment creates the cluster accessdomain-oam-cluster
which starts one OAM Managed Server (oam_server1
). The deployment also creates, but doesn’t start, four extra OAM Managed Servers (oam_server2 to oam_server5).
In the following example an HPA resource is created, targeted at the cluster resource accessdomain-oam-cluster
. This resource will autoscale OAM Managed Servers from a minimum of 1 cluster member up to 5 cluster members. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.
Navigate to the $WORKDIR/kubernetes/hpa directory and create an autoscalehpa.yaml file that contains the following:
#
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: accessdomain-oam-cluster-hpa
+ namespace: oamns
+spec:
+ scaleTargetRef:
+ apiVersion: weblogic.oracle/v1
+ kind: Cluster
+ name: accessdomain-oam-cluster
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 60
+ scaleUp:
+ stabilizationWindowSeconds: 60
+ minReplicas: 1
+ maxReplicas: 5
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 70
+
Note: minReplicas and maxReplicas should match your current domain settings.
Note: For setting HPA based on Memory Metrics, update the metrics block with the following content. Please note we recommend using only CPU or Memory, not both.
+metrics:
+- type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: 70
+
Run the following command to create the autoscaler:
+$ kubectl apply -f autoscalehpa.yaml
+
The output will look similar to the following:
+horizontalpodautoscaler.autoscaling/accessdomain-oam-cluster-hpa created
+
Verify the status of the autoscaler by running the following:
+$ kubectl get hpa -n oamns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+accessdomain-oam-cluster-hpa Cluster/accessdomain-oam-cluster 5%/70% 1 5 1 21s
+
In the example above, this shows that CPU is currently running at 5% for the accessdomain-oam-cluster-hpa
.
Check the current status of the OAM Managed Servers:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 0/1 Running 0 141m
+accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h19m
+accessdomain-oam-policy-mgr1 0/1 Running 0 138m
+accessdomain-oam-server1 1/1 Running 0 138m
+helper 1/1 Running 0 21h
+nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt 1/1 Running 0 4h33m
+
In the above, only accessdomain-oam-server1
is running.
To test HPA can scale up the WebLogic cluster accessdomain-oam-cluster
, run the following commands:
$ kubectl exec --stdin --tty accessdomain-oam-server1 -n oamns -- /bin/bash
+
This will take you inside a bash shell inside the oam_server1
pod:
[oracle@accessdomain-oam-server1 oracle]$
+
Inside the bash shell, run the following command to increase the load on the CPU:
+[oracle@accessdomain-oam-server1 oracle]$ dd if=/dev/zero of=/dev/null
+
This command will continue to run in the foreground.
+In a command window outside the bash shell, run the following command to view the current CPU usage:
+$ kubectl get hpa -n oamns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+accessdomain-oam-cluster-hpa Cluster/accessdomain-oam-cluster 470%/70% 1 5 1 21s
+
In the above example the CPU has increased to 470%. As this is above the 70% limit, the autoscaler increases the replicas on the Cluster resource and the operator responds by starting additional cluster members.
+Run the following to see if any more OAM Managed Servers are started:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 0/1 Running 0 143m
+accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h21m
+accessdomain-oam-policy-mgr1 0/1 Running 0 140m
+accessdomain-oam-server1 1/1 Running 0 140m
+accessdomain-oam-server2 1/1 Running 0 3m20s
+accessdomain-oam-server3 1/1 Running 0 3m20s
+accessdomain-oam-server4 1/1 Running 0 3m19s
+accessdomain-oam-server5 1/1 Running 0 3m5s
+helper 1/1 Running 0 21h
+
In the example above four more OAM Managed Servers have been started (oam_server2 to oam_server5).
Note: It may take some time for the servers to appear and start. Once the servers are at READY
status of 1/1
, the servers are started.
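One convenient way to follow the startup progress (a supplementary suggestion, not an original step) is to watch the pod list until all servers report READY 1/1:
$ kubectl get pods -n oamns -w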
To stop the load on the CPU, in the bash shell, issue a Control C, and then exit the bash shell:
+[oracle@accessdomain-oam-server1 oracle]$ dd if=/dev/zero of=/dev/null
+^C
+[oracle@accessdomain-oam-server1 oracle]$ exit
+
Run the following command to view the current CPU usage:
+$ kubectl get hpa -n oamns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+accessdomain-oam-cluster-hpa Cluster/accessdomain-oam-cluster 19%/70% 1 5 5 19m
+
In the above example CPU has dropped to 19%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 152m
+accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h30m
+accessdomain-oam-policy-mgr1 1/1 Running 0 149m
+accessdomain-oam-server1 1/1 Running 0 149m
+accessdomain-oam-server2 1/1 Running 0 14m
+accessdomain-oam-server3 0/1 Terminating 0 14m
+helper 1/1 Running 0 21h
+nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt 1/1 Running 0 4h45m
+
Eventually, all the servers except oam-server1
will disappear:
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 154m
+accessdomain-create-oam-infra-domain-job-6br2j 0/1 Completed 0 5h32m
+accessdomain-oam-policy-mgr1 1/1 Running 0 151m
+accessdomain-oam-server1 1/1 Running 0 151m
+helper 1/1 Running 0 21h
+nginx-ingress-ingress-nginx-controller-5f9bdf4c9-f5trt 1/1 Running 0 4h47m
+
If you need to delete the HPA, you can do so by running the following command:
+$ cd $WORKDIR/kubernetes/hpa
+$ kubectl delete -f autoscalehpa.yaml
+
Important considerations for Oracle Access Management domains in Kubernetes:
- Learn about the domain life cycle of an OAM domain.
- Describes the steps for WLST administration using a helper pod running in the same Kubernetes cluster as the OAM domain.
- Describes the steps for logging and visualization with Elasticsearch and Kibana.
- Describes the steps for monitoring the OAM domain.
- Describes the steps for implementing the Horizontal Pod Autoscaler.
- Learn about the steps to clean up the OAM domain home.

After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.
If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana.
+In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with variables applicable to your environment.
+Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.
+The table below outlines the variables and values you must set:
+Variable | +Sample Value | +Description | +
---|---|---|
<ELK_VER> |
+8.3.1 |
+The version of logstash you want to install. | +
<ELK_SSL> |
+true |
+If SSL is enabled for ELK set the value to true , or if NON-SSL set to false . This value must be lowercase. |
+
<ELK_HOSTS> |
+https://elasticsearch.example.com:9200 |
+The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. | +
<ELKNS> |
+oamns |
+The domain namespace. | +
<ELK_USER> |
+logstash_internal |
+The name of the user for logstash to access Elasticsearch. | +
<ELK_PASSWORD> |
+password |
+The password for ELK_USER. | +
<ELK_APIKEY> |
+apikey |
+The API key details. | +
You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self-signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.
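If you need to inspect the certificate chain presented by the Elasticsearch server, one generic approach (shown here only as a sketch using the sample host from the table above; the Copying the Elasticsearch Certificate section remains the authoritative procedure) is:
$ openssl s_client -connect elasticsearch.example.com:9200 -showcerts </dev/null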
Create a Kubernetes secret for Elasticsearch using the API Key or Password.
+a) If ELK uses an API Key for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password=<ELK_APIKEY>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
b) If ELK uses a password for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password=<ELK_PASSWORD>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.
+Create a Kubernetes secret to access the required images on hub.docker.com:
+Note: Before executing the command below, you must first have a user account on hub.docker.com.
+kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
+--docker-username="<DOCKER_USER_NAME>" \
+--docker-password=<DOCKER_PASSWORD> --docker-email=<DOCKER_EMAIL_ID> \
+--namespace=<domain_namespace>
+
For example,
+kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
+--docker-username="user@example.com" \
+--docker-password=password --docker-email=user@example.com \
+--namespace=oamns
+
The output will look similar to the following:
+secret/dockercred created
+
Run the following command to get the mountPath
of your domain:
$ kubectl describe domains <domain_uid> -n <domain_namespace> | grep "Mount Path"
+
For example:
+$ kubectl describe domains accessdomain -n oamns | grep "Mount Path"
+
The output will look similar to the following:
+Mount Path: /u01/oracle/user_projects/domains
+
Run the following command to get the OAM domain persistence volume details:
+$ kubectl get pv -n <domain_namespace>
+
For example:
+$ kubectl get pv -n oamns
+
The output will look similar to the following:
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+accessdomain-domain-pv 10Gi RWX Retain Bound oamns/accessdomain-domain-pvc accessdomain-domain-storage-class 23h
+
Make note of the CLAIM value, for example in this case accessdomain-domain-pvc
.
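The claim can also be listed directly (a supplementary command, not an original step):
$ kubectl get pvc -n oamns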
Copy the elk.crt
file to the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory.
Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory and run the following:
kubectl create configmap elk-cert --from-file=elk.crt -n <namespace>
+
For example:
+kubectl create configmap elk-cert --from-file=elk.crt -n oamns
+
The output will look similar to the following:
+configmap/elk-cert created
+
Create a logstash_cm.yaml
file in the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory as follows:
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: oam-logstash-configmap
+ namespace: <ELKNS>
+data:
+ logstash.yml: |
+ #http.host: "0.0.0.0"
+ logstash-config.conf: |
+ input {
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log"
+ tags => "Adminserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log"
+ tags => "Policymanager_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log"
+ tags => "Oamserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
+ tags => "Adminserver_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log"
+ tags => "Policy_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log"
+ tags => "Audit_logs"
+ start_position => beginning
+ }
+ }
+ filter {
+ grok {
+ match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ]
+ }
+ if "_grokparsefailure" in [tags] {
+ mutate {
+ remove_tag => [ "_grokparsefailure" ]
+ }
+ }
+ }
+ output {
+ elasticsearch {
+ hosts => ["<ELK_HOSTS>"]
+ cacert => '/usr/share/logstash/config/certs/elk.crt'
+ index => "oamlogs-000001"
+ ssl => true
+ ssl_certificate_verification => false
+ user => "<ELK_USER>"
+ password => "${ELASTICSEARCH_PASSWORD}"
+ api_key => "${ELASTICSEARCH_PASSWORD}"
+ }
+ }
+
Change the values in the above file as follows:
- Change <ELKNS>, <ELK_HOSTS>, <ELK_SSL>, and <ELK_USER> to match the values for your environment.
- Change /u01/oracle/user_projects/domains to match the mountPath returned earlier.
- If your domainUID is anything other than accessdomain, change each instance of accessdomain to your domainUID.
- If Elasticsearch uses an API key for authentication, delete the user and password lines.
- If Elasticsearch uses a password for authentication, delete the api_key line.
- If no authentication is used, delete the user, password, and api_key lines.

For example:
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: oam-logstash-configmap
+ namespace: oamns
+data:
+ logstash.yml: |
+ #http.host: "0.0.0.0"
+ logstash-config.conf: |
+ input {
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log"
+ tags => "Adminserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log"
+ tags => "Policymanager_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log"
+ tags => "Oamserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
+ tags => "Adminserver_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log"
+ tags => "Policy_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log"
+ tags => "Audit_logs"
+ start_position => beginning
+ }
+ }
+ filter {
+ grok {
+ match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ]
+ }
+ if "_grokparsefailure" in [tags] {
+ mutate {
+ remove_tag => [ "_grokparsefailure" ]
+ }
+ }
+ }
+ output {
+ elasticsearch {
+ hosts => ["https://elasticsearch.example.com:9200"]
+ cacert => '/usr/share/logstash/config/certs/elk.crt'
+ index => "oamlogs-000001"
+ ssl => true
+ ssl_certificate_verification => false
+ user => "logstash_internal"
+ password => "${ELASTICSEARCH_PASSWORD}"
+ }
+ }
+
Run the following command to create the configmap:
+$ kubectl apply -f logstash_cm.yaml
+
The output will look similar to the following:
+configmap/oam-logstash-configmap created
+
Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory and create a logstash.yaml
file as follows:
apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: oam-logstash
+ namespace: <ELKNS>
+spec:
+ selector:
+ matchLabels:
+ k8s-app: logstash
+ template: # create pods using pod definition in this template
+ metadata:
+ labels:
+ k8s-app: logstash
+ spec:
+ imagePullSecrets:
+ - name: dockercred
+ containers:
+ - command:
+ - logstash
+ image: logstash:<ELK_VER>
+ imagePullPolicy: IfNotPresent
+ name: oam-logstash
+ env:
+ - name: ELASTICSEARCH_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-pw-elastic
+ key: password
+ resources:
+ ports:
+ - containerPort: 5044
+ name: logstash
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects
+ name: weblogic-domain-storage-volume
+ - name: shared-logs
+ mountPath: /shared-logs
+ - mountPath: /usr/share/logstash/pipeline/
+ name: oam-logstash-pipeline
+ - mountPath: /usr/share/logstash/config/logstash.yml
+ subPath: logstash.yml
+ name: config-volume
+ - mountPath: /usr/share/logstash/config/certs
+ name: elk-cert
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: elk.crt
+ path: elk.crt
+ name: elk-cert
+ name: elk-cert
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash-config.conf
+ path: logstash-config.conf
+ name: oam-logstash-configmap
+ name: oam-logstash-pipeline
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash.yml
+ path: logstash.yml
+ name: oam-logstash-configmap
+ name: config-volume
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: accessdomain-domain-pvc
+ - name: shared-logs
+ emptyDir: {}
+
Change the values in the above file as follows:
- Change <ELKNS> and <ELK_VER> to match the values for your environment.
- Change /u01/oracle/user_projects/domains to match the mountPath returned earlier.
- Change the claimName value to match the claimName returned earlier.
- If required, change image: logstash:<ELK_VER> to the location of the image in your container registry, for example: container-registry.example.com/logstash:8.3.1
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: oam-logstash
+ namespace: oamns
+spec:
+ selector:
+ matchLabels:
+ k8s-app: logstash
+ template: # create pods using pod definition in this template
+ metadata:
+ labels:
+ k8s-app: logstash
+ spec:
+ imagePullSecrets:
+ - name: dockercred
+ containers:
+ - command:
+ - logstash
+ image: logstash:8.3.1
+ imagePullPolicy: IfNotPresent
+ name: oam-logstash
+ env:
+ - name: ELASTICSEARCH_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-pw-elastic
+ key: password
+ resources:
+ ports:
+ - containerPort: 5044
+ name: logstash
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ - name: shared-logs
+ mountPath: /shared-logs
+ - mountPath: /usr/share/logstash/pipeline/
+ name: oam-logstash-pipeline
+ - mountPath: /usr/share/logstash/config/logstash.yml
+ subPath: logstash.yml
+ name: config-volume
+ - mountPath: /usr/share/logstash/config/certs
+ name: elk-cert
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: elk.crt
+ path: elk.crt
+ name: elk-cert
+ name: elk-cert
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash-config.conf
+ path: logstash-config.conf
+ name: oam-logstash-configmap
+ name: oam-logstash-pipeline
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash.yml
+ path: logstash.yml
+ name: oam-logstash-configmap
+ name: config-volume
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: accessdomain-domain-pvc
+ - name: shared-logs
+ emptyDir: {}
+
Deploy the logstash
pod by executing the following command:
$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml
+
The output will look similar to the following:
+deployment.apps/oam-logstash created
+
Run the following command to check the logstash
pod is created correctly:
$ kubectl get pods -n <namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output should look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 18h
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 23h
+accessdomain-oam-policy-mgr1 1/1 Running 0 18h
+accessdomain-oam-policy-mgr2 1/1 Running 0 18h
+accessdomain-oam-server1 1/1 Running 1 18h
+accessdomain-oam-server2 1/1 Running 1 18h
+elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 5m
+helper 1/1 Running 0 23h
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 20h
+oam-logstash-bbbdf5876-85nkd 1/1 Running 0 4m23s
+
Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:
+$ kubectl logs -f oam-logstash-<pod> -n oamns
+
Most errors occur due to misconfiguration of the logstash_cm.yaml
or logstash.yaml
. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.
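To confirm what was actually stored (a supplementary check, not an original step), you can dump the configmaps back out and review the certificate and pipeline content:
$ kubectl get configmap oam-logstash-configmap -n oamns -o yaml
$ kubectl get configmap elk-cert -n oamns -o yaml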
If the pod has errors, delete the pod and configmap as follows:
+$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml
+$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml
+
Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod.
+To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.
+For Kibana 7.7.x and below:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Kibana > Index Patterns.
+In the Create Index Pattern page enter oamlogs*
for the Index pattern and click Next Step.
In the Configure settings page, from the Time Filter field name drop down menu select @timestamp
and click Create index pattern.
Once the index pattern is created click on Discover in the navigation menu to view the OAM logs.
+For Kibana version 7.8.X and above:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Stack Management.
+Click Data Views in the Kibana section.
+Click Create Data View and enter the following information:
+oamlogs*
@timestamp
Click Create Data View.
+From the Navigation menu, click Discover to view the log file entries.
+From the drop down menu, select oamlogs*
to view the log file entries.
After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain.
+The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.
+There are two ways to setup monitoring and you should choose one method or the other:
+ +The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh
sets up the monitoring for the OAM domain. It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OAM domain. It also deploys the WebLogic Server Grafana dashboard.
For usage details execute ./setup-monitoring.sh -h.
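For example:
$ cd $WORKDIR/kubernetes/monitoring-service
$ ./setup-monitoring.sh -h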
Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml
and change the domainUID
, domainNamespace
, and weblogicCredentialsSecretName
to correspond to your deployment. Also change wlsMonitoringExporterTooamCluster
, wlsMonitoringExporterTopolicyCluster
, exposeMonitoringNodePort
to true
. For example:
version: create-accessdomain-monitoring-inputs-v1
+
+# Unique ID identifying your domain.
+# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
+domainUID: accessdomain
+
+# Name of the domain namespace
+domainNamespace: oamns
+
+# Boolean value indicating whether to install kube-prometheus-stack
+setupKubePrometheusStack: true
+
+# Additional parameters for helm install kube-prometheus-stack
+# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
+# Sample :
+# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
+additionalParamForKubePrometheusStack:
+
+# Name of the monitoring namespace
+monitoringNamespace: monitoring
+
+# Name of the Admin Server
+adminServerName: AdminServer
+#
+# Port number for admin server
+adminServerPort: 7001
+
+# Cluster name
+oamClusterName: oam_cluster
+
+# Port number for managed server
+oamManagedServerPort: 14100
+
+# WebLogic Monitoring Exporter to Cluster
+wlsMonitoringExporterTooamCluster: true
+
+# Cluster name
+policyClusterName: policy_cluster
+
+# Port number for managed server
+policyManagedServerPort: 15100
+
+# WebLogic Monitoring Exporter to Cluster
+wlsMonitoringExporterTopolicyCluster: true
+
+
+# Boolean to indicate if the adminNodePort will be exposed
+exposeMonitoringNodePort: true
+
+# NodePort to expose Prometheus
+prometheusNodePort: 32101
+
+# NodePort to expose Grafana
+grafanaNodePort: 32100
+
+# NodePort to expose Alertmanager
+alertmanagerNodePort: 32102
+
+# Name of the Kubernetes secret for the Admin Server's username and password
+weblogicCredentialsSecretName: accessdomain-credentials
+
Note: If your cluster does not have access to the internet to pull external images, such as grafana or prometheus, you must load the images in a local container registry. You must then set additionalParamForKubePrometheusStack
to set the location of the image in your local container registry, for example:
# Additional parameters for helm install kube-prometheus-stack
+# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
+# Sample :
+# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
+additionalParamForKubePrometheusStack: --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.3.4
+
Run the following command to set up monitoring.
+$ cd $WORKDIR/kubernetes/monitoring-service
+$ ./setup-monitoring.sh -i monitoring-inputs.yaml
+
The output should be similar to the following:
+Monitoring setup in monitoring in progress
+
+node/worker-node1 not labeled
+node/worker-node2 not labeled
+node/master-node not labeled
+Setup prometheus-community/kube-prometheus-stack started
+"prometheus-community" has been added to your repositories
+Hang tight while we grab the latest from your chart repositories...
+ ...Successfully got an update from the "stable" chart repository
+ ...Successfully got an update from the "prometheus" chart repository
+ ...Successfully got an update from the "prometheus-community" chart repository
+ ...Successfully got an update from the "appscode" chart repository
+Update Complete. ⎈ Happy Helming!⎈
+Setup prometheus-community/kube-prometheus-stack in progress
+W0320 <DATE> 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
+W0320 <DATE> 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
+W0320 <DATE> 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
+..
+W0320 <DATE> 19121 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
+NAME: monitoring
+LAST DEPLOYED: <DATE>
+NAMESPACE: monitoring
+STATUS: deployed
+REVISION: 1
+NOTES:
+kube-prometheus-stack has been installed. Check its status by running:
+ kubectl --namespace monitoring get pods -l "release=monitoring"
+
+Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
+Setup prometheus-community/kube-prometheus-stack completed
+Deploy WebLogic Monitoring Exporter started
+Deploying WebLogic Monitoring Exporter with domainNamespace[oamns], domainUID[accessdomain], adminServerPodName[accessdomain-adminserver]
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+100 655 100 655 0 0 1564 0 --:--:-- --:--:-- --:--:-- 1566
+100 2196k 100 2196k 0 0 2025k 0 0:00:01 0:00:01 --:--:-- 5951k
+created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
+created /tmp/ci-EHhB7bP847
+/tmp/ci-EHhB7bP847 $WORKDIR/kubernetes/monitoring-service
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service
+created /tmp/ci-e7wPrlLlud
+/tmp/ci-e7wPrlLlud $WORKDIR/kubernetes/monitoring-service
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service
+created /tmp/ci-U38XXs6d06
+/tmp/ci-U38XXs6d06 $WORKDIR/kubernetes/monitoring-service
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service
+
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+
+Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ...
+Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-adminserver.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .>
+Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-oam.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-policy.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Disconnected from weblogic server: AdminServer
+
+
+Exiting WebLogic Scripting Tool.
+
+<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.>
+Deploy WebLogic Monitoring Exporter completed
+secret/basic-auth created
+servicemonitor.monitoring.coreos.com/wls-exporter created
+Deploying WebLogic Server Grafana Dashboard....
+{"id":25,"slug":"weblogic-server-dashboard","status":"success","uid":"5yUwzbZWz","url":"/d/5yUwzbZWz/weblogic-server-dashboard","version":1}
+Deployed WebLogic Server Grafana Dashboard successfully
+
+Grafana is available at NodePort: 32100
+Prometheus is available at NodePort: 32101
+Alertmanager is available at NodePort: 32102
+==============================================================
+
After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.
+Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery
Click on serviceMonitor/oamns/wls-exporter/0
and then show more. Verify all the targets are mentioned.
Note: It may take several minutes for serviceMonitor/oamns/wls-exporter/0
to appear, so refresh the page until it does.
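+You can also check from the command line that the ServiceMonitor exists and that Prometheus reports the scrape targets as up (a sketch, assuming the NodePort 32101 and the oamns namespace used above; the exact label set on the up series depends on the generated scrape configuration):
+# confirm the ServiceMonitor resource was created
+$ kubectl get servicemonitor wls-exporter -n oamns
+# query Prometheus for the health of the scraped wls-exporter targets
+$ curl -s "http://${MASTERNODE-HOSTNAME}:32101/api/v1/query" --data-urlencode 'query=up{namespace="oamns"}'
+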
Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100
and login with admin/admin
. Change your password when prompted.
In the Dashboards
panel, click on WebLogic Server Dashboard
. The dashboard for your OAM domain should be displayed. If it is not displayed, click the Search
icon in the left hand menu and search for WebLogic Server Dashboard
.
To uninstall Prometheus, Grafana, the WebLogic Monitoring Exporter and the associated deployments, you can run the $WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh
script. For usage details execute ./delete-monitoring.sh -h
.
To uninstall, run the following commands:
+$ cd $WORKDIR/kubernetes/monitoring-service
+$ ./delete-monitoring.sh -i monitoring-inputs.yaml
+$ kubectl delete namespace monitoring
+
Install Prometheus, Grafana and the WebLogic Monitoring Exporter manually. Create the web applications and deploy them to the OAM domain.
+Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux
. To check if your nodes are labelled, run the following:
$ kubectl get nodes --show-labels
+
If the nodes are labelled the output will look similar to the following:
+NAME STATUS ROLES AGE VERSION LABELS
+worker-node1 Ready <none> 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux
+worker-node2 Ready <none> 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux
+master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master=
+
If the nodes are not labelled, run the following command:
+$ kubectl label nodes --all kubernetes.io/os=linux
+
Clone Prometheus by running the following commands:
+$ cd $WORKDIR/kubernetes/monitoring-service
+$ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0
+
Note: Refer to the compatibility matrix of Kube Prometheus and download the release of the repository that corresponds to the Kubernetes version of your cluster.
+If your cluster does not have access to the internet to pull external images, such as grafana, you must load the images in a local container registry.
+For grafana, edit the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/grafana-deployment.yaml
and change image: grafana/grafana:7.3.4
to your local container registry image location, for example image: container-registry.example.com/grafana/grafana:8.3.4
.
For any other images check the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/*deployment.yaml
files.
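+For the grafana example above, the image reference can be switched with a single in-place edit (a sketch, assuming the default grafana/grafana:7.3.4 tag shipped in the manifest and the example registry location mentioned above):
+# point the grafana deployment at a local container registry
+$ sed -i 's|grafana/grafana:7.3.4|container-registry.example.com/grafana/grafana:8.3.4|' $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/grafana-deployment.yaml
+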
Run the following command to create the namespace and custom resource definitions:
+$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
+$ kubectl create -f manifests/setup
+
The output will look similar to the following:
+namespace/monitoring created
+customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created
+clusterrole.rbac.authorization.k8s.io/prometheus-operator created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
+deployment.apps/prometheus-operator created
+service/prometheus-operator created
+serviceaccount/prometheus-operator created
+
Run the following command to create the rest of the resources:
+$ kubectl create -f manifests/
+
The output will look similar to the following:
+alertmanager.monitoring.coreos.com/main created
+networkpolicy.networking.k8s.io/alertmanager-main created
+poddisruptionbudget.policy/alertmanager-main created
+prometheusrule.monitoring.coreos.com/alertmanager-main-rules created
+secret/alertmanager-main created
+service/alertmanager-main created
+serviceaccount/alertmanager-main created
+servicemonitor.monitoring.coreos.com/alertmanager-main created
+clusterrole.rbac.authorization.k8s.io/blackbox-exporter created
+clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created
+configmap/blackbox-exporter-configuration created
+deployment.apps/blackbox-exporter created
+networkpolicy.networking.k8s.io/blackbox-exporter created
+service/blackbox-exporter created
+serviceaccount/blackbox-exporter created
+servicemonitor.monitoring.coreos.com/blackbox-exporter created
+secret/grafana-config created
+secret/grafana-datasources created
+configmap/grafana-dashboard-alertmanager-overview created
+configmap/grafana-dashboard-apiserver created
+configmap/grafana-dashboard-cluster-total created
+configmap/grafana-dashboard-controller-manager created
+configmap/grafana-dashboard-grafana-overview created
+configmap/grafana-dashboard-k8s-resources-cluster created
+configmap/grafana-dashboard-k8s-resources-namespace created
+configmap/grafana-dashboard-k8s-resources-node created
+configmap/grafana-dashboard-k8s-resources-pod created
+configmap/grafana-dashboard-k8s-resources-workload created
+configmap/grafana-dashboard-k8s-resources-workloads-namespace created
+configmap/grafana-dashboard-kubelet created
+configmap/grafana-dashboard-namespace-by-pod created
+configmap/grafana-dashboard-namespace-by-workload created
+configmap/grafana-dashboard-node-cluster-rsrc-use created
+configmap/grafana-dashboard-node-rsrc-use created
+configmap/grafana-dashboard-nodes-darwin created
+configmap/grafana-dashboard-nodes created
+configmap/grafana-dashboard-persistentvolumesusage created
+configmap/grafana-dashboard-pod-total created
+configmap/grafana-dashboard-prometheus-remote-write created
+configmap/grafana-dashboard-prometheus created
+configmap/grafana-dashboard-proxy created
+configmap/grafana-dashboard-scheduler created
+configmap/grafana-dashboard-workload-total created
+configmap/grafana-dashboards created
+deployment.apps/grafana created
+networkpolicy.networking.k8s.io/grafana created
+prometheusrule.monitoring.coreos.com/grafana-rules created
+service/grafana created
+serviceaccount/grafana created
+servicemonitor.monitoring.coreos.com/grafana created
+prometheusrule.monitoring.coreos.com/kube-prometheus-rules created
+clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
+clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
+deployment.apps/kube-state-metrics created
+networkpolicy.networking.k8s.io/kube-state-metrics created
+prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created
+service/kube-state-metrics created
+serviceaccount/kube-state-metrics created
+servicemonitor.monitoring.coreos.com/kube-state-metrics created
+prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created
+servicemonitor.monitoring.coreos.com/kube-apiserver created
+servicemonitor.monitoring.coreos.com/coredns created
+servicemonitor.monitoring.coreos.com/kube-controller-manager created
+servicemonitor.monitoring.coreos.com/kube-scheduler created
+servicemonitor.monitoring.coreos.com/kubelet created
+clusterrole.rbac.authorization.k8s.io/node-exporter created
+clusterrolebinding.rbac.authorization.k8s.io/node-exporter created
+daemonset.apps/node-exporter created
+networkpolicy.networking.k8s.io/node-exporter created
+prometheusrule.monitoring.coreos.com/node-exporter-rules created
+service/node-exporter created
+serviceaccount/node-exporter created
+servicemonitor.monitoring.coreos.com/node-exporter created
+clusterrole.rbac.authorization.k8s.io/prometheus-k8s created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+networkpolicy.networking.k8s.io/prometheus-k8s created
+poddisruptionbudget.policy/prometheus-k8s created
+prometheus.monitoring.coreos.com/k8s created
+prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s-config created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+service/prometheus-k8s created
+serviceaccount/prometheus-k8s created
+servicemonitor.monitoring.coreos.com/prometheus-k8s created
+apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
+clusterrole.rbac.authorization.k8s.io/prometheus-adapter created
+clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created
+clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created
+clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created
+configmap/adapter-config created
+deployment.apps/prometheus-adapter created
+networkpolicy.networking.k8s.io/prometheus-adapter created
+poddisruptionbudget.policy/prometheus-adapter created
+rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created
+service/prometheus-adapter created
+serviceaccount/prometheus-adapter created
+servicemonitor.monitoring.coreos.com/prometheus-adapter created
+clusterrole.rbac.authorization.k8s.io/prometheus-operator created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
+deployment.apps/prometheus-operator created
+networkpolicy.networking.k8s.io/prometheus-operator created
+prometheusrule.monitoring.coreos.com/prometheus-operator-rules created
+service/prometheus-operator created
+serviceaccount/prometheus-operator created
+servicemonitor.monitoring.coreos.com/prometheus-operator created
+
Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:
+$ kubectl patch svc grafana -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32100 }]'
+
+$ kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32101 }]'
+
+$ kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32102 }]'
+
Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.
+The output will look similar to the following:
+service/grafana patched
+service/prometheus-k8s patched
+service/alertmanager-main patched
+
Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and that the respective services have the NodePorts configured correctly:
+$ kubectl get pods,services -o wide -n monitoring
+
The output should look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/alertmanager-main-0 2/2 Running 0 67s 10.244.1.7 worker-node1 <none> <none>
+pod/alertmanager-main-1 2/2 Running 0 67s 10.244.2.26 worker-node2 <none> <none>
+pod/alertmanager-main-2 2/2 Running 0 67s 10.244.1.8 worker-node1 <none> <none>
+pod/grafana-f8cd57fcf-tmlqt 1/1 Running 0 65s 10.244.2.28 worker-node2 <none> <none>
+pod/kube-state-metrics-587bfd4f97-l8knh 3/3 Running 0 65s 10.244.1.9 worker-node1 <none> <none>
+pod/node-exporter-2ztpd 2/2 Running 0 65s 10.247.95.26 worker-node1 <none> <none>
+pod/node-exporter-92sxb 2/2 Running 0 65s 10.250.40.59 worker-node2 <none> <none>
+pod/node-exporter-d77tl 2/2 Running 0 65s 10.196.54.36 master-node <none> <none>
+pod/prometheus-adapter-69b8496df6-6gqrz 1/1 Running 0 65s 10.244.2.29 worker-node2 <none> <none>
+pod/prometheus-k8s-0 2/2 Running 1 66s 10.244.2.27 worker-node2 <none> <none>
+pod/prometheus-k8s-1 2/2 Running 1 66s 10.244.1.10 worker-node1 <none> <none>
+pod/prometheus-operator-7649c7454f-9p747 2/2 Running 0 2m 10.244.2.25 worker-node2 <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/alertmanager-main NodePort 10.104.92.62 <none> 9093:32102/TCP 67s alertmanager=main,app=alertmanager
+service/alertmanager-operated ClusterIP None <none> 9093/TCP,9094/TCP,9094/UDP 67s app=alertmanager
+service/grafana NodePort 10.100.171.3 <none> 3000:32100/TCP 66s app=grafana
+service/kube-state-metrics ClusterIP None <none> 8443/TCP,9443/TCP 66s app.kubernetes.io/name=kube-state-metrics
+service/node-exporter ClusterIP None <none> 9100/TCP 66s app.kubernetes.io/name=node-exporter
+service/prometheus-adapter ClusterIP 10.109.248.92 <none> 443/TCP 66s name=prometheus-adapter
+service/prometheus-k8s NodePort 10.98.212.247 <none> 9090:32101/TCP 66s app=prometheus,prometheus=k8s
+service/prometheus-operated ClusterIP None <none> 9090/TCP 66s app=prometheus
+service/prometheus-operator ClusterIP None <none> 8443/TCP 2m1s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator
+
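+A quick way to confirm that the NodePorts assigned above respond is to call each component's health endpoint (a sketch, assuming the ports patched earlier and that the master node is reachable from where you run curl):
+$ curl -s http://${MASTERNODE-HOSTNAME}:32100/api/health   # Grafana
+$ curl -s http://${MASTERNODE-HOSTNAME}:32101/-/healthy    # Prometheus
+$ curl -s http://${MASTERNODE-HOSTNAME}:32102/-/healthy    # Alertmanager
+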
Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war
package needs to be updated and created for each listening port (Administration Server and Managed Servers) in the domain. Set the following environment variables and run the script get-wls-exporter.sh
to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy
:
$ cd $WORKDIR/kubernetes/monitoring-service/scripts
+$ export adminServerPort=7001
+$ export wlsMonitoringExporterTopolicyCluster=true
+$ export policyManagedServerPort=15100
+$ export wlsMonitoringExporterTooamCluster=true
+$ export oamManagedServerPort=14100
+$ sh get-wls-exporter.sh
+
The output will look similar to the following:
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+100 655 100 655 0 0 1107 0 --:--:-- --:--:-- --:--:-- 1108
+100 2196k 100 2196k 0 0 1787k 0 0:00:01 0:00:01 --:--:-- 9248k
+created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
+domainNamespace is empty, setting to default oamns
+domainUID is empty, setting to default accessdomain
+weblogicCredentialsSecretName is empty, setting to default "accessdomain-domain-credentials"
+adminServerName is empty, setting to default "AdminServer"
+oamClusterName is empty, setting to default "oam_cluster"
+policyClusterName is empty, setting to default "policy_cluster"
+created /tmp/ci-Bu74rCBxwu
+/tmp/ci-Bu74rCBxwu $WORKDIR/kubernetes/monitoring-service/scripts
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service/scripts
+created /tmp/ci-RQv3rLbLsX
+/tmp/ci-RQv3rLbLsX $WORKDIR/kubernetes/monitoring-service/scripts
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service/scripts
+created /tmp/ci-DWIYlocP5e
+/tmp/ci-DWIYlocP5e $WORKDIR/kubernetes/monitoring-service/scripts
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service/scripts
+
Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Access Management domain:
+$ cd $WORKDIR/kubernetes/monitoring-service/scripts
+$ kubectl cp wls-exporter-deploy <domain_namespace>/<domain_uid>-adminserver:/u01/oracle
+$ kubectl cp deploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName <domain_uid> -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true
+
For example:
+$ cd $WORKDIR/kubernetes/monitoring-service/scripts
+$ kubectl cp wls-exporter-deploy oamns/accessdomain-adminserver:/u01/oracle
+$ kubectl cp deploy-weblogic-monitoring-exporter.py oamns/accessdomain-adminserver:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n oamns accessdomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName accessdomain -adminServerName AdminServer -adminURL accessdomain-adminserver:7001 -username weblogic -password <password> -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true
+
The output will look similar to the following:
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+
+Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ...
+Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .>
+..Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-adminserver.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-oam.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-policy.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Disconnected from weblogic server: AdminServer
+
+Exiting WebLogic Scripting Tool.
+
+<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.>
+
Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.
+Exporting metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the base64 encoded user name and password. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml
has basicAuth with credentials as username: weblogic
and password: <password>
in base64 encoded form.
Run the following command to get the base64 encoded version of the weblogic password:
+$ echo -n "<password>" | base64
+
The output will look similar to the following:
+V2VsY29tZTE=
+
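+If you want to double-check that the encoded value decodes back to the password you intend to use (V2VsY29tZTE= is just the sample value shown above):
+$ echo -n "V2VsY29tZTE=" | base64 --decode
+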
Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml
and change the password:
value to the value returned above. Also change the namespace:
and weblogic.domainName:
values to match your OAM namespace and domain name:
apiVersion: v1
+kind: Secret
+metadata:
+ name: basic-auth
+ namespace: oamns
+data:
+ password: V2VsY29tZTE=
+ user: d2VibG9naWM=
+type: Opaque
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: wls-exporter
+ namespace: oamns
+ labels:
+ k8s-app: wls-exporter
+ release: monitoring
+spec:
+ namespaceSelector:
+ matchNames:
+ - oamns
+ selector:
+ matchLabels:
+ weblogic.domainName: accessdomain
+ endpoints:
+ - basicAuth:
+ password:
+ name: basic-auth
+ key: password
+ username:
+ name: basic-auth
+ key: user
+ port: default
+ relabelings:
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ interval: 10s
+ honorLabels: true
+ path: /wls-exporter/metrics
+
Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml
and change the namespace
to match your OAM namespace. For example:
apiVersion: rbac.authorization.k8s.io/v1
+items:
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: Role
+ metadata:
+ name: prometheus-k8s
+ namespace: oamns
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+kind: RoleList
+
Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml
and change the namespace
to match your OAM namespace. For example:
apiVersion: rbac.authorization.k8s.io/v1
+items:
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: prometheus-k8s
+ namespace: oamns
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: prometheus-k8s
+ subjects:
+ - kind: ServiceAccount
+ name: prometheus-k8s
+ namespace: monitoring
+kind: RoleBindingList
+
Run the following command to enable Prometheus:
+$ cd $WORKDIR/kubernetes/monitoring-service/manifests
+$ kubectl apply -f .
+
The output will look similar to the following:
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+secret/basic-auth created
+servicemonitor.monitoring.coreos.com/wls-exporter created
+
After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.
+Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery
Click on oamns/wls-exporter/0
and then show more. Verify all the targets are mentioned.
Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100
and login with admin/admin
. Change your password when prompted.
Import the Grafana dashboard by navigating in the left hand menu to Create > Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json
and paste it in. Then click Load and Import. The dashboard should be displayed in the Dashboards panel.
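+If you prefer to script the import instead of using the UI, Grafana can also create dashboards through its HTTP API (a sketch only; whether the JSON file can be posted as-is or first needs to be wrapped in a {"dashboard": ..., "overwrite": true} envelope depends on how the file was exported, so inspect it before posting):
+# import the dashboard using the Grafana HTTP API and the admin credentials set earlier
+$ curl -s -u admin:<password> -H "Content-Type: application/json" -X POST http://${MASTERNODE-HOSTNAME}:32100/api/dashboards/db -d @$WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json
+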
To clean up a manual installation:
+Run the following commands:
+$ cd $WORKDIR/kubernetes/monitoring-service/manifests/
+$ kubectl delete -f .
+
Delete the deployments:
+$ cd $WORKDIR/kubernetes/monitoring-service/scripts/
+$ kubectl cp undeploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName <domain_uid> -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true
+
Delete Prometheus:
+$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
+$ kubectl delete -f manifests
+
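+Optionally, to also remove the monitoring namespace and the custom resource definitions created during setup (only do this if nothing else in the cluster uses them), delete the setup manifests as well:
+$ kubectl delete -f manifests/setup
+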
To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain.
+Check to see if the helper pod exists by running:
+$ kubectl get pods -n <domain_namespace> | grep helper
+
For example:
+$ kubectl get pods -n oamns | grep helper
+
The output should look similar to the following:
+helper 1/1 Running 0 26h
+
If the helper pod doesn’t exist then see Step 1 in Prepare your environment to create it.
+Run the following command to start a bash shell in the helper pod:
+$ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
+
For example:
+$ kubectl exec -it helper -n oamns -- /bin/bash
+
This will take you into a bash shell in the running helper pod:
+[oracle@helper ~]$
+
Connect to WLST using the following command:
+$ cd $ORACLE_HOME/oracle_common/common/bin
+$ ./wlst.sh
+
The output will look similar to the following:
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away.
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+
+wls:/offline>
+
To access t3 for the Administration Server connect as follows:
+wls:/offline> connect('weblogic','<password>','t3://accessdomain-adminserver:7001')
+
The output will look similar to the following:
+Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ...
+Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+wls:/accessdomain/serverConfig/>
+
Or to access t3 for the OAM Cluster service, connect as follows:
+connect('weblogic','<password>','t3://accessdomain-cluster-oam-cluster:14100')
+
The output will look similar to the following:
+Connecting to t3://accessdomain-cluster-oam-cluster:14100 with userid weblogic ...
+Successfully connected to managed Server "oam_server1" that belongs to domain "accessdomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+wls:/accessdomain/serverConfig/>
+
For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.
+wls:/accessdomain/serverConfig/> cd('/Servers')
+wls:/accessdomain/serverConfig/Servers> ls()
+
+dr-- AdminServer
+dr-- oam_policy_mgr1
+dr-- oam_policy_mgr2
+dr-- oam_policy_mgr3
+dr-- oam_policy_mgr4
+dr-- oam_policy_mgr5
+dr-- oam_server1
+dr-- oam_server2
+dr-- oam_server3
+dr-- oam_server4
+dr-- oam_server5
+
+wls:/accessdomain/serverConfig/Servers>
+
Connect to the Administration Server and run the following:
+wls:/accessdomain/serverConfig/> domainRuntime()
+Location changed to domainRuntime tree. This is a read-only tree
+with DomainMBean as the root MBean.
+For more help, use help('domainRuntime')
+
+wls:/accessdomain/domainRuntime/>
+
+wls:/accessdomain/domainRuntime/> listLoggers(pattern="oracle.oam.*",target="oam_server1")
+------------------------------------------+-----------------
+Logger | Level
+------------------------------------------+-----------------
+oracle.oam | <Inherited>
+oracle.oam.admin.foundation.configuration | <Inherited>
+oracle.oam.admin.service.config | <Inherited>
+oracle.oam.agent | <Inherited>
+oracle.oam.agent-default | <Inherited>
+oracle.oam.audit | <Inherited>
+oracle.oam.binding | <Inherited>
+oracle.oam.certvalidation | <Inherited>
+oracle.oam.certvalidation.mbeans | <Inherited>
+oracle.oam.common.healthcheck | <Inherited>
+oracle.oam.common.runtimeent | <Inherited>
+oracle.oam.commonutil | <Inherited>
+oracle.oam.config | <Inherited>
+oracle.oam.controller | <Inherited>
+oracle.oam.default | <Inherited>
+oracle.oam.diagnostic | <Inherited>
+oracle.oam.engine.authn | <Inherited>
+oracle.oam.engine.authz | <Inherited>
+oracle.oam.engine.policy | <Inherited>
+oracle.oam.engine.ptmetadata | <Inherited>
+oracle.oam.engine.session | <Inherited>
+oracle.oam.engine.sso | <Inherited>
+oracle.oam.esso | <Inherited>
+oracle.oam.extensibility.lifecycle | <Inherited>
+oracle.oam.foundation.access | <Inherited>
+oracle.oam.idm | <Inherited>
+oracle.oam.install | <Inherited>
+oracle.oam.install.bootstrap | <Inherited>
+oracle.oam.install.mbeans | <Inherited>
+oracle.oam.ipf.rest.api | <Inherited>
+oracle.oam.oauth | <Inherited>
+oracle.oam.plugin | <Inherited>
+oracle.oam.proxy.oam | <Inherited>
+oracle.oam.proxy.oam.workmanager | <Inherited>
+oracle.oam.proxy.opensso | <Inherited>
+oracle.oam.pswd.service.provider | <Inherited>
+oracle.oam.replication | <Inherited>
+oracle.oam.user.identity.provider | <Inherited>
+wls:/accessdomain/domainRuntime/>
+
Set the log level to TRACE:32
:
wls:/accessdomain/domainRuntime/> setLogLevel(target='oam_server1',logger='oracle.oam',level='TRACE:32',persist="1",addLogger=1)
+wls:/accessdomain/domainRuntime/>
+
+wls:/accessdomain/domainRuntime/> listLoggers(pattern="oracle.oam.*",target="oam_server1")
+------------------------------------------+-----------------
+Logger | Level
+------------------------------------------+-----------------
+oracle.oam | TRACE:32
+oracle.oam.admin.foundation.configuration | <Inherited>
+oracle.oam.admin.service.config | <Inherited>
+oracle.oam.agent | <Inherited>
+oracle.oam.agent-default | <Inherited>
+oracle.oam.audit | <Inherited>
+oracle.oam.binding | <Inherited>
+oracle.oam.certvalidation | <Inherited>
+oracle.oam.certvalidation.mbeans | <Inherited>
+oracle.oam.common.healthcheck | <Inherited>
+oracle.oam.common.runtimeent | <Inherited>
+oracle.oam.commonutil | <Inherited>
+oracle.oam.config | <Inherited>
+oracle.oam.controller | <Inherited>
+oracle.oam.default | <Inherited>
+oracle.oam.diagnostic | <Inherited>
+oracle.oam.engine.authn | <Inherited>
+oracle.oam.engine.authz | <Inherited>
+oracle.oam.engine.policy | <Inherited>
+oracle.oam.engine.ptmetadata | <Inherited>
+oracle.oam.engine.session | <Inherited>
+oracle.oam.engine.sso | <Inherited>
+oracle.oam.esso | <Inherited>
+oracle.oam.extensibility.lifecycle | <Inherited>
+oracle.oam.foundation.access | <Inherited>
+oracle.oam.idm | <Inherited>
+oracle.oam.install | <Inherited>
+oracle.oam.install.bootstrap | <Inherited>
+oracle.oam.install.mbeans | <Inherited>
+oracle.oam.ipf.rest.api | <Inherited>
+oracle.oam.oauth | <Inherited>
+oracle.oam.plugin | <Inherited>
+oracle.oam.proxy.oam | <Inherited>
+oracle.oam.proxy.oam.workmanager | <Inherited>
+oracle.oam.proxy.opensso | <Inherited>
+oracle.oam.pswd.service.provider | <Inherited>
+oracle.oam.replication | <Inherited>
+oracle.oam.user.identity.provider | <Inherited>
+wls:/accessdomain/domainRuntime/>
+
Verify that TRACE:32
log level is set by connecting to the Administration Server and viewing the logs:
$ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash
+[oracle@accessdomain-adminserver oracle]$
+[oracle@accessdomain-adminserver oracle]$ cd /u01/oracle/user_projects/domains/accessdomain/servers/oam_server1/logs
+[oracle@accessdomain-adminserver logs]$ tail oam_server1-diagnostic.log
+[<DATE>] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds. Monitor: { StoreMonitor: { disabled: 'false' } }
+[<DATE>] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified
+[<DATE>] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified
+[<DATE>] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?)
+[<DATE>] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: <anonymous>] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:4
+
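+To narrow the output down to the new trace entries only, you can filter the same log file on the level, for example:
+[oracle@accessdomain-adminserver logs]$ grep "TRACE:32" oam_server1-diagnostic.log | tail
+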
By default the SSL port is not enabled for the Administration Server or the OAM Managed Servers. To configure the SSL port for the Administration Server and Managed Servers, login to the WebLogic Administration console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
and navigate to Lock & Edit -> Environment -> Servers -> server_name -> Configuration -> General -> SSL Listen Port Enabled -> Provide SSL Port (for the Administration Server: 7002, and for the OAM Managed Server (oam_server1): 14101) -> Save -> Activate Changes.
+Note: If configuring the OAM Managed Servers for SSL you must enable SSL on the same port for all servers (oam_server1 through oam_server5).
+Create a myscripts
directory as follows:
$ cd $WORKDIR/kubernetes/
+$ mkdir myscripts
+$ cd myscripts
+
Create a sample yaml template file in the myscripts
directory called <domain_uid>-adminserver-ssl.yaml
to create a Kubernetes service for the Administration Server:
Note: Update the domainName
, domainUID
and namespace
based on your environment. For example:
apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ serviceType: SERVER
+ weblogic.domainName: accessdomain
+ weblogic.domainUID: accessdomain
+ weblogic.resourceVersion: domain-v2
+ weblogic.serverName: AdminServer
+ name: accessdomain-adminserverssl
+ namespace: oamns
+spec:
+ clusterIP: None
+ ports:
+ - name: default
+ port: 7002
+ protocol: TCP
+ targetPort: 7002
+ selector:
+ weblogic.createdByOperator: "true"
+ weblogic.domainUID: accessdomain
+ weblogic.serverName: AdminServer
+ type: ClusterIP
+
Create the following sample yaml template file <domain_uid>-oamcluster-ssl.yaml
for the OAM Managed Server:
apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ serviceType: SERVER
+ weblogic.domainName: accessdomain
+ weblogic.domainUID: accessdomain
+ weblogic.resourceVersion: domain-v2
+ name: accessdomain-oamcluster-ssl
+ namespace: oamns
+spec:
+ clusterIP: None
+ ports:
+ - name: default
+ port: 14101
+ protocol: TCP
+ targetPort: 14101
+ selector:
+ weblogic.clusterName: oam_cluster
+ weblogic.createdByOperator: "true"
+ weblogic.domainUID: accessdomain
+ type: ClusterIP
+
Apply the template using the following command for the AdminServer:
+$ kubectl apply -f <domain_uid>-adminserver-ssl.yaml
+
For example:
+$ kubectl apply -f accessdomain-adminserver-ssl.yaml
+service/accessdomain-adminserverssl created
+
and using the following command for the OAM Managed Server:
+$ kubectl apply -f <domain_uid>-oamcluster-ssl.yaml
+
For example:
+$ kubectl apply -f accessdomain-oamcluster-ssl.yaml
+service/accessdomain-oamcluster-ssl created
+
Validate that the Kubernetes Services to access SSL ports are created successfully:
+$ kubectl get svc -n <domain_namespace> |grep ssl
+
For example:
+$ kubectl get svc -n oamns |grep ssl
+
The output will look similar to the following:
+accessdomain-adminserverssl ClusterIP None <none> 7002/TCP 102s
+accessdomain-oamcluster-ssl ClusterIP None <none> 14101/TCP 35s
+
Inside the bash shell of the running helper pod, run the following:
+[oracle@helper bin]$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust"
+[oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin
+[oracle@helper bin]$ ./wlst.sh
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+wls:/offline>
+
To connect to the Administration Server t3s service:
+wls:/offline> connect('weblogic','<password>','t3s://accessdomain-adminserverssl:7002')
+Connecting to t3s://accessdomain-adminserverssl:7002 with userid weblogic ...
+<<DATE>> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
+<<DATE>> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
+<<DATE>> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
+Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain".
+
+wls:/accessdomain/serverConfig/>
+
To connect to the OAM Managed Server t3s service:
+wls:/offline> connect('weblogic','<password>','t3s://accessdomain-oamcluster-ssl:14101')
+Connecting to t3s://accessdomain-oamcluster-ssl:14101 with userid weblogic ...
+<<DATE>> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
+<<DATE>> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
+<<DATE>> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
+Successfully connected to managed Server "oam_server1" that belongs to domain "accessdomain".
+
This section shows you how to upgrade the WebLogic Kubernetes Operator, the OAM image, the Elasticsearch and Kibana stack, and the Ingress.
+The upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to.
+Please refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.
+Instructions on how to update the WebLogic Kubernetes Operator version.
+Instructions on how to update your OAM Kubernetes cluster with a new OAM container image.
+Instructions on how to upgrade the ingress.
+Instructions on how to upgrade Elasticsearch and Kibana.
+Choose one of the following options to update your OAM Kubernetes cluster to use the new image:
+Using the kubectl edit domain command
+Using the kubectl patch domain command
+In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers.
+Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.
+To update the domain with the kubectl edit domain
command, run the following:
$ kubectl edit domain <domainname> -n <namespace>
+
For example:
+$ kubectl edit domain accessdomain -n oamns
+
If using Oracle Container Registry or your own container registry for your OAM container image, update the image
to point at the new image, for example:
domainHomeInImage: false
+image: container-registry.oracle.com/middleware/oam_cpu:<tag>
+imagePullPolicy: IfNotPresent
+
If you are not using a container registry and have loaded the image on each of the master and worker nodes, update the image
to point at the new image:
domainHomeInImage: false
+image: oracle/oam:<tag>
+imagePullPolicy: IfNotPresent
+
Save the file and exit (:wq!)
+To update the domain with the kubectl patch domain
command, run the following:
$ kubectl patch domain <domain> -n <namespace> --type merge -p '{"spec":{"image":"newimage:tag"}}'
+
For example, if using Oracle Container Registry or your own container registry for your OAM container image:
+$ kubectl patch domain accessdomain -n oamns --type merge -p '{"spec":{"image":"container-registry.oracle.com/middleware/oam_cpu:<tag>"}}'
+
For example, if you are not using a container registry and have loaded the image on each of the master and worker nodes:
+$ kubectl patch domain accessdomain -n oamns --type merge -p '{"spec":{"image":"oracle/oam:<tag>"}}'
+
The output will look similar to the following:
+domain.weblogic.oracle/accessdomain patched
+
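+Whichever method you used, you can follow the rolling restart described above by watching the pods in the domain namespace until the Administration Server and all Managed Servers return to READY 1/1 (press Ctrl+C to stop watching):
+$ kubectl get pods -n oamns -w
+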
This section shows how to upgrade the ingress.
+To determine if this step is required for the version you are upgrading to, refer to the Release Notes.
+Download the latest code repository as follows:
+Create a working directory to setup the source code.
+$ mkdir <workdir>
+
For example:
+$ mkdir /scratch/OAMK8Slatest
+
Download the latest OAM deployment scripts from the OAM repository.
+$ cd <workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/OAMK8Slatest
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleAccessManagement
+
For example:
+$ export WORKDIR=/scratch/OAMK8Slatest/fmw-kubernetes/OracleAccessManagement
+
To upgrade the existing ingress rules, follow the steps below:
+List the existing ingress:
+$ helm list -n oamns
+
The output will look similar to the following:
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+nginx-ingress oamns 1 <DATE> deployed ingress-nginx-4.3.0 1.4.0
+oam-nginx oamns 1 <DATE> deployed ingress-per-domain-0.1.0 1.0
+
Edit the $WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml
and change the domainUID
parameter to match your domainUID, for example domainUID: accessdomain
. For example:
# Load balancer type. Supported values are: NGINX
+type: NGINX
+
+# SSL configuration Type. Supported Values are : NONSSL,SSL
+sslType: SSL
+
+# domainType. Supported values are: oam
+domainType: oam
+
+#WLS domain as backend to the load balancer
+wlsDomain:
+ domainUID: accessdomain
+ adminServerName: AdminServer
+ adminServerPort: 7001
+ adminServerSSLPort:
+ oamClusterName: oam_cluster
+ oamManagedServerPort: 14100
+ oamManagedServerSSLPort:
+ policyClusterName: policy_cluster
+ policyManagedServerPort: 15100
+ policyManagedServerSSLPort:
+
+
+# Host specific values
+hostName:
+ enabled: false
+ admin:
+ runtime:
+
Upgrade the oam-nginx
with the following command:
$ helm upgrade oam-nginx kubernetes/charts/ingress-per-domain/ --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values
+
The output will look similar to the following:
+Release "oam-nginx" has been upgraded. Happy Helming!
+NAME: oam-nginx
+LAST DEPLOYED: <DATE>
+NAMESPACE: oamns
+STATUS: deployed
+REVISION: 2
+TEST SUITE: None
+
List the ingress:
+$ kubectl get ing -n oamns
+
The output will look similar to the following:
+NAME CLASS HOSTS ADDRESS PORTS AGE
+accessdomain-nginx <none> * 10.99.189.61 80 18s
+
Describe the ingress and make sure all the listed paths are accessible:
+$ kubectl describe ing accessdomain-nginx -n oamns
+
The output will look similar to the following:
+Name: accessdomain-nginx
+Labels: app.kubernetes.io/managed-by=Helm
+Namespace: oamns
+Address: 10.99.189.61
+Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
+Rules:
+ Host Path Backends
+ ---- ---- --------
+ *
+ /console accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /consolehelp accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /rreg/rreg accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /em accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /oamconsole accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /dms accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /oam/services/rest accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /iam/admin/config accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /iam/admin/diag accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100)
+ /oam/admin/api accessdomain-adminserver:7001 (10.244.1.224:7001)
+ /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100)
+ /access accessdomain-cluster-policy-cluster:15100 (10.244.1.226:15100)
+ / accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100)
+Annotations: kubernetes.io/ingress.class: nginx
+ meta.helm.sh/release-name: oam-nginx
+ meta.helm.sh/release-namespace: oamns
+ nginx.ingress.kubernetes.io/configuration-snippet:
+ more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
+ more_set_input_headers "X-Forwarded-Proto: https";
+ more_set_input_headers "WL-Proxy-SSL: true";
+ nginx.ingress.kubernetes.io/enable-access-log: false
+ nginx.ingress.kubernetes.io/ingress.allow-http: false
+ nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Sync 55s (x2 over 63s) nginx-ingress-controller Scheduled for sync
+
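+As a final check, you can confirm that a known path is served through the upgraded ingress (a sketch; the HTTPS NodePort exposed by the NGINX controller service is installation specific, so look it up first):
+# find the HTTPS NodePort of the ingress controller service
+$ kubectl get svc -n oamns | grep nginx
+# expect an HTTP status such as 200 or 302 from the WebLogic console path
+$ curl -ks https://${MASTERNODE-HOSTNAME}:<https-nodeport>/console -o /dev/null -w "%{http_code}\n"
+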
These instructions apply to upgrading operators from 3.X.X to 4.X, or from within the 4.X release family as additional versions are released.
+On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:
+$ mkdir <workdir>/weblogic-kubernetes-operator-4.X.X
+$ cd <workdir>/weblogic-kubernetes-operator-4.X.X
+$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X
+
For example:
+$ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X
+$ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X
+$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X
+
This will create the directory <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
Run the following helm command to upgrade the operator:
+$ cd <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
+$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace <sample-kubernetes-operator-ns> --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
+
For example:
+$ cd /scratch/OAMK8S/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
+$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace opns --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
+
The output will look similar to the following:
+Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming!
+NAME: weblogic-kubernetes-operator
+LAST DEPLOYED: <DATE>
+NAMESPACE: opns
+STATUS: deployed
+REVISION: 2
+TEST SUITE: None
+
Verify that the operator’s pod and services are running by executing the following command:
+$ kubectl get all -n <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl get all -n opns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+pod/weblogic-operator-b7d6df78c-mfrc4 1/1 Running 0 40s
+pod/weblogic-operator-webhook-7996b8b58b-frtwp 1/1 Running 0 42s
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/weblogic-operator-webhook-svc ClusterIP 10.106.51.57 <none> 8083/TCP,8084/TCP 42s
+
+NAME READY UP-TO-DATE AVAILABLE AGE
+deployment.apps/weblogic-operator 1/1 1 1 6d
+deployment.apps/weblogic-operator-webhook 1/1 1 1 42s
+
+NAME DESIRED CURRENT READY AGE
+replicaset.apps/weblogic-operator-5884685f4f 0 0 0 6d
+replicaset.apps/weblogic-operator-b7d6df78c 1 1 1 40s
+replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 42s
+
Note: When you upgrade a 3.x WebLogic Kubernetes Operator to 4.x, the upgrade process creates a WebLogic Domain resource conversion webhook deployment, and associated resources in the same namespace. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.x schema. For more information, see Domain Upgrade in the WebLogic Kubernetes Operator documentation.
+Note: In WebLogic Kubernetes Operator 4.X, changes are made to serverStartPolicy
that affect starting/stopping of the domain. Refer to the serverStartPolicy
entry in the create-domain-inputs.yaml for more information. Also see Domain Life Cycle.
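As an illustrative check only (assuming the domain accessdomain exists in the oamns namespace), you can see which serverStartPolicy value a domain currently uses by querying the domain resource directly:
+$ kubectl get domains accessdomain -n oamns -o jsonpath='{.spec.serverStartPolicy}'
+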
This section shows how to upgrade Elasticsearch and Kibana.
+To determine if this step is required for the version you are upgrading to, refer to the Release Notes.
+From October 22 (22.4.1) onwards, OAM logs should be stored on a centralized Elasticsearch and Kibana stack.
+Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.
+If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below:
+Make sure you have downloaded the latest code repository as per Download the latest code repository
+Edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
and change all instances of namespace to correspond to your deployment.
Delete the Elasticsearch and Kibana resources using the following command:
+$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
+
Follow these post install configuration steps.
+Navigate to the following directory:
+$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain
+
Create a setUserOverrides.sh
with the following contents:
DERBY_FLAG=false
+JAVA_OPTIONS="${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true"
+MEM_ARGS="-Xms8192m -Xmx8192m"
+
Copy the setUserOverrides.sh
file to the Administration Server pod:
$ chmod 755 setUserOverrides.sh
+$ kubectl cp setUserOverrides.sh oamns/accessdomain-adminserver:/u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh
+
Where oamns
is the OAM namespace and accessdomain
is the DOMAIN_NAME/UID
.
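Optionally, before restarting the domain you can confirm the file was copied with the expected permissions. This is an illustrative check using the same namespace and domain UID as above:
+$ kubectl exec -n oamns accessdomain-adminserver -- ls -l /u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh
+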
Stop the OAM domain using the following command:
+$ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
+
For example:
+$ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
+
The output will look similar to the following:
+domain.weblogic.oracle/accessdomain patched
+
Check that all the pods are stopped:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Terminating 0 27m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h29m
+accessdomain-oam-policy-mgr1 1/1 Terminating 0 24m
+accessdomain-oam-server1 1/1 Terminating 0 24m
+helper 1/1 Running 0 4h44m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 108m
+
The Administration Server pod and Managed Server pods will move to a STATUS of Terminating
. After a few minutes, run the command again and the pods should have disappeared:
NAME READY STATUS RESTARTS AGE
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m
+helper 1/1 Running 0 4h45m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 109m
+
Start the domain using the following command:
+$ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
+
For example:
+$ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
+
Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m
+accessdomain-introspector-mckp2 1/1 Running 0 8s
+helper 1/1 Running 0 4h46m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 110m
+
The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY
status 1/1
:
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 5m38s
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h37m
+accessdomain-oam-policy-mgr1 1/1 Running 0 2m51s
+accessdomain-oam-server1 1/1 Running 0 2m50s
+helper 1/1 Running 0 4h52m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 116m
+
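Rather than re-running the command manually, you can optionally watch the pods until they reach the Running state (press Ctrl+C to stop watching):
+$ kubectl get pods -n oamns -w
+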
Exclude all Oracle Access Management (OAM) clusters (including Policy Manager and OAM runtime server) from the default WebLogic Server 12c coherence cluster by using the WebLogic Server Administration Console.
+From 12.2.1.3.0 onwards, OAM server-side session management uses the database and does not require coherence cluster to be established. In some environments, warnings and errors are observed due to default coherence cluster initialized by WebLogic. To avoid or fix these errors, exclude all of the OAM clusters from default WebLogic Server coherence cluster using the following steps:
+Login to the WebLogic Server Administration Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and exclude each OAM cluster from the default coherence cluster.
+For production environments, the following WebLogic Server tuning parameters must be set. These are applied from the WebLogic Server Administration Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console, where accessdomain is your domain_UID.
+Login to Oracle Enterprise Manager at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em, add the virtualize property with value true, and click OK.
For the above changes to take effect, you must restart the OAM domain:
+Stop the OAM domain using the following command:
+$ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
+
For example:
+$ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
+
The output will look similar to the following:
+domain.weblogic.oracle/accessdomain patched
+
Check that all the pods are stopped:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Terminating 0 27m
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h29m
+accessdomain-oam-policy-mgr1 1/1 Terminating 0 24m
+accessdomain-oam-server1 1/1 Terminating 0 24m
+helper 1/1 Running 0 4h44m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 108m
+
The Administration Server pod and Managed Server pods will move to a STATUS of Terminating
. After a few minutes, run the command again and the pods should have disappeared:
NAME READY STATUS RESTARTS AGE
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m
+helper 1/1 Running 0 4h45m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 109m
+
Start the domain using the following command:
+$ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
+
For example:
+$ kubectl -n oamns patch domains accessdomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
+
Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m
+accessdomain-introspector-mckp2 1/1 Running 0 8s
+helper 1/1 Running 0 4h46m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 110m
+
The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY
status 1/1
:
NAME READY STATUS RESTARTS AGE
+accessdomain-adminserver 1/1 Running 0 5m38s
+accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h37m
+accessdomain-oam-policy-mgr1 1/1 Running 0 2m51s
+accessdomain-oam-server1 1/1 Running 0 2m50s
+helper 1/1 Running 0 4h52m
+nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 116m
+
To prepare for Oracle Access Management deployment in a Kubernetes environment, complete the following steps:
+Preparing the environment for domain creation
+a. Creating Kubernetes secrets for the domain and RCU
+b. Creating a Kubernetes persistent volume and persistent volume claim
+As per the Prerequisites a Kubernetes cluster should have already been configured.
+Check that all the nodes in the Kubernetes cluster are running.
+Run the following command on the master node to check the cluster and worker nodes are running:
+$ kubectl get nodes,pods -n kube-system
+
The output will look similar to the following:
+NAME STATUS ROLES AGE VERSION
+node/worker-node1 Ready <none> 17h v1.26.6+1.el8
+node/worker-node2 Ready <none> 17h v1.26.6+1.el8
+node/master-node Ready control-plane,master 23h v1.26.6+1.el8
+
+NAME READY STATUS RESTARTS AGE
+pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h
+pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h
+pod/etcd-master 1/1 Running 0 21h
+pod/kube-apiserver-master-node 1/1 Running 0 21h
+pod/kube-controller-manager-master-node 1/1 Running 0 21h
+pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h
+pod/kube-proxy-2kxv2 1/1 Running 0 17h
+pod/kube-proxy-82vvj 1/1 Running 0 17h
+pod/kube-proxy-nrgw9 1/1 Running 0 23h
+pod/kube-scheduler-master 1/1 Running 0 21h
+
The OAM Kubernetes deployment requires access to an OAM container image. The image can be obtained in the following ways:
+The prebuilt OAM October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
+Note: Before using this image you must login to Oracle Container Registry, navigate to Middleware
> oam_cpu
and accept the license agreement.
You can use this image in the following ways:
+You can build your own OAM container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OAM container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.
+You can use an image built with WebLogic Image Tool in the following ways:
+Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.
+OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OAM domains, you need to set up the deployment scripts on the master node as below:
+Create a working directory to setup the source code.
+$ mkdir <workdir>
+
For example:
+$ mkdir /scratch/OAMK8S
+
Download the latest OAM deployment scripts from the OAM repository.
+$ cd <workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/OAMK8S
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleAccessManagement
+
For example:
+$ export WORKDIR=/scratch/OAMK8S/fmw-kubernetes/OracleAccessManagement
+
Run the following command and see if the WebLogic custom resource definition name already exists:
+$ kubectl get crd
+
In the output you should see:
+No resources found
+
If you see any of the following:
+NAME AGE
+clusters.weblogic.oracle 5d
+domains.weblogic.oracle 5d
+
+then run the following command to delete the existing CRDs:
+$ kubectl delete crd clusters.weblogic.oracle
+$ kubectl delete crd domains.weblogic.oracle
+
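If you want to double check that the CRDs have been removed before continuing, a simple filter such as the following should return no output:
+$ kubectl get crd | grep weblogic
+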
On the master node run the following command to create a namespace for the operator:
+$ kubectl create namespace <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl create namespace opns
+
The output will look similar to the following:
+namespace/opns created
+
Create a service account for the operator in the operator’s namespace by running the following command:
+$ kubectl create serviceaccount -n <sample-kubernetes-operator-ns> <sample-kubernetes-operator-sa>
+
For example:
+$ kubectl create serviceaccount -n opns op-sa
+
The output will look similar to the following:
+serviceaccount/op-sa created
+
Run the following helm command to install and start the operator:
+$ cd $WORKDIR
+$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
+--namespace <sample-kubernetes-operator-ns> \
+--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
+--set serviceAccount=<sample-kubernetes-operator-sa> \
+--set "enableClusterRoleBinding=true" \
+--set "domainNamespaceSelectionStrategy=LabelSelector" \
+--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
+--set "javaLoggingLevel=FINE" --wait
+
For example:
+$ cd $WORKDIR
+$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
+--namespace opns \
+--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
+--set serviceAccount=op-sa \
+--set "enableClusterRoleBinding=true" \
+--set "domainNamespaceSelectionStrategy=LabelSelector" \
+--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
+--set "javaLoggingLevel=FINE" --wait
+
The output will look similar to the following:
+NAME: weblogic-kubernetes-operator
+LAST DEPLOYED: <DATE>
+NAMESPACE: opns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
Verify that the operator’s pod and services are running by executing the following command:
+$ kubectl get all -n <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl get all -n opns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+pod/weblogic-operator-676d5cc6f4-wct7b 1/1 Running 0 40s
+pod/weblogic-operator-webhook-7996b8b58b-9sfhd 1/1 Running 0 40s
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/weblogic-operator-webhook-svc ClusterIP 10.100.91.237 <none> 8083/TCP,8084/TCP 47s
+
+NAME READY UP-TO-DATE AVAILABLE AGE
+deployment.apps/weblogic-operator 1/1 1 1 40s
+deployment.apps/weblogic-operator-webhook 1/1 1 1 40s
+
+NAME DESIRED CURRENT READY AGE
+replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 40s
+replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 46s
+
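Optionally, you can also confirm the helm release itself shows a deployed status. This is an extra check, not part of the official steps:
+$ helm list -n opns
+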
Verify the operator pod’s log:
+$ kubectl logs -n <sample-kubernetes-operator-ns> -c weblogic-operator deployments/weblogic-operator
+
For example:
+$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator
+
The output will look similar to the following:
+...
+{"timestamp":"<DATE>","thread":21,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183291191,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
+{"timestamp":"<DATE>","thread":37,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183296193,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
+{"timestamp":"<DATE>","thread":31,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183301194,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
+{"timestamp":"<DATE>","thread":31,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678183306195,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
+
Run the following command to create a namespace for the domain:
+$ kubectl create namespace <domain_namespace>
+
For example:
+$ kubectl create namespace oamns
+
The output will look similar to the following:
+namespace/oamns created
+
Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it:
+$ kubectl label namespaces <domain_namespace> weblogic-operator=enabled
+
For example:
+$ kubectl label namespaces oamns weblogic-operator=enabled
+
The output will look similar to the following:
+namespace/oamns labeled
+
Run the following command to check the label was created:
+$ kubectl describe namespace <domain_namespace>
+
For example:
+$ kubectl describe namespace oamns
+
The output will look similar to the following:
+Name: oamns
+Labels: kubernetes.io/metadata.name=oamns
+ weblogic-operator=enabled
+Annotations: <none>
+Status: Active
+
+No resource quota.
+
+No LimitRange resource.
+
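Because the operator was installed with the LabelSelector strategy, it manages any namespace carrying this label. As an optional check, you can list all such namespaces:
+$ kubectl get namespaces -l weblogic-operator=enabled
+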
In this section you create a secret that stores the credentials for the container registry where the OAM image is stored.
+If you are not using a container registry and have loaded the images on each of the master and worker nodes, then there is no need to create the registry secret.
+Run the following command to create the secret:
+kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
+--docker-username="<USER_NAME>" \
+--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
+--namespace=<domain_namespace>
+
For example, if using Oracle Container Registry:
+kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
+--docker-username="user@example.com" \
+--docker-password=password --docker-email=user@example.com \
+--namespace=oamns
+
Replace <USER_NAME>
and <PASSWORD>
with the credentials for the registry with the following caveats:
If using Oracle Container Registry to pull the OAM container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware
> oam_cpu
and accept the license agreement.
If using your own container registry to store the OAM container image, this is the username and password (or token) for your container registry.
+The output will look similar to the following:
+secret/orclcred created
+
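If you want to verify the contents of the registry secret (for example, to confirm the registry server was set correctly), you can decode it as shown below. This is an optional check and it prints the stored credentials, so only run it in a secure session:
+$ kubectl get secret orclcred -n oamns -o jsonpath="{.data.\.dockerconfigjson}" | base64 --decode
+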
In this section you create the RCU schemas in the Oracle Database.
+Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.
+If using Oracle Container Registry or your own container registry for your OAM container image, run the following command to create a helper pod to run RCU:
+$ kubectl run --image=<image_name-from-registry>:<tag> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1", "spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n <domain_namespace> -- sleep infinity
+
For example:
+$ kubectl run --image=container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-<October`23> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oamns -- sleep infinity
+
If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:
+$ kubectl run helper --image <image>:<tag> -n oamns -- sleep infinity
+
For example:
+$ kubectl run helper --image oracle/oam:12.2.1.4-jdk8-ol7-<October`23> -n oamns -- sleep infinity
+
The output will look similar to the following:
+pod/helper created
+
Run the following command to check the pod is running:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oamns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+helper 1/1 Running 0 3m
+
+Note: If you are pulling the image from a container registry it may take several minutes before the pod has a READY status of 1/1. While the pod is starting you can check the status of the pod by running the following command:
$ kubectl describe pod helper -n oamns
+
Run the following command to start a bash shell in the helper pod:
+$ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
+
For example:
+$ kubectl exec -it helper -n oamns -- /bin/bash
+
This will take you into a bash shell in the running helper pod:
+[oracle@helper ~]$
+
In the helper bash shell run the following commands to set the environment:
+[oracle@helper ~]$ export CONNECTION_STRING=<db_host.domain>:<db_port>/<service_name>
+[oracle@helper ~]$ export RCUPREFIX=<rcu_schema_prefix>
+[oracle@helper ~]$ echo -e <db_pwd>"\n"<rcu_schema_pwd> > /tmp/pwd.txt
+[oracle@helper ~]$ cat /tmp/pwd.txt
+
where:
+<db_host.domain>:<db_port>/<service_name>
is your database connect string
<rcu_schema_prefix>
is the RCU schema prefix you want to set
<db_pwd>
is the SYS password for the database
<rcu_schema_pwd>
is the password you want to set for the <rcu_schema_prefix>
For example:
+[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com
+[oracle@helper ~]$ export RCUPREFIX=OAMK8S
+[oracle@helper ~]$ echo -e <password>"\n"<password> > /tmp/pwd.txt
+[oracle@helper ~]$ cat /tmp/pwd.txt
+<password>
+<password>
+
In the helper bash shell run the following command to create the RCU schemas in the database:
+[oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \
+$CONNECTION_STRING -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \
+-selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component MDS -component IAU \
+-component IAU_APPEND -component IAU_VIEWER -component OPSS -component WLS -component STB -component OAM -f < /tmp/pwd.txt
+
The output will look similar to the following:
+RCU Logfile: /tmp/RCU<DATE>/logs/rcu.log
+Processing command line ....
+Repository Creation Utility - Checking Prerequisites
+Checking Global Prerequisites
+Repository Creation Utility - Checking Prerequisites
+Checking Component Prerequisites
+Repository Creation Utility - Creating Tablespaces
+Validating and Creating Tablespaces
+Create tablespaces in the repository database
+Repository Creation Utility - Create
+Repository Create in progress.
+Executing pre create operations
+Percent Complete: 18
+Percent Complete: 18
+Percent Complete: 19
+Percent Complete: 20
+Percent Complete: 21
+Percent Complete: 21
+Percent Complete: 22
+Percent Complete: 22
+Creating Common Infrastructure Services(STB)
+Percent Complete: 30
+Percent Complete: 30
+Percent Complete: 39
+Percent Complete: 39
+Percent Complete: 39
+Creating Audit Services Append(IAU_APPEND)
+Percent Complete: 46
+Percent Complete: 46
+Percent Complete: 55
+Percent Complete: 55
+Percent Complete: 55
+Creating Audit Services Viewer(IAU_VIEWER)
+Percent Complete: 62
+Percent Complete: 62
+Percent Complete: 63
+Percent Complete: 63
+Percent Complete: 64
+Percent Complete: 64
+Creating Metadata Services(MDS)
+Percent Complete: 73
+Percent Complete: 73
+Percent Complete: 73
+Percent Complete: 74
+Percent Complete: 74
+Percent Complete: 75
+Percent Complete: 75
+Percent Complete: 75
+Creating Weblogic Services(WLS)
+Percent Complete: 80
+Percent Complete: 80
+Percent Complete: 83
+Percent Complete: 83
+Percent Complete: 91
+Percent Complete: 98
+Percent Complete: 98
+Creating Audit Services(IAU)
+Percent Complete: 100
+Creating Oracle Platform Security Services(OPSS)
+Creating Oracle Access Manager(OAM)
+Executing post create operations
+Repository Creation Utility: Create - Completion Summary
+Database details:
+-----------------------------
+Host Name : mydatabasehost.example.com
+Port : 1521
+Service Name : ORCL.EXAMPLE.COM
+Connected As : sys
+Prefix for (prefixable) Schema Owners : OAMK8S
+RCU Logfile : /tmp/RCU<DATE>/logs/rcu.log
+
+Component schemas created:
+-----------------------------
+Component Status Logfile
+
+Common Infrastructure Services Success /tmp/RCU<DATE>/logs/stb.log
+Oracle Platform Security Services Success /tmp/RCU<DATE>/logs/opss.log
+Oracle Access Manager Success /tmp/RCU<DATE>/logs/oam.log
+Audit Services Success /tmp/RCU<DATE>/logs/iau.log
+Audit Services Append Success /tmp/RCU<DATE>/logs/iau_append.log
+Audit Services Viewer Success /tmp/RCU<DATE>/logs/iau_viewer.log
+Metadata Services Success /tmp/RCU<DATE>/logs/mds.log
+WebLogic Services Success /tmp/RCU<DATE>/logs/wls.log
+
+Repository Creation Utility - Create : Operation Completed
+[oracle@helper ~]$
+
Exit the helper bash shell by issuing the command exit
.
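If the schema creation fails part way through and you need to start again, RCU can also drop the schemas. The following is a sketch only, assuming the helper pod is still running and the same CONNECTION_STRING, RCUPREFIX and /tmp/pwd.txt values are still in place; adjust as required for your environment:
+[oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString \
+$CONNECTION_STRING -dbUser sys -dbRole sysdba -selectDependentsForComponents true \
+-schemaPrefix $RCUPREFIX -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER \
+-component OPSS -component WLS -component STB -component OAM -f < /tmp/pwd.txt
+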
In this section you prepare the environment for the OAM domain creation. This involves the following steps:
+a. Creating Kubernetes secrets for the domain and RCU
+b. Creating a Kubernetes persistent volume and persistent volume claim
+Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:
+$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
+$ ./create-weblogic-credentials.sh -u weblogic -p <pwd> -n <domain_namespace> -d <domain_uid> -s <kubernetes_domain_secret>
+
where:
+-u weblogic
is the WebLogic username
-p <pwd>
is the password for the weblogic user
-n <domain_namespace>
is the domain namespace
-d <domain_uid>
is the domain UID to be created. The default is domain1 if not specified
-s <kubernetes_domain_secret>
is the name you want to create for the secret for this namespace. The default is to use the domainUID if not specified
For example:
+$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
+$ ./create-weblogic-credentials.sh -u weblogic -p <password> -n oamns -d accessdomain -s accessdomain-credentials
+
The output will look similar to the following:
+secret/accessdomain-credentials created
+secret/accessdomain-credentials labeled
+The secret accessdomain-credentials has been successfully created in the oamns namespace.
+
Verify the secret is created using the following command:
+$ kubectl get secret <kubernetes_domain_secret> -o yaml -n <domain_namespace>
+
For example:
+$ kubectl get secret accessdomain-credentials -o yaml -n oamns
+
The output will look similar to the following:
+apiVersion: v1
+data:
+ password: V2VsY29tZTE=
+ username: d2VibG9naWM=
+kind: Secret
+metadata:
+ creationTimestamp: "<DATE>"
+ labels:
+ weblogic.domainName: accessdomain
+ weblogic.domainUID: accessdomain
+ name: accessdomain-credentials
+ namespace: oamns
+ resourceVersion: "29428101"
+ uid: 6dac0561-d157-4144-9ed7-c475a080eb3a
+type: Opaque
+
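The username and password values in the secret are base64 encoded. If you want to confirm a stored value, you can decode it, for example:
+$ kubectl get secret accessdomain-credentials -n oamns -o jsonpath='{.data.username}' | base64 --decode
+weblogic
+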
Create a Kubernetes secret for RCU using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:
+$ cd $WORKDIR/kubernetes/create-rcu-credentials
+$ ./create-rcu-credentials.sh -u <rcu_prefix> -p <rcu_schema_pwd> -a sys -q <sys_db_pwd> -d <domain_uid> -n <domain_namespace> -s <kubernetes_rcu_secret>
+
where:
+-u <rcu_prefix>
is the name of the RCU schema prefix created previously
-p <rcu_schema_pwd>
is the password for the RCU schema prefix
-q <sys_db_pwd>
is the sys database password
-d <domain_uid>
is the domain_uid that you created earlier
-n <domain_namespace>
is the domain namespace
-s <kubernetes_rcu_secret>
is the name of the rcu secret to create
For example:
+$ cd $WORKDIR/kubernetes/create-rcu-credentials
+$ ./create-rcu-credentials.sh -u OAMK8S -p <password> -a sys -q <password> -d accessdomain -n oamns -s accessdomain-rcu-credentials
+
The output will look similar to the following:
+secret/accessdomain-rcu-credentials created
+secret/accessdomain-rcu-credentials labeled
+The secret accessdomain-rcu-credentials has been successfully created in the oamns namespace.
+
Verify the secret is created using the following command:
+$ kubectl get secret <kubernetes_rcu_secret> -o yaml -n <domain_namespace>
+
For example:
+$ kubectl get secret accessdomain-rcu-credentials -o yaml -n oamns
+
The output will look similar to the following:
+apiVersion: v1
+data:
+ password: T3JhY2xlXzEyMw==
+ sys_password: T3JhY2xlXzEyMw==
+ sys_username: c3lz
+ username: T0FNSzhT
+kind: Secret
+metadata:
+ creationTimestamp: "<DATE>"
+ labels:
+ weblogic.domainName: accessdomain
+ weblogic.domainUID: accessdomain
+ name: accessdomain-rcu-credentials
+ namespace: oamns
+ resourceVersion: "29428242"
+ uid: 1b81b6e0-fd7d-40b8-a060-454c8d23f4dc
+type: Opaque
+
As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.
+A persistent volume is the same as a disk mount but is inside a container. A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk.
+When a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.
+The example below uses an NFS mounted volume (<persistent_volume>/accessdomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.
+Note: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example /scratch/shared/accessdomainpv
is accessible from all nodes via NFS.
To create a Kubernetes persistent volume, perform the following steps:
+Make a backup copy of the create-pv-pvc-inputs.yaml
file and create required directories:
$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
+$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
+$ mkdir output
+$ mkdir -p <persistent_volume>/accessdomainpv
+$ sudo chown -R 1000:0 <persistent_volume>/accessdomainpv
+
For example:
+$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
+$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
+$ mkdir output
+$ mkdir -p /scratch/shared/accessdomainpv
+$ sudo chown -R 1000:0 /scratch/shared/accessdomainpv
+
On the master node run the following command to ensure it is possible to read and write to the persistent volume:
+cd <persistent_volume>/accessdomainpv
+touch filemaster.txt
+ls filemaster.txt
+
For example:
+cd /scratch/shared/accessdomainpv
+touch filemaster.txt
+ls filemaster.txt
+
On the first worker node run the following to ensure it is possible to read and write to the persistent volume:
+cd /scratch/shared/accessdomainpv
+ls filemaster.txt
+touch fileworker1.txt
+ls fileworker1.txt
+
+Repeat the above for any other worker nodes e.g. fileworker2.txt etc. Once proven that it’s possible to read and write from each node to the persistent volume, delete the files created.
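For example, to tidy up after the checks above (adjust the file names to whatever you created on each node):
+$ cd /scratch/shared/accessdomainpv
+$ rm filemaster.txt fileworker1.txt
+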
+Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
:
$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
+
and edit the create-pv-pvc-inputs.yaml
file and update the following parameters to reflect your settings. Save the file when complete:
baseName: <domain>
+domainUID: <domain_uid>
+namespace: <domain_namespace>
+weblogicDomainStorageType: NFS
+weblogicDomainStorageNFSServer: <nfs_server>
+weblogicDomainStoragePath: <physical_path_of_persistent_storage>
+weblogicDomainStorageSize: 10Gi
+
For example:
+
+# The base name of the pv and pvc
+baseName: domain
+
+# Unique ID identifying a domain.
+# If left empty, the generated pv can be shared by multiple domains
+# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
+domainUID: accessdomain
+
+# Name of the namespace for the persistent volume claim
+namespace: oamns
+...
+# Persistent volume type for the persistent storage.
+# The value must be 'HOST_PATH' or 'NFS'.
+# If using 'NFS', weblogicDomainStorageNFSServer must be specified.
+weblogicDomainStorageType: NFS
+
+# The server name or ip address of the NFS server to use for the persistent storage.
+# The following line must be uncommented and customized if weblogicDomainStorageType is NFS:
+weblogicDomainStorageNFSServer: mynfsserver
+
+# Physical path of the persistent storage.
+# When weblogicDomainStorageType is set to HOST_PATH, this value should be set to the path to the
+# domain storage on the Kubernetes host.
+# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set
+# to the IP address or name of the DNS server, and this value should be set to the exported path
+# on that server.
+# Note that the path where the domain is mounted in the WebLogic containers is not affected by this
+# setting, that is determined when you create your domain.
+# The following line must be uncommented and customized:
+weblogicDomainStoragePath: /scratch/shared/accessdomainpv
+
+# Reclaim policy of the persistent storage
+# The valid values are: 'Retain', 'Delete', and 'Recycle'
+weblogicDomainStorageReclaimPolicy: Retain
+
+# Total storage allocated to the persistent storage.
+weblogicDomainStorageSize: 10Gi
+
Execute the create-pv-pvc.sh
script to create the PV and PVC configuration files:
$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output
+
The output will be similar to the following:
+Input parameters being used
+export version="create-weblogic-sample-domain-pv-pvc-inputs-v1"
+export baseName="domain"
+export domainUID="accessdomain"
+export namespace="oamns"
+export weblogicDomainStorageType="NFS"
+export weblogicDomainStorageNFSServer="mynfsserver"
+export weblogicDomainStoragePath="/scratch/shared/accessdomainpv"
+export weblogicDomainStorageReclaimPolicy="Retain"
+export weblogicDomainStorageSize="10Gi"
+
+Generating output/pv-pvcs/accessdomain-domain-pv.yaml
+Generating output/pv-pvcs/accessdomain-domain-pvc.yaml
+The following files were generated:
+ output/pv-pvcs/accessdomain-domain-pv.yaml
+ output/pv-pvcs/accessdomain-domain-pvc.yaml
+
Run the following to show the files are created:
+$ ls output/pv-pvcs
+accessdomain-domain-pv.yaml accessdomain-domain-pvc.yaml create-pv-pvc-inputs.yaml
+
Run the following kubectl
command to create the PV and PVC in the domain namespace:
$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n <domain_namespace>
+$ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n <domain_namespace>
+
For example:
+$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n oamns
+$ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n oamns
+
The output will look similar to the following:
+persistentvolume/accessdomain-domain-pv created
+persistentvolumeclaim/accessdomain-domain-pvc created
+
Run the following commands to verify the PV and PVC were created successfully:
+$ kubectl describe pv <pv_name>
+$ kubectl describe pvc <pvc_name> -n <domain_namespace>
+
For example:
+$ kubectl describe pv accessdomain-domain-pv
+$ kubectl describe pvc accessdomain-domain-pvc -n oamns
+
The output will look similar to the following:
+$ kubectl describe pv accessdomain-domain-pv
+
+Name: accessdomain-domain-pv
+Labels: weblogic.domainUID=accessdomain
+Annotations: pv.kubernetes.io/bound-by-controller: yes
+Finalizers: [kubernetes.io/pv-protection]
+StorageClass: accessdomain-domain-storage-class
+Status: Bound
+Claim: oamns/accessdomain-domain-pvc
+Reclaim Policy: Retain
+Access Modes: RWX
+VolumeMode: Filesystem
+Capacity: 10Gi
+Node Affinity: <none>
+Message:
+Source:
+ Type: NFS (an NFS mount that lasts the lifetime of a pod)
+ Server: mynfsserver
+ Path: /scratch/shared/accessdomainpv
+ ReadOnly: false
+Events: <none>
+
$ kubectl describe pvc accessdomain-domain-pvc -n oamns
+
+Name: accessdomain-domain-pvc
+Namespace: oamns
+StorageClass: accessdomain-domain-storage-class
+Status: Bound
+Volume: accessdomain-domain-pv
+Labels: weblogic.domainUID=accessdomain
+Annotations: pv.kubernetes.io/bind-completed: yes
+ pv.kubernetes.io/bound-by-controller: yes
+Finalizers: [kubernetes.io/pvc-protection]
+Capacity: 10Gi
+Access Modes: RWX
+VolumeMode: Filesystem
+Events: <none>
+Mounted By: <none>
+
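As a final optional check, both objects should report a STATUS of Bound:
+$ kubectl get pv accessdomain-domain-pv
+$ kubectl get pvc accessdomain-domain-pvc -n oamns
+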
You are now ready to create the OAM domain as per Create OAM Domains.
+This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 4.1.2.
+A running Kubernetes cluster that meets the following requirements:
+You must have the cluster-admin role to install the WebLogic Kubernetes Operator.
+The system clocks on all the nodes must be synchronized. You can verify this by running the date command simultaneously on all the nodes in each cluster and then synchronize accordingly.
+A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OAM as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases. It is recommended that the database initialization parameters are set as per Minimum Initialization Parameters.
+Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information. Also see Getting Started.
+Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OAM domains:
+The OAM cluster cannot be scaled beyond the value of configuredManagedServerCount set when the domain was created. For more details on this parameter, see Prepare the create domain script. It is recommended to pre-configure your cluster so it’s sized a little larger than the maximum size you plan to expand it to. You must rigorously test at this maximum size to make sure that your system can scale as expected.
Review the latest changes and known issues for Oracle Access Management on Kubernetes.
Date | Version | Change
---|---|---
October, 2023 | 23.4.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | This release contains the following changes:
 | | Support for WebLogic Kubernetes Operator 4.1.2.
 | | Ability to set resource requests and limits for CPU and memory on a cluster resource. See, Set the OAM server memory parameters.
 | | Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See, Kubernetes Horizontal Pod Autoscaler.
 | | The default domain now only starts one OAM Managed Server (oam_server1) and one Policy Managed Server (policy_mgr1).
 | | If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.1.2
 | | 2. Patch the OAM container image to October 23
 | | If upgrading to October 23 (23.4.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.1.2
 | | 2. Patch the OAM container image to October 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana
 | | See Patch and Upgrade for these instructions.
July, 2023 | 23.3.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | If upgrading to July 23 (23.3.1) from April 23 (23.2.1), upgrade as follows:
 | | 1. Patch the OAM container image to July 23
 | | If upgrading to July 23 (23.3.1) from October 22 (22.4.1), or January 23 (23.1.1) release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OAM container image to July 23
 | | If upgrading to July 23 (23.3.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OAM container image to July 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana
 | | See Patch and Upgrade for these instructions.
April, 2023 | 23.2.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | Support for WebLogic Kubernetes Operator 4.0.4.
 | | Changes to stopping/starting pods due to domain and cluster configuration being separated and parameter changes (IF_NEEDED, NEVER to IfNeeded, Never).
 | | If upgrading to April 23 (23.2.1) from October 22 (22.4.1) or later, you must upgrade in the following order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OAM container image to April 23
 | | If upgrading to April 23 (23.2.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OAM container image to April 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
January, 2023 | 23.1.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | If upgrading to January 23 (23.1.1) from October 22 (22.4.1) release, you only need to patch the OAM container image to January 23.
 | | If upgrading to January 23 (23.1.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 3.4.2
 | | 2. Patch the OAM container image to January 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
October, 2022 | 22.4.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | Support for WebLogic Kubernetes Operator 3.4.2.
 | | Additional Ingress mappings added.
 | | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.
 | | OAM container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support.
 | | If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 3.4.2
 | | 2. Patch the OAM container image to October 22
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana See Patch and Upgrade for these instructions.
July, 2022 | 22.3.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
April, 2022 | 22.2.1 | Updated for CRI-O support.
November, 2021 | 21.4.2 | Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.
October 2021 | 21.4.1 | A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific.
November 2020 | 20.4.1 | Initial release of Oracle Access Management on Kubernetes.
If the OAM domain creation fails when running create-domain.sh
, run the following to diagnose the issue:
Run the following command to diagnose the create domain job:
+$ kubectl logs <domain_job> -n <domain_namespace>
+
For example:
+$ kubectl logs accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns
+
Also run:
+$ kubectl describe pod <domain_job> -n <domain_namespace>
+
For example:
+$ kubectl describe pod accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns
+
Using the output you should be able to diagnose the problem and resolve the issue.
+Clean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OAM domain creation steps again.
+If any of the above commands return the following error:
+Failed to start container "create-fmw-infra-sample-domain-job": Error response from daemon: error while creating mount source path
+'/scratch/shared/accessdomainpv ': mkdir /scratch/shared/accessdomainpv : permission denied
+
then there is a permissions error on the directory for the PV and PVC and the following should be checked:
+a) The directory has 777 permissions: chmod -R 777 <persistent_volume>/accessdomainpv
.
b) If it does have the permissions, check if an oracle user exists and the uid is 1000 and gid is 0.
+Create the oracle user if it doesn’t exist and set the uid to 1000 and gid to 0 (see the example after these steps).
+c) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml
and add a slash to the end of the directory for the weblogicDomainStoragePath
parameter:
weblogicDomainStoragePath: /scratch/shared/accessdomainpv/
+
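The following is a minimal sketch of creating the oracle user mentioned in step b, assuming useradd is available on the host and that gid 0 is acceptable in your environment:
+$ sudo useradd -u 1000 -g 0 oracle
+$ id oracle
+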
Clean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OAM domain creation steps again.
+In this section you validate the OAM domain URLs are accessible via the NGINX ingress.
+Make sure you know the master hostname and ingress port for NGINX before proceeding.
+Launch a browser and access the following URL’s. Login with the weblogic username and password (weblogic/<password>
).
Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}
.
Console or Page | URL
---|---
WebLogic Administration Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
Oracle Enterprise Manager Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em
Oracle Access Management Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oamconsole
Oracle Access Management Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/access
Logout URL | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oam/server/logout
Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OAM domain. To control the Administration Server and OAM Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.
+The browser will give certificate errors if you used a self-signed certificate and have not imported it into the browser's Certificate Authority store. If this occurs you can proceed with the connection and ignore the errors.
+After validating the URLs, proceed to Post Install Configuration.
In this section you validate that single sign-on works to the OAM Kubernetes cluster via Oracle WebGate. The instructions below assume you have a running Oracle HTTP Server (for example ohs_k8s
) and Oracle WebGate installed on an independent server. The instructions also assume basic knowledge of how to register a WebGate agent.
Note: At present Oracle HTTP Server and Oracle WebGate are not supported on a Kubernetes cluster.
+If using an NGINX ingress with no load balancer, change {LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}
to {MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
when referenced below.
Launch a browser and access the OAM console (https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}/oamconsole
). Login with the weblogic username and password (weblogic/<password>
)
Navigate to Configuration → Settings ( View ) → Access Manager.
+Under Load Balancing modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g loadbalancer.example.com
and <port>
respectively). In the OAM Server Protocol drop down list select https.
Under WebGate Traffic Load Balancer modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g loadbalancer.example.com
and <port>
respectively). In the OAM Server Protocol drop down list select https.
Click Apply.
+In all the examples below, change the directory path as appropriate for your installation.
+Run the following command on the server with Oracle HTTP Server and WebGate installed:
+$ cd <OHS_ORACLE_HOME>/webgate/ohs/tools/deployWebGate
+
+$ ./deployWebGateInstance.sh -w <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s -oh <OHS_ORACLE_HOME> -ws ohs
+
The output will look similar to the following:
+Copying files from WebGate Oracle Home to WebGate Instancedir
+
Run the following command to update the OHS configuration files appropriately:
+$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<OHS_ORACLE_HOME>/lib
+$ cd <OHS_ORACLE_HOME>/webgate/ohs/tools/setup/InstallTools/
+$ ./EditHttpConf -w <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s -oh <OHS_ORACLE_HOME>
+
The output will look similar to the following:
+The web server configuration file was successfully updated
+<OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf has been backed up as <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf.ORIG
+
Launch a browser, and access the OAM console. Navigate to Application Security → Quick Start Wizards → SSO Agent Registration. Register the agent in the usual way, download the configuration zip file and copy to the OHS WebGate server, for example: <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8/webgate/config
. Extract the zip file.
Copy the Certificate Authority (CA) certificate (cacert.pem
) for the load balancer/ingress certificate to the same directory e.g: <OHS_DOMAIN_HOME>/config/fmwconfig/components/OHS/ohs_k8/webgate/config
.
If you used a self signed certificate for the ingress, instead copy the self signed certificate (e.g: /scratch/ssl/tls.crt
) to the above directory. Rename the certificate to cacert.pem
.
Restart Oracle HTTP Server.
+Access the configured OHS e.g http://ohs.example.com:7778
, and check you are redirected to the SSO login page. Login and make sure you are redirected successfully to the home page.
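If you prefer a command line check before using a browser, the following illustrative curl request (assuming the example hostname and port above) should return a redirect status code such as 302 to the SSO login page:
+$ curl -s -o /dev/null -w "%{http_code}\n" http://ohs.example.com:7778/
+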
Note: This section should only be followed if you need to change the OAM/WebGate Agent communication from HTTPS to OAP.
+To change the WebGate agent to use OAP:
+In the OAM Console click Application Security and then Agents.
+Search for the agent you want modify and select it.
+In the User Defined Parameters change:
+a) OAMServerCommunicationMode
from HTTPS
to OAP
. For example OAMServerCommunicationMode=OAP
b) OAMRestEndPointHostName=<hostname>
to the {$MASTERNODE-HOSTNAME}
. For example OAMRestEndPointHostName=masternode.example.com
In the Server Lists section click Add to add a new server with the following values:
+Access Server
: Other
Host Name
: <{$MASTERNODE-HOSTNAME}>
Host Port
: <oamoap-service NodePort>
Note: To find the value for Host Port
run the following:
$ kubectl describe svc accessdomain-oamoap-service -n oamns
+
The output will look similar to the following:
+Name: accessdomain-oamoap-service
+Namespace: oamns
+Labels: <none>
+Annotations: <none>
+Selector: weblogic.clusterName=oam_cluster
+Type: NodePort
+IP Families: <none>
+IP: 10.100.202.44
+IPs: 10.100.202.44
+Port: <unset> 5575/TCP
+TargetPort: 5575/TCP
+NodePort: <unset> 30540/TCP
+Endpoints: 10.244.5.21:5575,10.244.6.76:5575
+Session Affinity: None
+External Traffic Policy: Cluster
+Events: <none>
+
In the example above the NodePort
is 30540
.
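Alternatively, the NodePort can be retrieved directly with a jsonpath query, for example:
+$ kubectl get svc accessdomain-oamoap-service -n oamns -o jsonpath='{.spec.ports[0].nodePort}'
+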
Delete all servers in Server Lists except for the one just created, and click Apply
.
Click Download to download the webgate zip file. Copy the zip file to the desired WebGate.
+Delete the cache from <OHS_DOMAIN_HOME>/servers/ohs1/cache
and restart Oracle HTTP Server.
As of July 2022, container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details.
+To view documentation for previous releases, see:
+Review the latest changes and known issues for Oracle Internet Directory on Kubernetes.
Date | Version | Change
---|---|---
July, 2022 | 22.3.1 | As of July 2022, Container support has been removed for Oracle Internet Directory. Refer to document ID 2723908.1 on My Oracle Support for more details.
April, 2022 | 22.2.1 | Updated for CRI-O support.
October, 2021 | 21.4.1 | Initial release of Oracle Internet Directory on Kubernetes.
Configure an Ingress to allow Design Console to connect to your Kubernetes cluster.
Configure Design Console with NGINX (non-SSL).
Configure Design Console with NGINX (SSL).
Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster.
+If you haven’t already configured an NGINX ingress controller (Non-SSL) for OIG, follow Using an Ingress with NGINX (non-SSL).
+Make sure you know the master hostname and ingress port for NGINX before proceeding e.g http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
.
Note: In all steps below if you are using a load balancer for your ingress instead of NodePort then replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.
Setup routing rules by running the following commands:
+$ cd $WORKDIR/kubernetes/design-console-ingress
+
Edit values.yaml
and ensure that tls: NONSSL
and domainUID: governancedomain
are set, for example:
# Load balancer type. Supported values are: NGINX
+type: NGINX
+# Type of Configuration Supported Values are : NONSSL,SSL
+# tls: NONSSL
+tls: NONSSL
+# TLS secret name if the mode is SSL
+secretName: dc-tls-cert
+
+
+# WLS domain as backend to the load balancer
+wlsDomain:
+ domainUID: governancedomain
+ oimClusterName: oim_cluster
+ oimServerT3Port: 14002
+
Run the following command to create the ingress:
+$ cd $WORKDIR
+$ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace <domain_namespace> --values kubernetes/design-console-ingress/values.yaml
+
For example:
+$ cd $WORKDIR
+$ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml
+
+The output will look similar to the following:
+NAME: governancedomain-nginx-designconsole
+LAST DEPLOYED: <DATE>
+NAMESPACE: oigns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
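As an optional sanity check, you can also list the Helm releases in the namespace to confirm the release shows a deployed status (assumes Helm 3):
$ helm list -n oigns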
Run the following command to show the ingress is created successfully:
+$ kubectl describe ing governancedomain-nginx-designconsole -n <domain_namespace>
+
For example:
+$ kubectl describe ing governancedomain-nginx-designconsole -n oigns
+
The output will look similar to the following:
+Name: governancedomain-nginx-designconsole
+Namespace: oigns
+Address:
+Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
+Rules:
+ Host Path Backends
+ ---- ---- --------
+ *
+ governancedomain-cluster-oim-cluster:14002 (10.244.1.25:14002)
+Annotations: kubernetes.io/ingress.class: nginx
+ meta.helm.sh/release-name: governancedomain-nginx-designconsole
+ meta.helm.sh/release-namespace: oigns
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/enable-access-log: false
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Sync 13s nginx-ingress-controller Scheduled for sync
+
Log in to the WebLogic Console using http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
.
Navigate to Environment, click Servers, and then select oim_server1.
+Click Protocols, and then Channels.
+Click the default T3 channel called T3Channel.
+Click Lock and Edit.
+Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}
.
Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}
.
Click Save.
+Click Activate Changes.
+Restart the OIG Managed Server for the above changes to take effect:
+$ cd $WORKDIR/kubernetes/domain-lifecycle
+$ ./restartServer.sh -s oim_server1 -d <domain_uid> -n <domain_namespace>
+
For example:
+$ cd $WORKDIR/kubernetes/domain-lifecycle
+./restartServer.sh -s oim_server1 -d governancedomain -n oigns
+
Make sure the <domain_uid>-oim-server1
has a READY
status of 1/1
before continuing:
$ kubectl get pods -n oigns | grep oim-server1
+
The output will look similar to the following:
+governancedomain-oim-server1 1/1 Running 0 8m
+
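Rather than polling manually, you can optionally wait for the pod to report Ready using kubectl wait (the pod name below assumes the governancedomain example; adjust the timeout to suit your environment):
$ kubectl wait --for=condition=Ready pod/governancedomain-oim-server1 -n oigns --timeout=600s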
It is possible to use Design Console from an on-premises install, or from a container image.
+Install Design Console on an on-premises machine
+Follow Login to the Design Console.
+The Design Console can be run from a container using X windows emulation.
+On the parent machine where the Design Console is to be displayed, run xhost +
.
Find which worker node the <domain>-oim-server1
pod is running. For example:
$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1
+
The output will look similar to the following:
+governancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 <none> <none>
+
On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:
$ docker images
+
Then execute the following command to start a container to run Design Console:
+$ docker run -u root --name oigdcbase -it <image> bash
+
For example:
+$ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
+
This will take you into a bash shell inside the container:
+bash-4.2#
+
Inside the container set the proxy, for example:
+bash-4.2# export https_proxy=http://proxy.example.com:80
+
Install the relevant X windows packages in the container:
+bash-4.2# yum install libXext libXrender libXtst
+
Execute the following outside the container to create a new Design Console image from the container:
+$ docker commit <container_name> <design_console_image_name>
+
For example:
+$ docker commit oigdcbase oigdc
+
Exit the container bash session:
+bash-4.2# exit
+
Start a new container using the Design Console image:
+$ docker run --name oigdc -it oigdc /bin/bash
+
This will take you into a bash shell for the container:
+bash-4.2#
+
In the container run the following to export the DISPLAY:
+$ export DISPLAY=<parent_machine_hostname:1>
+
Start the Design Console from the container:
+bash-4.2# cd idm/designconsole
+bash-4.2# sh xlclient.sh
+
The Design Console login should be displayed. Now follow Login to the Design Console.
+On the parent machine where the Design Console is to be displayed, run xhost +
.
Find which worker node the <domain>-oim-server1
pod is running. For example:
$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1
+
The output will look similar to the following:
+governancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 <none> <none>
+
On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:
$ podman images
+
Then execute the following command to start a container to run Design Console:
+$ podman run -u root --name oigdcbase -it <image> bash
+
For example:
+$ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
+
This will take you into a bash shell inside the container:
+bash-4.2#
+
Inside the container set the proxy, for example:
+bash-4.2# export https_proxy=http://proxy.example.com:80
+
Install the relevant X windows packages in the container:
+bash-4.2# yum install libXext libXrender libXtst
+
Execute the following outside the container to create a new Design Console image from the container:
+$ podman commit <container_name> <design_console_image_name>
+
For example:
+$ podman commit oigdcbase oigdc
+
Exit the container bash session:
+bash-4.2# exit
+
Start a new container using the Design Console image:
+$ podman run --name oigdc -it oigdc /bin/bash
+
This will take you into a bash shell for the container:
+bash-4.2#
+
In the container run the following to export the DISPLAY:
+$ export DISPLAY=<parent_machine_hostname:1>
+
Start the Design Console from the container:
+bash-4.2# cd idm/designconsole
+bash-4.2# sh xlclient.sh
+
The Design Console login should be displayed. Now follow Login to the Design Console.
Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details, then click Login:
+Server URL
: <url>
User ID
: xelsysadm
Password
: <password>
where <url> is http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.
If successful the Design Console will be displayed.
+Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster.
+If you haven’t already configured an NGINX ingress controller (SSL) for OIG, follow Using an Ingress with NGINX (SSL).
Make sure you know the master hostname and ingress port for NGINX before proceeding, e.g. https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}. Also make sure you know the Kubernetes secret for SSL that was generated, e.g. governancedomain-tls-cert.
Set up routing rules by running the following commands:
+$ cd $WORKDIR/kubernetes/design-console-ingress
+
Edit values.yaml
and ensure that tls: SSL
is set. Change domainUID:
and secretName:
to match the values for your <domain_uid>
and your SSL Kubernetes secret, for example:
# Load balancer type. Supported values are: NGINX
+type: NGINX
+# Type of Configuration Supported Values are : NONSSL,SSL
+# tls: NONSSL
+tls: SSL
+# TLS secret name if the mode is SSL
+secretName: governancedomain-tls-cert
+
+
+# WLS domain as backend to the load balancer
+wlsDomain:
+ domainUID: governancedomain
+ oimClusterName: oim_cluster
+ oimServerT3Port: 14002
+
Run the following command to create the ingress:
+$ cd $WORKDIR
+$ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml
+
The output will look similar to the following:
+NAME: governancedomain-nginx-designconsole
+LAST DEPLOYED: <DATE>
+NAMESPACE: oigns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
Run the following command to show the ingress is created successfully:
+$ kubectl describe ing governancedomain-nginx-designconsole -n <domain_namespace>
+
For example:
+$ kubectl describe ing governancedomain-nginx-designconsole -n oigns
+
The output will look similar to the following:
+Name: governancedomain-nginx-designconsole
+Namespace: oigns
+Address:
+Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
+Rules:
+ Host Path Backends
+ ---- ---- --------
+ *
+ governancedomain-cluster-oim-cluster:14002 (10.244.2.103:14002)
+Annotations: kubernetes.io/ingress.class: nginx
+ meta.helm.sh/release-name: governancedomain-nginx-designconsole
+ meta.helm.sh/release-namespace: oigns
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/configuration-snippet:
+ more_set_input_headers "X-Forwarded-Proto: https";
+ more_set_input_headers "WL-Proxy-SSL: true";
+ nginx.ingress.kubernetes.io/enable-access-log: false
+ nginx.ingress.kubernetes.io/ingress.allow-http: false
+ nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Sync 6s nginx-ingress-controller Scheduled for sync
+
Log in to the WebLogic Console using https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
.
Navigate to Environment, click Servers, and then select oim_server1.
+Click Protocols, and then Channels.
+Click the default T3 channel called T3Channel.
+Click Lock and Edit.
+Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}
.
Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}
.
Click Save.
+Click Activate Changes.
+Restart the OIG Managed Server for the above changes to take effect:
+$ cd $WORKDIR/kubernetes/domain-lifecycle
+$ ./restartServer.sh -s oim_server1 -d <domain_uid> -n <domain_namespace>
+
For example:
+$ cd $WORKDIR/kubernetes/domain-lifecycle
+./restartServer.sh -s oim_server1 -d governancedomain -n oigns
+
Make sure the <domain_uid>-oim-server1 has a READY
status of 1/1
before continuing:
$ kubectl get pods -n oigns | grep oim-server1
+
The output will look similar to the following:
+governancedomain-oim-server1 1/1 Running 0 8m
+
It is possible to use Design Console from an on-premises install, or from a container image.
+The instructions below should be performed on the client where Design Console is installed.
+Import the CA certificate into the java keystore
If in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g. cacert.crt) that signed your certificate, into the Java truststore used by Design Console.
If in Generate SSL Certificate you generated a self-signed certificate (e.g. tls.crt), you must import the self-signed certificate into the Java truststore used by Design Console.
+Import the certificate using the following command:
+$ keytool -import -trustcacerts -alias dc -file <certificate> -keystore $JAVA_HOME/jre/lib/security/cacerts
+
where <certificate> is the CA certificate, or self-signed certificate.
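To confirm the import succeeded, you can optionally list the alias in the truststore (keytool prompts for the keystore password, which is changeit for a default Java cacerts file unless it has been changed):
$ keytool -list -alias dc -keystore $JAVA_HOME/jre/lib/security/cacerts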
Once complete follow Login to the Design Console.
+The Design Console can be run from a container using X windows emulation.
+On the parent machine where the Design Console is to be displayed, run xhost +
.
Find which worker node the <domain>-oim-server1
pod is running. For example:
$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1
+
The output will look similar to the following:
+governancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2
+
On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:
$ docker images
+
Then execute the following command to start a container to run Design Console:
+$ docker run -u root --name oigdcbase -it <image> bash
+
For example:
+$ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
+
This will take you into a bash shell inside the container:
+bash-4.2#
+
Inside the container set the proxy, for example:
+bash-4.2# export https_proxy=http://proxy.example.com:80
+
Install the relevant X windows packages in the container:
+bash-4.2# yum install libXext libXrender libXtst
+
Execute the following outside the container to create a new Design Console image from the container:
+$ docker commit <container_name> <design_console_image_name>
+
For example:
+$ docker commit oigdcbase oigdc
+
Exit the container bash session:
+bash-4.2# exit
+
Start a new container using the Design Console image:
+$ docker run --name oigdc -it oigdc /bin/bash
+
This will take you into a bash shell for the container:
+bash-4.2#
+
Copy the Ingress CA certificate into the container
If in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g. cacert.crt) that signed your certificate, into the container.
If in Generate SSL Certificate you generated a self-signed certificate (e.g. tls.crt), you must copy the self-signed certificate into the container.
+Note: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.
+Run the following command outside the container:
+$ cd <workdir>/ssl
+$ docker cp <certificate> <container_name>:/u01/jdk/jre/lib/security/<certificate>
+
For example:
+$ cd /scratch/OIGK8S/ssl
+$ docker cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt
+
Import the certificate using the following command:
+bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/<certificate> -keystore /u01/jdk/jre/lib/security/cacerts
+
For example:
+bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts
+
In the container run the following to export the DISPLAY:
+$ export DISPLAY=<parent_machine_hostname:1>
+
Start the Design Console from the container:
+bash-4.2# cd idm/designconsole
+bash-4.2# sh xlclient.sh
+
The Design Console login should be displayed. Now follow Login to the Design Console.
+On the parent machine where the Design Console is to be displayed, run xhost +
.
Find which worker node the <domain>-oim-server1
pod is running. For example:
$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1
+
The output will look similar to the following:
+governancedomain-oim-server1 1/1 Running 0 19h 10.244.2.55 worker-node2 <none>
+
On the worker node returned above, e.g. worker-node2, execute the following command to find the OIG container image name:
$ podman images
+
Then execute the following command to start a container to run Design Console:
+$ podman run -u root --name oigdcbase -it <image> bash
+
For example:
+$ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<January'23> bash
+
This will take you into a bash shell inside the container:
+bash-4.2#
+
Inside the container set the proxy, for example:
+bash-4.2# export https_proxy=http://proxy.example.com:80
+
Install the relevant X windows packages in the container:
+bash-4.2# yum install libXext libXrender libXtst
+
Execute the following outside the container to create a new Design Console image from the container:
+$ podman commit <container_name> <design_console_image_name>
+
For example:
+$ podman commit oigdcbase oigdc
+
Exit the container bash session:
+bash-4.2# exit
+
Start a new container using the Design Console image:
+$ podman run --name oigdc -it oigdc /bin/bash
+
This will take you into a bash shell for the container:
+bash-4.2#
+
Copy the Ingress CA certificate into the container
If in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g. cacert.crt) that signed your certificate, into the container.
If in Generate SSL Certificate you generated a self-signed certificate (e.g. tls.crt), you must copy the self-signed certificate into the container.
+Note: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.
+Run the following command outside the container:
+$ cd <workdir>/ssl
+$ podman cp <certificate> <container_name>:/u01/jdk/jre/lib/security/<certificate>
+
For example:
+$ cd /scratch/OIGK8S/ssl
+$ podman cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt
+
Inside the container, import the certificate using the following command:
+bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/<certificate> -keystore /u01/jdk/jre/lib/security/cacerts
+
For example:
+bash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts
+
In the container run the following to export the DISPLAY:
+$ export DISPLAY=<parent_machine_hostname:1>
+
Start the Design Console from the container:
+bash-4.2# cd idm/designconsole
+bash-4.2# sh xlclient.sh
+
The Design Console login should be displayed. Now follow Login to the Design Console.
Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details, then click Login:
+Server URL
: <url>
User ID
: xelsysadm
Password
: <password>
where <url> is https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.
If successful the Design Console will be displayed.
+Choose one of the following supported methods to configure an Ingress to direct traffic for your OIG domain.
Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (non-SSL).
Steps to set up an Ingress for NGINX to direct traffic to the OIG domain using SSL.
The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination.
+Note: All the steps below should be performed on the master node.
+Generate a private key and certificate signing request (CSR) using a tool of your choice. Send the CSR to your certificate authority (CA) to generate the certificate.
If you want to use a certificate for testing purposes you can generate a self-signed certificate using openssl:
+$ mkdir <workdir>/ssl
+$ cd <workdir>/ssl
+$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=<nginx-hostname>"
+
For example:
+$ mkdir /scratch/OIGK8S/ssl
+$ cd /scratch/OIGK8S/ssl
+$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=masternode.example.com"
+
Note: The CN
should match the host.domain of the master node in order to prevent hostname problems during certificate verification.
The output will look similar to the following:
+Generating a 2048 bit RSA private key
+..........................................+++
+.......................................................................................................+++
+writing new private key to 'tls.key'
+-----
+
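Optionally, you can inspect the generated certificate to confirm the subject CN and validity period before creating the Kubernetes secret:
$ openssl x509 -in tls.crt -noout -subject -dates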
Create a secret for SSL containing the SSL certificate by running the following command:
+$ kubectl -n oigns create secret tls <domain_uid>-tls-cert --key <workdir>/tls.key --cert <workdir>/tls.crt
+
For example:
+$ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGK8S/ssl/tls.key --cert /scratch/OIGK8S/ssl/tls.crt
+
The output will look similar to the following:
+secret/governancedomain-tls-cert created
+
Confirm that the secret is created by running the following command:
+$ kubectl get secret <domain_uid>-tls-cert -o yaml -n oigns
+
For example:
+$ kubectl get secret governancedomain-tls-cert -o yaml -n oigns
+
The output will look similar to the following:
+apiVersion: v1
+data:
+ tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3Ym1lTzJkMVd2NQp1aFhzbkFTbnkwY1N9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
+ tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm1WcnYxTEg0eGNhaDJIZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
+kind: Secret
+metadata:
+ creationTimestamp: "<DATE>"
+ name: governancedomain-tls-cert
+ namespace: oigns
+ resourceVersion: "3319899"
+ uid: 274cc960-281a-494c-a3e3-d93c3abd051f
+type: kubernetes.io/tls
+
+
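If you want to verify that the secret contains the expected certificate, an optional check is to decode it and pass it through openssl (the backslash escapes the dot in the tls.crt key name for jsonpath):
$ kubectl get secret governancedomain-tls-cert -n oigns -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -subject -dates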
Use helm to install NGINX.
+Add the Helm chart repository for installing NGINX using the following command:
+$ helm repo add stable https://kubernetes.github.io/ingress-nginx
+
The output will look similar to the following:
+"stable" has been added to your repositories
+
Update the repository using the following command:
+$ helm repo update
+
The output will look similar to the following:
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+Update Complete. Happy Helming!
+
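Optionally, you can list the chart versions available from the repository before installing, which is useful if you need to pin a specific controller version:
$ helm search repo stable/ingress-nginx --versions | head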
Create a Kubernetes namespace for NGINX:
+$ kubectl create namespace nginxssl
+
The output will look similar to the following:
+namespace/nginxssl created
+
If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort
parameter.
If you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer
parameter. This instructs the Managed Service to set up a Load Balancer to direct traffic to the NGINX ingress.
To install NGINX use the following helm command depending on whether you are using NodePort or LoadBalancer:
a) Using NodePort
+$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
The output will look similar to the following:
+$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+NAME: nginx-ingress
+LAST DEPLOYED: <DATE>
+NAMESPACE: nginxssl
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The nginx-ingress controller has been installed.
+Get the application URL by running these commands:
+ export HTTP_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-controller)
+ export HTTPS_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-controller)
+ export NODE_IP=$(kubectl --namespace nginxssl get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
+
+ echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ name: example
+ namespace: foo
+ spec:
+ ingressClassName: example-class
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ serviceName: exampleService
+ servicePort: 80
+ path: /
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
b) Using LoadBalancer
+$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
The output will look similar to the following:
+NAME: nginx-ingress
+LAST DEPLOYED: <DATE>
+NAMESPACE: nginxssl
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The ingress-nginx controller has been installed.
+It may take a few minutes for the LoadBalancer IP to be available.
+You can watch the status by running 'kubectl --namespace nginxssl get services -o wide -w nginx-ingress-ingress-nginx-controller'
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ name: example
+ namespace: foo
+ spec:
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: exampleService
+ port: 80
+
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
Set up routing rules by running the following commands:
+$ cd $WORKDIR/kubernetes/charts/ingress-per-domain
+
Edit values.yaml
and change the domainUID
parameter to match your domainUID
, for example domainUID: governancedomain
. Change sslType
to SSL
. The file should look as follows:
# Load balancer type. Supported values are: NGINX
+type: NGINX
+
+# SSL configuration Type. Supported Values are : NONSSL,SSL
+sslType: SSL
+
+# domainType. Supported values are: oim
+domainType: oim
+
+#WLS domain as backend to the load balancer
+wlsDomain:
+ domainUID: governancedomain
+ adminServerName: AdminServer
+ adminServerPort: 7001
+ adminServerSSLPort:
+ soaClusterName: soa_cluster
+ soaManagedServerPort: 8001
+ soaManagedServerSSLPort:
+ oimClusterName: oim_cluster
+ oimManagedServerPort: 14000
+ oimManagedServerSSLPort:
+
+# Host specific values
+hostName:
+ enabled: false
+ admin:
+ runtime:
+ internal:
+
+# Ngnix specific values
+nginx:
+ nginxTimeOut: 180
+
Create an Ingress for the domain (governancedomain-nginx) in the domain namespace by using the sample Helm chart:
$ cd $WORKDIR
+$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
+
Note: The $WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml
has nginx.ingress.kubernetes.io/enable-access-log
set to false
. If you want to enable access logs then set this value to true
before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained.
For example:
+$ cd $WORKDIR
+$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
+
The output will look similar to the following:
+NAME: governancedomain-nginx
+LAST DEPLOYED: <DATE>
+NAMESPACE: oigns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
Run the following command to show the ingress is created successfully:
+$ kubectl get ing -n <namespace>
+
For example:
+$ kubectl get ing -n oigns
+
The output will look similar to the following:
+NAME CLASS HOSTS ADDRESS PORTS AGE
+governancedomain-nginx <none> * x.x.x.x 80 49s
+
Find the node port of NGINX using the following command:
+$ kubectl get services -n nginxssl -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller
+
The output will look similar to the following:
+32033
+
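You can also view the full port mapping of the controller service, which shows both the HTTP and HTTPS NodePorts in one place (the service name assumes the nginx-ingress release installed above):
$ kubectl get svc nginx-ingress-ingress-nginx-controller -n nginxssl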
Run the following command to check the ingress:
+$ kubectl describe ing governancedomain-nginx -n <namespace>
+
For example:
+$ kubectl describe ing governancedomain-nginx -n oigns
+
The output will look similar to the following:
+Name: governancedomain-nginx
+Namespace: oigns
+Address: 10.111.175.104
+Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
+Rules:
+ Host Path Backends
+ ---- ---- --------
+ *
+ /console governancedomain-adminserver:7001 (10.244.2.50:7001)
+ /consolehelp governancedomain-adminserver:7001 (10.244.2.50:7001)
+ /em governancedomain-adminserver:7001 (10.244.2.50:7001)
+ /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ Annotations: kubernetes.io/ingress.class: nginx
+ meta.helm.sh/release-name: governancedomain-nginx
+ meta.helm.sh/release-namespace: oigns
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/affinity-mode: persistent
+ nginx.ingress.kubernetes.io/configuration-snippet:
+ more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
+ more_set_input_headers "X-Forwarded-Proto: https";
+ more_set_input_headers "WL-Proxy-SSL: true";
+ nginx.ingress.kubernetes.io/enable-access-log: false
+ nginx.ingress.kubernetes.io/ingress.allow-http: false
+ nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
+ nginx.ingress.kubernetes.io/proxy-read-timeout: 180
+ nginx.ingress.kubernetes.io/proxy-send-timeout: 180
+ nginx.ingress.kubernetes.io/session-cookie-name: sticky
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Sync 18s (x2 over 38s) nginx-ingress-controller Scheduled for sync
+
To confirm that the new Ingress is successfully routing to the domain’s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework
:
Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}
.
$ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready
+
For example:
+$ curl -v -k https://masternode.example.com:32033/weblogic/ready
+
The output will look similar to the following:
+$ curl -v -k https://masternode.example.com:32033/weblogic/ready
+* About to connect() to X.X.X.X port 32033 (#0)
+* Trying X.X.X.X...
+* Connected to masternode.example.com (X.X.X.X) port 32033 (#0)
+* Initializing NSS with certpath: sql:/etc/pki/nssdb
+* skipping SSL peer certificate verification
+* SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+* Server certificate:
+* subject: CN=masternode.example.com
+* start date: <DATE>
+* expire date: <DATE>
+* common name: masternode.example.com
+* issuer: CN=masternode.example.com
+> GET /weblogic/ready HTTP/1.1
+> User-Agent: curl/7.29.0
+> Host: X.X.X.X:32033
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Server: nginx/1.19.1
+< Date: <DATE>
+< Content-Length: 0
+< Connection: keep-alive
+< Strict-Transport-Security: max-age=15724800; includeSubDomains
+<
+* Connection #0 to host X.X.X.X left intact
+
After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 32033) as per Validate Domain URLs.
The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination.
+Note: All the steps below should be performed on the master node.
+Use helm to install NGINX.
+Add the Helm chart repository for NGINX using the following command:
+$ helm repo add stable https://kubernetes.github.io/ingress-nginx
+
The output will look similar to the following:
+"stable" has been added to your repositories
+
Update the repository using the following command:
+$ helm repo update
+
The output will look similar to the following:
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+Update Complete. Happy Helming!
+
Create a Kubernetes namespace for NGINX by running the following command:
+$ kubectl create namespace nginx
+
The output will look similar to the following:
+namespace/nginx created
+
If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort
parameter.
If you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to set up a Load Balancer to direct traffic to the NGINX ingress.
To install NGINX use the following helm command depending on whether you are using NodePort or LoadBalancer:
a) Using NodePort
+$ helm install nginx-ingress -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
The output will look similar to the following:
+NAME: nginx-ingress
+LAST DEPLOYED: <DATE>
+NAMESPACE: nginx
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The ingress-nginx controller has been installed.
+Get the application URL by running these commands:
+ export HTTP_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-ingress-nginx-controller)
+ export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller)
+ export NODE_IP=$(kubectl --namespace nginx get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
+
+ echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
+ echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ name: example
+ namespace: foo
+ spec:
+ ingressClassName: example-class
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: exampleService
+ port: 80
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
b) Using LoadBalancer
+$ helm install nginx-ingress -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx
+
The output will look similar to the following:
+NAME: nginx-ingress
+LAST DEPLOYED: <DATE>
+NAMESPACE: nginx
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The nginx-ingress controller has been installed.
+It may take a few minutes for the LoadBalancer IP to be available.
+You can watch the status by running 'kubectl --namespace nginx get services -o wide -w nginx-ingress-controller'
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ name: example
+ namespace: foo
+ spec:
+ ingressClassName: example-class
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: exampleService
+ port: 80
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
Set up routing rules by running the following commands:
+$ cd $WORKDIR/kubernetes/charts/ingress-per-domain
+
Edit values.yaml
and change the domainUID
parameter to match your domainUID
, for example domainUID: governancedomain
. Also change sslType
to NONSSL
. The file should look as follows:
# Load balancer type. Supported values are: NGINX
+type: NGINX
+
+# SSL configuration Type. Supported Values are : NONSSL,SSL
+sslType: NONSSL
+
+# domainType. Supported values are: oim
+domainType: oim
+
+#WLS domain as backend to the load balancer
+wlsDomain:
+ domainUID: governancedomain
+ adminServerName: AdminServer
+ adminServerPort: 7001
+ adminServerSSLPort:
+ soaClusterName: soa_cluster
+ soaManagedServerPort: 8001
+ soaManagedServerSSLPort:
+ oimClusterName: oim_cluster
+ oimManagedServerPort: 14000
+ oimManagedServerSSLPort:
+
+# Host specific values
+hostName:
+ enabled: false
+ admin:
+ runtime:
+ internal:
+
+# Ngnix specific values
+nginx:
+ nginxTimeOut: 180
+
Create an Ingress for the domain (governancedomain-nginx) in the domain namespace by using the sample Helm chart:
$ cd $WORKDIR
+$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace <namespace> --values kubernetes/charts/ingress-per-domain/values.yaml
+
Note: The <workdir>/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml
has nginx.ingress.kubernetes.io/enable-access-log
set to false
. If you want to enable access logs then set this value to true
before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained.
For example:
+$ cd $WORKDIR
+$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
+
The output will look similar to the following:
+$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
+NAME: governancedomain-nginx
+LAST DEPLOYED: <DATE>
+NAMESPACE: oigns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
Run the following command to show the ingress is created successfully:
+$ kubectl get ing -n <domain_namespace>
+
For example:
+$ kubectl get ing -n oigns
+
The output will look similar to the following:
+NAME CLASS HOSTS ADDRESS PORTS AGE
+governancedomain-nginx <none> * x.x.x.x 80 47s
+
Find the NodePort of NGINX using the following command (only if you installed NGINX using NodePort):
+$ kubectl get services -n nginx -o jsonpath="{.spec.ports[0].nodePort}" nginx-ingress-ingress-nginx-controller
+
The output will look similar to the following:
+31530
+
Run the following command to check the ingress:
+$ kubectl describe ing governancedomain-nginx -n <namespace>
+
For example:
+$ kubectl describe ing governancedomain-nginx -n oigns
+
The output will look similar to the following:
+Name: governancedomain-nginx
+Namespace: oigns
+Address:
+Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
+Rules:
+ Host Path Backends
+ ---- ---- --------
+ *
+ /console governancedomain-adminserver:7001 (10.244.2.50:7001)
+ /consolehelp governancedomain-adminserver:7001 (10.244.2.50:7001)
+ /em governancedomain-adminserver:7001 (10.244.2.50:7001)
+ /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001)
+ /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+ /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000)
+Annotations: kubernetes.io/ingress.class: nginx
+ meta.helm.sh/release-name: governancedomain-nginx
+ meta.helm.sh/release-namespace: oigns
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/affinity-mode: persistent
+ nginx.ingress.kubernetes.io/enable-access-log: false
+ nginx.ingress.kubernetes.io/proxy-read-timeout: 180
+ nginx.ingress.kubernetes.io/proxy-send-timeout: 180
+ nginx.ingress.kubernetes.io/session-cookie-name: sticky
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Sync 27s nginx-ingress-controller Scheduled for sync
+
To confirm that the new ingress is successfully routing to the domain’s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework
:
Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}
.
$ curl -v http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready
+
For example:
+a) For NodePort
+$ curl -v http://masternode.example.com:31530/weblogic/ready
+
b) For LoadBalancer
+$ curl -v http://masternode.example.com:80/weblogic/ready
+
The output will look similar to the following:
+$ curl -v http://masternode.example.com:31530/weblogic/ready
+* About to connect() to masternode.example.com port 31530 (#0)
+* Trying X.X.X.X...
+* Connected to masternode.example.com (X.X.X.X) port 31530 (#0)
+> GET /weblogic/ready HTTP/1.1
+> User-Agent: curl/7.29.0
+> Host: masternode.example.com:31530
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Server: nginx/1.19.2
+< Date: <DATE>
+< Content-Length: 0
+< Connection: keep-alive
+<
+* Connection #0 to host masternode.example.com left intact
+
After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31530) as per Validate Domain URLs.
a. Generate the create domain script
+ + +The OIG deployment scripts demonstrate the creation of an OIG domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.
+Before you begin, perform the following steps:
+The sample scripts for Oracle Identity Governance domain deployment are available at $WORKDIR/kubernetes/create-oim-domain
.
Make a copy of the create-domain-inputs.yaml
file:
$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv
+$ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig
+
Edit the create-domain-inputs.yaml
and modify the following parameters. Save the file when complete:
Note: Do not edit any parameters other than the ones mentioned below.
+domainUID: <domain_uid>
+domainHome: /u01/oracle/user_projects/domains/<domain_uid>
+image: <image_name>
+imagePullSecretName: <container_registry_secret>
+weblogicCredentialsSecretName: <kubernetes_domain_secret>
+logHome: /u01/oracle/user_projects/domains/logs/<domain_id>
+namespace: <domain_namespace>
+persistentVolumeClaimName: <pvc_name>
+rcuSchemaPrefix: <rcu_prefix>
+rcuDatabaseURL: <rcu_db_host>:<rcu_db_port>/<rcu_db_service_name>
+rcuCredentialsSecret: <kubernetes_rcu_secret>
+frontEndHost: <front_end_hostname>
+frontEndPort: <front_end_port>
+
For example:
+domainUID: governancedomain
+domainHome: /u01/oracle/user_projects/domains/governancedomain
+image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October'23>
+imagePullSecretName: orclcred
+weblogicCredentialsSecretName: oig-domain-credentials
+logHome: /u01/oracle/user_projects/domains/logs/governancedomain
+namespace: oigns
+persistentVolumeClaimName: governancedomain-domain-pvc
+rcuSchemaPrefix: OIGK8S
+rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com
+rcuCredentialsSecret: oig-rcu-credentials
+frontEndHost: example.com
+frontEndPort: 14100
+
Note: For now frontEndHost and frontEndPort should be set to example.com and 14100 respectively. These values will be changed to the correct values in post installation tasks in Set OIMFrontendURL using MBeans.
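Before generating the domain, it can be worth quickly confirming the values you edited, for example with a grep over the inputs file (an optional check using the parameter names above):
$ grep -E 'domainUID|domainHome|image:|namespace|rcuSchemaPrefix|rcuDatabaseURL|frontEndHost|frontEndPort' create-domain-inputs.yaml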
A full list of parameters in the create-domain-inputs.yaml file is shown below:

Parameter | Definition | Default
---|---|---
adminPort | Port number for the Administration Server inside the Kubernetes cluster. | 7001
adminNodePort | Port number of the Administration Server outside the Kubernetes cluster. | 30701
adminServerName | Name of the Administration Server. | AdminServer
clusterName | Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oimcluster for the OIG domain. | oimcluster
configuredManagedServerCount | Number of Managed Server instances to generate for the domain. | 5
createDomainFilesDir | Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. | wlst
createDomainScriptsMountPath | Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. | /u01/weblogic
createDomainScriptName | Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-in scripts, you must use this property to set the name of the script that you want the create domain job to run. | create-domain-job.sh
domainHome | Home directory of the OIG domain. If not specified, the value is derived from the domainUID as /shared/domains/<domainUID>. | /u01/oracle/user_projects/domains/oimcluster
domainPVMountPath | Mount path of the domain persistent volume. | /u01/oracle/user_projects/domains
domainUID | Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. | oimcluster
exposeAdminNodePort | Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. | false
exposeAdminT3Channel | Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. | true
image | OIG container image. The operator requires OIG 12.2.1.4. Refer to OIG domains for details on how to obtain or create the image. | oracle/oig:12.2.1.4.0
imagePullPolicy | WebLogic container image pull policy. Legal values are IfNotPresent, Always, or Never. | IfNotPresent
imagePullSecretName | Name of the Kubernetes secret to access the container registry to pull the OIG container image. The presence of the secret will be validated when this parameter is specified. |
includeServerOutInPodLog | Boolean indicating whether to include the server .out to the pod's stdout. | true
initialManagedServerReplicas | Number of Managed Servers to initially start for the domain. | 2
javaOptions | Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). | -Dweblogic.StdoutDebugEnabled=false
logHome | The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/<domainUID>. | /u01/oracle/user_projects/domains/logs/oimcluster
managedServerNameBase | Base string used to generate Managed Server names. | oim_server
managedServerPort | Port number for each Managed Server. | 8001
namespace | Kubernetes namespace in which to create the domain. | oimcluster
persistentVolumeClaimName | Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as <domainUID>-weblogic-sample-pvc. | oimcluster-domain-pvc
productionModeEnabled | Boolean indicating if production mode is enabled for the domain. | true
serverStartPolicy | Determines which WebLogic Server instances will be started. Legal values are Never, IfNeeded, AdminOnly. | IfNeeded
t3ChannelPort | Port for the T3 channel of the NetworkAccessPoint. | 30012
t3PublicAddress | Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. | If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster
weblogicCredentialsSecretName | Name of the Kubernetes secret for the Administration Server's user name and password. If not specified, then the value is derived from the domainUID as <domainUID>-weblogic-credentials. | oimcluster-domain-credentials
weblogicImagePullSecretName | Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. |
serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit | The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. | Resource requests and resource limits are not specified.
rcuSchemaPrefix | The schema prefix to use in the database, for example OIGK8S. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. | OIGK8S
rcuDatabaseURL | The database URL. | oracle-db.default.svc.cluster.local:1521/devpdb.k8s
rcuCredentialsSecret | The Kubernetes secret containing the database credentials. | oimcluster-rcu-credentials
frontEndHost | The entry point URL for the OIM. | Not set
frontEndPort | The entry point port for the OIM. | Not set
datasourceType | Type of JDBC datasource applicable for the OIG domain. Legal values are agl and generic. Choose agl for Active GridLink datasource and generic for Generic datasource. For enterprise deployments, Oracle recommends that you use GridLink data sources to connect to Oracle RAC databases. See the Enterprise Deployment Guide for further details. | generic
Note that the names of the Kubernetes resources in the generated YAML files may be formed with the
+value of some of the properties specified in the create-inputs.yaml
file. Those properties include
+the adminServerName
, clusterName
and managedServerNameBase
. If those values contain any
+characters that are invalid in a Kubernetes service name, those characters are converted to
+valid values in the generated YAML files. For example, an uppercase letter is converted to a
+lowercase letter and an underscore ("_")
is converted to a hyphen ("-")
.
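For example, a minimal sketch (illustrative only, not part of the generated scripts) of the conversion applied to a Managed Server name such as oim_server1:
+$ echo "oim_server1" | tr '[:upper:]' '[:lower:]' | tr '_' '-'
+oim-server1
+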
The sample demonstrates how to create an OIG domain home and associated Kubernetes resources for a domain +that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts +to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.
+Run the create domain script, specifying your inputs file and an output directory to store the +generated artifacts:
+$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv
+$ mkdir output
+$ ./create-domain.sh -i create-domain-inputs.yaml -o /<path to output-directory>
+
For example:
+$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv
+$ mkdir output
+$ ./create-domain.sh -i create-domain-inputs.yaml -o output
+
The output will look similar to the following:
+Input parameters being used
+export version="create-weblogic-sample-domain-inputs-v1"
+export adminPort="7001"
+export adminServerName="AdminServer"
+export domainUID="governancedomain"
+export domainHome="/u01/oracle/user_projects/domains/governancedomain"
+export serverStartPolicy="IfNeeded"
+export clusterName="oim_cluster"
+export configuredManagedServerCount="5"
+export initialManagedServerReplicas="1"
+export managedServerNameBase="oim_server"
+export managedServerPort="14000"
+export image="container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October'23>"
+export imagePullPolicy="IfNotPresent"
+export imagePullSecretName="orclcred"
+export productionModeEnabled="true"
+export weblogicCredentialsSecretName="oig-domain-credentials"
+export includeServerOutInPodLog="true"
+export logHome="/u01/oracle/user_projects/domains/logs/governancedomain"
+export t3ChannelPort="30012"
+export exposeAdminT3Channel="false"
+export adminNodePort="30701"
+export exposeAdminNodePort="false"
+export namespace="oigns"
+javaOptions=-Dweblogic.StdoutDebugEnabled=false
+export persistentVolumeClaimName="governancedomain-domain-pvc"
+export domainPVMountPath="/u01/oracle/user_projects/domains"
+export createDomainScriptsMountPath="/u01/weblogic"
+export createDomainScriptName="create-domain-job.sh"
+export createDomainFilesDir="wlst"
+export rcuSchemaPrefix="OIGK8S"
+export rcuDatabaseURL="mydatabasehost.example.com:1521/orcl.example.com"
+export rcuCredentialsSecret="oig-rcu-credentials"
+export frontEndHost="example.com"
+export frontEndPort="14100"
+export datasourceType="generic"
+
+validateWlsDomainName called with governancedomain
+createFiles - valuesInputFile is create-domain-inputs.yaml
+createDomainScriptName is create-domain-job.sh
+Generating output/weblogic-domains/governancedomain/create-domain-job.yaml
+Generating output/weblogic-domains/governancedomain/delete-domain-job.yaml
+Generating output/weblogic-domains/governancedomain/domain.yaml
+Checking to see if the secret governancedomain-domain-credentials exists in namespace oigns
+configmap/governancedomain-create-fmw-infra-sample-domain-job-cm created
+Checking the configmap governancedomain-create-fmw-infra-sample-domain-job-cm was created
+configmap/governancedomain-create-fmw-infra-sample-domain-job-cm labeled
+Checking if object type job with name governancedomain-create-fmw-infra-sample-domain-job exists
+No resources found in oigns namespace.
+Creating the domain by creating the job output/weblogic-domains/governancedomain/create-domain-job.yaml
+job.batch/governancedomain-create-fmw-infra-sample-domain-job created
+Waiting for the job to complete...
+status on iteration 1 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 2 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 3 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 4 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 5 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 6 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 7 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 8 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 9 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 10 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running
+status on iteration 11 of 40
+pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Completed
+
+Domain governancedomain was created and will be started by the WebLogic Kubernetes Operator
+
+The following files were generated:
+ output/weblogic-domains/governancedomain/create-domain-inputs.yaml
+ output/weblogic-domains/governancedomain/create-domain-job.yaml
+ output/weblogic-domains/governancedomain/domain.yaml
+sed
+
+Completed
+$
+
Note: If the create domain script fails, refer to the Troubleshooting section.
+Navigate to the /output/weblogic-domains/<domain_uid>
directory:
$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>
+
For example:
+$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain
+
Edit the domain.yaml
and locate the section of the file starting with: - clusterName: oim_cluster
under governancedomain-oim-cluster
. Add the following lines:
serverPod:
+ env:
+ - name: USER_MEM_ARGS
+        value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+ resources:
+ limits:
+ cpu: "2"
+ memory: "8Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+
The file should look as follows:
+...
+apiVersion: weblogic.oracle/v1
+kind: Cluster
+metadata:
+ name: governancedomain-oim-cluster
+ namespace: oigns
+spec:
+ clusterName: oim_cluster
+ serverService:
+ precreateService: true
+ replicas: 0
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+ resources:
+ limits:
+ cpu: "2"
+ memory: "8Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+...
+
Note: The above CPU and memory values are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.
+Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m
” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.
Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.
+Note: If required you can also set the same resources and limits for the governancedomain-soa-cluster
.
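Once the OIG server pods are running later in this procedure, a quick way to confirm that the requests and limits above were applied is a jsonpath query (a sketch using this example's pod and namespace names):
+$ kubectl get pod governancedomain-oim-server1 -n oigns -o jsonpath='{.spec.containers[0].resources}'
+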
Create the Kubernetes resource using the following command:
+$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/<domain_uid>
+$ kubectl apply -f domain.yaml
+
For example:
+$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain
+$ kubectl apply -f domain.yaml
+
The output will look similar to the following:
+domain.weblogic.oracle/governancedomain unchanged
+cluster.weblogic.oracle/governancedomain-oim-cluster created
+cluster.weblogic.oracle/governancedomain-soa-cluster created
+
Run the following command to view the status of the OIG pods:
+$ kubectl get pods -n oigns
+
The output will initially look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 27m
+governancedomain-introspect-domain-job-p4brt 1/1 Running 0 6s
+helper 1/1 Running 0 3h30m
+
The introspect-domain-job
pod will be displayed first. Run the command again after several minutes and check to see that the Administration Server and SOA Server are both started. When started they should have STATUS
= Running
and READY
= 1/1
.
NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 7m30s
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 35m
+governancedomain-soa-server1 1/1 Running 0 4m
+helper 1/1 Running 0 3h38m
+
Note: It will take several minutes before all the pods listed above show. When a pod shows READY 0/1, the pod has started but the OIG server associated with it is still starting. While the pods are starting you can check the startup status in the pod logs, by running the following command:
$ kubectl logs governancedomain-adminserver -n oigns
+$ kubectl logs governancedomain-soa-server1 -n oigns
+
Check the clusters using the following command:
+$ kubectl get cluster -n oigns
+
The output will look similar to the following:
+NAME AGE
+governancedomain-oim-cluster 9m
+governancedomain-soa-cluster 9m
+
Start the OIM server using the following command:
+$ kubectl patch cluster -n <namespace> <OIMClusterName> --type=merge -p '{"spec":{"replicas":<initialManagedServerReplicas>}}'
+
For example:
+$ kubectl patch cluster -n oigns governancedomain-oim-cluster --type=merge -p '{"spec":{"replicas":1}}'
+
The output will look similar to the following:
+cluster.weblogic.oracle/governancedomain-oim-cluster patched
+
Run the following command to view the status of the OIG pods:
+$ kubectl get pods -n oigns
+
The output will initially look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 7m30s
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 35m
+governancedomain-oim-server1 1/1 Running 0 4m25s
+governancedomain-soa-server1 1/1 Running 0 4m
+helper 1/1 Running 0 3h38m
+
Note: It will take several minutes before the governancedomain-oim-server1
pod has a READY status
of 1/1
. While the pod is starting you can check the startup status in the pod log, by running the following command:
$ kubectl logs governancedomain-oim-server1 -n oigns
+
Verify the domain, server pods and services are created and in the READY
state (1/1) with a STATUS
of Running
, by running the following command:
$ kubectl get all,domains -n <domain_namespace>
+
For example:
+$ kubectl get all,domains -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+pod/governancedomain-adminserver 1/1 Running 0 19m30s
+pod/governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 47m
+pod/governancedomain-oim-server1 1/1 Running 0 16m25s
+pod/governancedomain-soa-server1 1/1 Running 0 16m
+pod/helper 1/1 Running 0 3h50m
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/governancedomain-adminserver ClusterIP None <none> 7001/TCP 28m
+service/governancedomain-cluster-oim-cluster ClusterIP 10.106.198.40 <none> 14002/TCP,14000/TCP 25m
+service/governancedomain-cluster-soa-cluster ClusterIP 10.102.218.11 <none> 8001/TCP 25m
+service/governancedomain-oim-server1 ClusterIP None <none> 14002/TCP,14000/TCP 16m24s
+service/governancedomain-oim-server2 ClusterIP 10.97.32.112 <none> 14002/TCP,14000/TCP 25m
+service/governancedomain-oim-server3 ClusterIP 10.100.233.109 <none> 14002/TCP,14000/TCP 25m
+service/governancedomain-oim-server4 ClusterIP 10.96.154.17 <none> 14002/TCP,14000/TCP 25m
+service/governancedomain-oim-server5 ClusterIP 10.103.222.213 <none> 14002/TCP,14000/TCP 25m
+service/governancedomain-soa-server1 ClusterIP None <none> 8001/TCP 25m
+service/governancedomain-soa-server2 ClusterIP 10.104.43.118 <none> 8001/TCP 25m
+service/governancedomain-soa-server3 ClusterIP 10.110.180.120 <none> 8001/TCP 25m
+service/governancedomain-soa-server4 ClusterIP 10.99.161.73 <none> 8001/TCP 25m
+service/governancedomain-soa-server5 ClusterIP 10.97.67.196 <none> 8001/TCP 25m
+
+NAME COMPLETIONS DURATION AGE
+job.batch/governancedomain-create-fmw-infra-sample-domain-job 1/1 3m6s 125m
+
+NAME AGE
+domain.weblogic.oracle/governancedomain 24m
+
+NAME AGE
+cluster.weblogic.oracle/governancedomain-oim-cluster 23m
+cluster.weblogic.oracle/governancedomain-soa-cluster 23m
+
The default domain created by the script has the following characteristics:
+- An Administration Server named AdminServer listening on port 7001.
+- A configured cluster named oig_cluster of size 5.
+- A configured cluster named soa_cluster of size 5.
+- One started OIG Managed Server, named oim_server1, listening on port 14000.
+- One started SOA Managed Server, named soa_server1, listening on port 8001.
+- Log files located in <persistent_volume>/logs/<domainUID>.
Run the following command to describe the domain:
+$ kubectl describe domain <domain_uid> -n <namespace>
+
For example:
+$ kubectl describe domain governancedomain -n oigns
+
The output will look similar to the following:
+Name: governancedomain
+Namespace: oigns
+Labels: weblogic.domainUID=governancedomain
+Annotations: <none>
+API Version: weblogic.oracle/v9
+Kind: Domain
+Metadata:
+ Creation Timestamp: <DATE>
+ Generation: 1
+ Managed Fields:
+ API Version: weblogic.oracle/v9
+ Fields Type: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ .:
+ f:kubectl.kubernetes.io/last-applied-configuration:
+ f:labels:
+ .:
+ f:weblogic.domainUID:
+ f:spec:
+ .:
+ f:adminServer:
+ .:
+ f:adminChannelPortForwardingEnabled:
+ f:serverPod:
+ .:
+ f:env:
+ f:serverStartPolicy:
+ f:clusters:
+ f:dataHome:
+ f:domainHome:
+ f:domainHomeSourceType:
+ f:failureRetryIntervalSeconds:
+ f:failureRetryLimitMinutes:
+ f:httpAccessLogInLogHome:
+ f:image:
+ f:imagePullPolicy:
+ f:imagePullSecrets:
+ f:includeServerOutInPodLog:
+ f:logHome:
+ f:logHomeEnabled:
+ f:logHomeLayout:
+ f:maxClusterConcurrentShutdown:
+ f:maxClusterConcurrentStartup:
+ f:maxClusterUnavailable:
+ f:replicas:
+ f:serverPod:
+ .:
+ f:env:
+ f:volumeMounts:
+ f:volumes:
+ f:serverStartPolicy:
+ f:webLogicCredentialsSecret:
+ .:
+ f:name:
+ Manager: kubectl-client-side-apply
+ Operation: Update
+ Time: <DATE>
+ API Version: weblogic.oracle/v9
+ Fields Type: FieldsV1
+ fieldsV1:
+ f:status:
+ .:
+ f:clusters:
+ f:conditions:
+ f:observedGeneration:
+ f:servers:
+ f:startTime:
+ Manager: Kubernetes Java Client
+ Operation: Update
+ Subresource: status
+ Time: <DATE>
+ Resource Version: 1247307
+ UID: 4933be73-df97-493f-a20c-bf1e24f6b3f2
+Spec:
+ Admin Server:
+ Admin Channel Port Forwarding Enabled: true
+ Server Pod:
+ Env:
+ Name: USER_MEM_ARGS
+ Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m
+ Server Start Policy: IfNeeded
+ Clusters:
+ Name: governancedomain-oim-cluster
+ Name: governancedomain-soa-cluster
+ Data Home:
+ Domain Home: /u01/oracle/user_projects/domains/governancedomain
+ Domain Home Source Type: PersistentVolume
+ Failure Retry Interval Seconds: 120
+ Failure Retry Limit Minutes: 1440
+ Http Access Log In Log Home: true
+ Image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October'23>
+ Image Pull Policy: IfNotPresent
+ Image Pull Secrets:
+ Name: orclcred
+ Include Server Out In Pod Log: true
+ Log Home: /u01/oracle/user_projects/domains/logs/governancedomain
+ Log Home Enabled: true
+ Log Home Layout: ByServers
+ Max Cluster Concurrent Shutdown: 1
+ Max Cluster Concurrent Startup: 0
+ Max Cluster Unavailable: 1
+ Replicas: 1
+ Server Pod:
+ Env:
+ Name: JAVA_OPTIONS
+ Value: -Dweblogic.StdoutDebugEnabled=false
+ Name: USER_MEM_ARGS
+ Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m
+ Volume Mounts:
+ Mount Path: /u01/oracle/user_projects/domains
+ Name: weblogic-domain-storage-volume
+ Volumes:
+ Name: weblogic-domain-storage-volume
+ Persistent Volume Claim:
+ Claim Name: governancedomain-domain-pvc
+ Server Start Policy: IfNeeded
+ Web Logic Credentials Secret:
+ Name: oig-domain-credentials
+Status:
+ Clusters:
+ Cluster Name: oim_cluster
+ Conditions:
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Available
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Completed
+ Label Selector: weblogic.domainUID=governancedomain,weblogic.clusterName=oim_cluster
+ Maximum Replicas: 5
+ Minimum Replicas: 0
+ Observed Generation: 2
+ Ready Replicas: 1
+ Replicas: 1
+ Replicas Goal: 1
+ Cluster Name: soa_cluster
+ Conditions:
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Available
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Completed
+ Label Selector: weblogic.domainUID=governancedomain,weblogic.clusterName=soa_cluster
+ Maximum Replicas: 5
+ Minimum Replicas: 0
+ Observed Generation: 1
+ Ready Replicas: 1
+ Replicas: 1
+ Replicas Goal: 1
+ Conditions:
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Available
+ Last Transition Time: <DATE>
+ Status: True
+ Type: Completed
+ Observed Generation: 1
+ Servers:
+ Health:
+ Activation Time: <DATE>
+ Overall Health: ok
+ Subsystems:
+ Subsystem Name: ServerRuntime
+ Symptoms:
+ Node Name: worker-node2
+ Pod Phase: Running
+ Pod Ready: True
+ Server Name: AdminServer
+ State: RUNNING
+ State Goal: RUNNING
+ Cluster Name: oim_cluster
+ Health:
+ Activation Time: <DATE>
+ Overall Health: ok
+ Subsystems:
+ Subsystem Name: ServerRuntime
+ Symptoms:
+ Node Name: worker-node1
+ Pod Phase: Running
+ Pod Ready: True
+ Server Name: oim_server1
+ State: RUNNING
+ State Goal: RUNNING
+ Cluster Name: oim_cluster
+ Server Name: oim_server2
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: oim_cluster
+ Server Name: oim_server3
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: oim_cluster
+ Server Name: oim_server4
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: oim_cluster
+ Server Name: oim_server5
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: soa_cluster
+ Health:
+ Activation Time: <DATE>
+ Overall Health: ok
+ Subsystems:
+ Subsystem Name: ServerRuntime
+ Symptoms:
+ Node Name: worker-node1
+ Pod Phase: Running
+ Pod Ready: True
+ Server Name: soa_server1
+ State: RUNNING
+ State Goal: RUNNING
+ Cluster Name: soa_cluster
+ Server Name: soa_server2
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: soa_cluster
+ Server Name: soa_server3
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: soa_cluster
+ Server Name: soa_server4
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Cluster Name: soa_cluster
+ Server Name: soa_server5
+ State: SHUTDOWN
+ State Goal: SHUTDOWN
+ Start Time: <DATE>
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Created 35m weblogic.operator Domain governancedomain was created.
+ Normal Changed 34m (x1127 over 35m) weblogic.operator Domain governancedomain was changed.
+ Warning Failed 34m (x227 over 35m) weblogic.operator Domain governancedomain failed due to 'Domain validation error': Cluster resource 'governancedomain-oim-cluster' not found in namespace 'oigns'
+ Cluster resource 'governancedomain-soa-cluster' not found in namespace 'oigns'. Update the domain resource to correct the validation error.
+ Warning Unavailable 17m weblogic.operator Domain governancedomain is unavailable: an insufficient number of its servers that are expected to be running are ready.";
+ Warning Incomplete 17m weblogic.operator Domain governancedomain is incomplete for one or more of the following reasons: there are failures detected, there are pending server shutdowns, or not all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version.
+ Normal Completed 13m (x2 over 26m) weblogic.operator Domain governancedomain is complete because all of the following are true: there is no failure detected, there are no pending server shutdowns, and all servers expected to be running are ready and at their target image, auxiliary images, restart version, and introspect version.
+ Normal Available 13m (x2 over 26m) weblogic.operator Domain governancedomain is available: a sufficient number of its servers have reached the ready state.
+
In the Status
section of the output, the available servers and clusters are listed.
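If you prefer a condensed view rather than the full describe output, a jsonpath query such as the following (a sketch assuming the same domain name and namespace) lists each cluster with its ready replica count:
+$ kubectl get domain governancedomain -n oigns \
+  -o jsonpath='{range .status.clusters[*]}{.clusterName}{": ready="}{.readyReplicas}{"\n"}{end}'
+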
Run the following command to see the pods running the servers and which nodes they are running on:
+$ kubectl get pods -n <namespace> -o wide
+
For example:
+$ kubectl get pods -n oigns -o wide
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+governancedomain-adminserver 1/1 Running 0 24m 10.244.1.42 worker-node2 <none> <none>
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 52m 10.244.1.40 worker-node2 <none> <none>
+governancedomain-oim-server1 1/1 Running 0 52m 10.244.1.44 worker-node2 <none> <none>
+governancedomain-soa-server1 1/1 Running 0 21m 10.244.1.43 worker-node2 <none> <none>
+helper 1/1 Running 0 3h55m 10.244.1.39 worker-node2 <none> <none>
+
You are now ready to configure an Ingress to direct traffic for your OIG domain as per Configure an ingress for an OIG domain.
+As described in Prepare Your Environment you can create your own OIG container image. If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Identity Governance image for production deployments.
+Using the WebLogic Image Tool, you can create a new Oracle Identity Governance image with PSU’s and interim patches or update an existing image with one or more interim patches.
+++Recommendations:
++
+- Use create for creating a new Oracle Identity Governance image containing the Oracle Identity Governance binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OIG patches because it optimizes the size of the image.
+- Use update for patching an existing Oracle Identity Governance image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
+
Verify that your environment meets the following prerequisites:
+To set up the WebLogic Image Tool:
+Create a working directory and change to it:
+$ mkdir <workdir>
+$ cd <workdir>
+
For example:
+$ mkdir /scratch/imagetool-setup
+$ cd /scratch/imagetool-setup
+
Download the latest version of the WebLogic Image Tool from the releases page.
+$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
+
where X.X.X is the latest release referenced on the releases page.
+Unzip the release ZIP file in the imagetool-setup
directory.
$ unzip imagetool.zip
+
Execute the following commands to set up the WebLogic Image Tool:
+$ cd <workdir>/imagetool-setup/imagetool/bin
+$ source setup.sh
+
For example:
+$ cd /scratch/imagetool-setup/imagetool/bin
+$ source setup.sh
+
To validate the setup of the WebLogic Image Tool:
+Enter the following command to retrieve the version of the WebLogic Image Tool:
+$ imagetool --version
+
Enter imagetool
then press the Tab key to display the available imagetool
commands:
$ imagetool <TAB>
+cache create help rebase update
+
The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp
, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR
:
+$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache
directory. Under this directory, the lookup information is stored in the .metadata
file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR
:
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
Creating an Oracle Identity Governance container image using the WebLogic Image Tool requires additional container scripts for Oracle Identity Governance domains.
+Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO
:
$ cd <workdir>/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
For example:
+$ cd /scratch/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
++Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.
+
After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create
a new Oracle Identity Governance image.
You must download the required Oracle Identity Governance installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.
+The installation binaries and patches required are:
+Oracle Identity and Access Management 12.2.1.4.0
+Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0
+Oracle SOA Suite for Oracle Middleware 12.2.1.4.0
+Oracle Service Bus 12.2.1.4.0
+OIG and FMW Infrastructure Patches:
+In the Container Image Download/Patch Details section, locate the Oracle Identity Governance (OIG) table. For the latest PSU, click the README link in the Documentation column. In the README, locate the “Installed Software” section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support.
+Oracle JDK v8
+The following files in the code repository location <imagetool-setup-location>/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0
are used for creating the image:
- additionalBuildCmds.txt
- buildArgs. Edit the <workdir>/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.
For example:
+create
+--jdkVersion=8u311
+--type oig
+--chown oracle:root
+--version=12.2.1.4.0
+--tag=oig-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts
+
Edit the <workdir>/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4.0/install.file
and under the GENERIC section add the line INSTALL_TYPE="Fusion Middleware Infrastructure". For example:
[GENERIC]
+INSTALL_TYPE="Fusion Middleware Infrastructure"
+DECLINE_SECURITY_UPDATES=true
+SECURITY_UPDATES_VIA_MYORACLESUPPORT=false
+
Add a JDK package to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
+
where XXX
is the JDK version downloaded.
Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_infrastructure.jar
+
+$ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_soa.jar
+
+$ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_osb.jar
+
+$ imagetool cache addInstaller --type idm --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_idm.jar
+
Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
+
Add the rest of the downloaded product patches to the WebLogic Image Tool cache:
+$ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
+
For example:
+$ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value <download location>/p33416868_122140_Generic.zip
+$ imagetool cache addEntry --key 33453703_12.2.1.4.0 --value <download location>/p33453703_122140_Generic.zip
+$ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value <download location>/p32999272_122140_Generic.zip
+$ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value <download location>/p33093748_122140_Generic.zip
+$ imagetool cache addEntry --key 33281560_12.2.1.4.0 --value <download location>/p33281560_122140_Generic.zip
+$ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value <download location>/p31544353_122140_Linux-x86-64.zip
+$ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value <download location>/p33313802_122140_Generic.zip
+$ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value <download location>/p33408307_122140_Generic.zip
+$ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value <download location>/p33286160_122140_Generic.zip
+$ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value <download location>/p32880070_122140_Generic.zip
+$ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value <download location>/p32905339_122140_Generic.zip
+$ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value <download location>/p32784652_122140_Generic.zip
+
Edit the <workdir>/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs
file and append the product patches and opatch patch as follows:
--patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0
+--opatchBugNumber=28186730_13.9.4.2.8
+
An example buildArgs
file is now as follows:
create
+--jdkVersion=8u301
+--type oig
+--version=12.2.1.4.0
+--tag=oig-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts
+--patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0
+--opatchBugNumber=28186730_13.9.4.2.8
+
++Note: In the
+buildArgs
file:+
+- +
--jdkVersion
value must match the--version
value used in theimagetool cache addInstaller
command for--type jdk
.- +
--version
value must match the--version
value used in theimagetool cache addInstaller
command for--type idm
.
Refer to this page for the complete list of options available with the WebLogic Image Tool create
command.
Create the Oracle Identity Governance image:
+$ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
++Note: Make sure that the absolute path to the
+buildargs
file is prepended with a@
character, as shown in the example above.
For example:
+$ imagetool @<imagetool-setup-location>/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
Check the created image using the docker images
command:
$ docker images | grep oig
+
The output will look similar to the following:
+oig-latestpsu 12.2.1.4.0 e391ed154bcb 50 seconds ago 4.43GB
+
Run the following command to save the container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oig-latestpsu.tar oig-latestpsu:12.2.1.4.0
+
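If the image needs to be made available on other worker nodes, the saved tar file can be copied there and loaded back into the local image store (a sketch using the file created above):
+$ docker load -i $WORKDIR/oig-latestpsu.tar
+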
The steps below show how to update an existing Oracle Identity Governance image with an interim patch.
+The container image to be patched must be loaded in the local docker images repository before attempting these steps.
+In the examples below the image oracle/oig:12.2.1.4.0
is updated with an interim patch.
$ docker images
+
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB
+
Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.
+Add the OPatch patch to the WebLogic Image Tool cache, for example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
+
Execute the imagetool cache addEntry
command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33165837_12214210708_Generic.zip
:
$ imagetool cache addEntry --key=33165837_12.2.1.4.210708 --value <downloaded-patches-location>/p33165837_12214210708_Generic.zip
+
Provide the following arguments to the WebLogic Image Tool update
command:
--fromImage
- Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oig:12.2.1.4.0
--patches
- Multiple patches can be specified as a comma-separated list.--tag
- Specify the new tag to be applied for the image being built.Refer here for the complete list of options available with the WebLogic Image Tool update
command.
++Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.
+
For example:
+$ imagetool update --fromImage oracle/oig:12.2.1.4.0 --tag=oracle/oig-new:12.2.1.4.0 --patches=33165837_12.2.1.4.210708 --opatchBugNumber=28186730_13.9.4.2.8
+
++Note: If the command fails because the files in the image being upgraded are not owned by
+oracle:oracle
, then add the parameter--chown <userid>:<groupid>
to correspond with the values returned in the error.
Check the built image using the docker images
command:
$ docker images | grep oig
+
The output will look similar to the following:
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oig-new 12.2.1.4.0 0c8381922e95 16 seconds ago 4.91GB
+oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB
+
Run the following command to save the patched container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oig-new.tar oracle/oig-new:12.2.1.4.0
+
Oracle supports the deployment of Oracle Identity Governance on Kubernetes. See the following sections:
+ + + + + + + +The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).
+In this release, OIG domains are supported using the “domain on a persistent volume” +model only, where the domain home is located in a persistent volume (PV).
+The operator has several key features to assist you with deploying and managing OIG domains in a Kubernetes +environment. You can:
+The current production release for the Oracle Identity Governance domain deployment on Kubernetes is 23.4.1. This release uses the WebLogic Kubernetes Operator version 4.1.2.
+For 4.0.X WebLogic Kubernetes Operator refer to Version 23.3.1
+For 3.4.X WebLogic Kubernetes Operator refer to Version 23.1.1
+See the Release Notes for recent changes and known issues for Oracle Identity Governance domain deployment on Kubernetes.
+See here for limitations in this release.
+This documentation explains how to configure OIG on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.
+If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. +Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OIG and no other Oracle Identity Management products.
+Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Identity Governance deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:
+To view documentation for an earlier release, see:
+ + + + + + + + + +Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh
script.
Run the following command to delete the domain:
+$ cd $WORKDIR/kubernetes/delete-domain
+$ ./delete-weblogic-domain-resources.sh -d <domain_uid>
+
For example:
+$ cd $WORKDIR/kubernetes/delete-domain
+$ ./delete-weblogic-domain-resources.sh -d governancedomain
+
Drop the RCU schemas as follows:
+$ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
+[oracle@helper ~]$
+[oracle@helper ~]$ export CONNECTION_STRING=<db_host.domain>:<db_port>/<service_name>
+[oracle@helper ~]$ export RCUPREFIX=<rcu_schema_prefix>
+
+/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
+-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
+-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
+-component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f < /tmp/pwd.txt
+
For example:
+$ kubectl exec -it helper -n oigns -- /bin/bash
+[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com
+[oracle@helper ~]$ export RCUPREFIX=OIGK8S
+/u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \
+-dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \
+-component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \
+-component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f < /tmp/pwd.txt
+
Delete the contents of the persistent volume:
+$ rm -rf <persistent_volume>/governancedomainpv/*
+
For example:
+$ rm -rf /scratch/shared/governancedomainpv/*
+
Delete the WebLogic Kubernetes Operator, by running the following command:
+$ helm delete weblogic-kubernetes-operator -n opns
+
Delete the label from the OIG namespace:
+$ kubectl label namespaces <domain_namespace> weblogic-operator-
+
For example:
+$ kubectl label namespaces oigns weblogic-operator-
+
Delete the service account for the operator:
+$ kubectl delete serviceaccount <sample-kubernetes-operator-sa> -n <domain_namespace>
+
For example:
+$ kubectl delete serviceaccount op-sa -n opns
+
Delete the operator namespace:
+$ kubectl delete namespace <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl delete namespace opns
+
To delete NGINX:
+$ helm delete governancedomain-nginx-designconsole -n <domain_namespace>
+
For example:
+$ helm delete governancedomain-nginx-designconsole -n oigns
+
Then run:
+$ helm delete governancedomain-nginx -n <domain_namespace>
+
For example:
+$ helm delete governancedomain-nginx -n oigns
+
Then run:
+$ helm delete nginx-ingress -n <domain_namespace>
+
For example:
+$ helm delete nginx-ingress -n nginxssl
+
Then delete the NGINX namespace:
+$ kubectl delete namespace <namespace>
+
For example:
+$ kubectl delete namespace nginxssl
+
Delete the OIG namespace:
+$ kubectl delete namespace <domain_namespace>
+
For example:
+$ kubectl delete namespace oigns
+
As OIG domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.
+This document shows the basic operations for starting, stopping and scaling servers in the OIG domain.
+For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.
+ +Do not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.
+Note: The instructions below are for starting, stopping, or scaling servers manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.
+The default OIG deployment starts the Administration Server (AdminServer
), one OIG Managed Server (oim_server1
) and one SOA Managed Server (soa_server1
).
The deployment also creates, but doesn’t start, four extra OIG Managed Servers (oim_server2 to oim_server5) and four more SOA Managed Servers (soa_server2 to soa_server5).
All these servers are visible in the WebLogic Server Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
by navigating to Domain Structure > governancedomain > Environment > Servers.
To view the running servers using kubectl, run the following command:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output should look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 23h
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Running 0 23h
+governancedomain-soa-server1 1/1 Running 0 23h
+
The number of OIG Managed Servers running is dependent on the replicas
parameter configured for the cluster. To start more OIG Managed Servers perform the following steps:
Run the following kubectl command to edit the oim_cluster:
+$ kubectl edit cluster <cluster_name> -n <domain_namespace>
+
For example:
+$ kubectl edit cluster governancedomain-oim-cluster -n oigns
+
Note: This opens an edit session for the domain where parameters can be changed using standard vi
commands.
In the edit session, search for spec:
, and then look for the replicas
parameter under clusterName: oim_cluster
. By default the replicas parameter is set to “1” hence a single OIG Managed Server is started (oim_server1
):
spec:
+ clusterName: oim_cluster
+ replicas: 1
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
+...
+
To start more OIG Managed Servers, increase the replicas
value as desired. In the example below, one more Managed Server will be started by setting replicas
to “2”:
spec:
+ clusterName: oim_cluster
+ replicas: 2
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
+...
+
Save the file and exit (:wq)
+The output will look similar to the following:
+cluster.weblogic.oracle/governancedomain-oim-cluster edited
+
Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 23h
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Running 0 23h
+governancedomain-oim-server2 0/1 Running 0 7s
+governancedomain-soa-server1 1/1 Running 0 23h
+
One new pod (governancedomain-oim-server2
) is started, but currently has a READY
status of 0/1
. This means oim_server2
is not currently running but is in the process of starting. The server will take several minutes to start so keep executing the command until READY
shows 1/1
:
NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 23h
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Running 0 23h
+governancedomain-oim-server2 1/1 Running 0 5m27s
+governancedomain-soa-server1 1/1 Running 0 23h
+
Note: To check what is happening during server startup when READY
is 0/1
, run the following command to view the log of the pod that is starting:
$ kubectl logs <pod> -n <domain_namespace>
+
For example:
+$ kubectl logs governancedomain-oim-server2 -n oigns
+
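As an alternative to the interactive kubectl edit session above, the same replicas change can be applied non-interactively with kubectl patch (a sketch using this example's cluster and namespace):
+$ kubectl patch cluster governancedomain-oim-cluster -n oigns --type=merge -p '{"spec":{"replicas":2}}'
+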
As mentioned in the previous section, the number of OIG Managed Servers running is dependent on the replicas
parameter configured for the cluster. To stop one or more OIG Managed Servers, perform the following:
Run the following kubectl command to edit the oim_cluster:
+$ kubectl edit cluster <cluster_name> -n <domain_namespace>
+
For example:
+$ kubectl edit cluster governancedomain-oim-cluster -n oigns
+
In the edit session, search for spec:
, and then look for the replicas
parameter under clusterName: oim_cluster
. In the example below replicas
is set to “2” hence two OIG Managed Servers are started (oim_server1
and oim_server2
):
spec:
+ clusterName: oim_cluster
+ replicas: 2
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
+...
+
To stop OIG Managed Servers, decrease the replicas
value as desired. In the example below, we will stop one Managed Server by setting replicas to “1”:
spec:
+ clusterName: oim_cluster
+ replicas: 1
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
+...
+
Save the file and exit (:wq)
+Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 23h
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Running 0 23h
+governancedomain-oim-server2 1/1 Terminating 0 7m30s
+governancedomain-soa-server1 1/1 Running 0 23h
+
The exiting pod shows a STATUS
of Terminating
(governancedomain-oim-server2
). The server may take a minute or two to stop, so keep executing the command until the pod has disappeared:
NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 23h
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Running 0 23h
+governancedomain-soa-server1 1/1 Running 0 23h
+
To stop all the OIG Managed Servers and the Administration Server in one operation:
+Run the following kubectl command to edit the domain:
+$ kubectl edit domain <domain_uid> -n <domain_namespace>
+
For example:
+$ kubectl edit domain governancedomain -n oigns
+
In the edit session search for serverStartPolicy: IfNeeded
under the domain spec:
...
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ volumes:
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: governancedomain-domain-pvc
+ serverStartPolicy: IfNeeded
+ webLogicCredentialsSecret:
+ name: oig-domain-credentials
+ ...
+
Change serverStartPolicy: IfNeeded
to Never
as follows:
...
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ volumes:
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: governancedomain-domain-pvc
+ serverStartPolicy: Never
+ webLogicCredentialsSecret:
+ name: oig-domain-credentials
+ ...
+
Save the file and exit (:wq).
+Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Terminating 0 23h
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Terminating 0 23h
+governancedomain-soa-server1 1/1 Terminating 0 23h
+
The AdminServer pod and Managed Server pods will move to a STATUS
of Terminating
. After a few minutes, run the command again and the pods should have disappeared:
NAME READY STATUS RESTARTS AGE
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+
To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: Never
to IfNeeded
as follows:
...
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ volumes:
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: governancedomain-domain-pvc
+ serverStartPolicy: IfNeeded
+ webLogicCredentialsSecret:
+ name: oig-domain-credentials
+ ...
+
Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 0/1 Running 0 4s
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+
The Administration Server pod will start followed by the OIG Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY
status 1/1
:
NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 6m57s
+governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Running 0 4m33s
+governancedomain-soa-server1 1/1 Running 0 4m33s
+
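As an alternative to editing the domain resource interactively in the steps above, the same serverStartPolicy change can be applied with kubectl patch (a sketch using this example's domain and namespace):
+# Stop all servers in the domain:
+$ kubectl patch domain governancedomain -n oigns --type=merge -p '{"spec":{"serverStartPolicy":"Never"}}'
+# Start them again:
+$ kubectl patch domain governancedomain -n oigns --type=merge -p '{"spec":{"serverStartPolicy":"IfNeeded"}}'
+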
The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.
+Note: Prior to running these scripts, you must have previously created and deployed the domain.
+The scripts are located in the $WORKDIR/kubernetes/domain-lifecycle
directory. For more information, see the README.
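For example, a sketch of starting and stopping a single Managed Server with those scripts (assuming the startServer.sh/stopServer.sh names and -d/-n/-s options described in the operator's domain-lifecycle README):
+$ cd $WORKDIR/kubernetes/domain-lifecycle
+$ ./startServer.sh -d governancedomain -n oigns -s oim_server2
+$ ./stopServer.sh -d governancedomain -n oigns -s oim_server2
+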
Kubernetes Horizontal Pod Autoscaler (HPA) is supported from Weblogic Kubernetes Operator 4.0.X and later.
+HPA allows automatic scaling (up and down) of the OIG Managed Servers. If load increases then extra OIG Managed Servers will be started as required, up to the value configuredManagedServerCount
defined when the domain was created (see Prepare the create domain script). Similarly, if load decreases, OIG Managed Servers will be automatically shutdown.
For more information on HPA, see Horizontal Pod Autoscaling.
+The instructions below show you how to configure and run an HPA to scale an OIG cluster (governancedomain-oim-cluster
) resource, based on CPU utilization or memory resource metrics. If required, you can also perform the following for the governancedomain-soa-cluster
.
Note: If you enable HPA and then decide you want to start/stop/scale OIG Managed servers manually as per Domain Life Cycle, it is recommended to delete HPA beforehand as per Delete the HPA.
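A minimal sketch of deleting the HPA created later in this section (assuming the same HPA name and namespace):
+$ kubectl delete hpa governancedomain-oim-cluster-hpa -n oigns
+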
+In order to use HPA, the OIG domain must have been created with the required resources
parameter as per Set the OIM server memory parameters. For example:
serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: "-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m"
+ resources:
+ limits:
+ cpu: "2"
+ memory: "8Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+
If you created the OIG domain without setting these parameters, then you can update the domain using the following steps:
+Run the following command to edit the cluster:
+$ kubectl edit cluster governancedomain-oim-cluster -n oigns
+
Note: This opens an edit session for the governancedomain-oim-cluster
where parameters can be changed using standard vi commands.
In the edit session, search for spec:
, and then look for the replicas parameter under clusterName: oim_cluster
. Change the entry so it looks as follows:
spec:
+ clusterName: oim_cluster
+ replicas: 1
+ serverPod:
+ env:
+ - name: USER_MEM_ARGS
+ value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: 1000m
+ memory: 4Gi
+ serverService:
+ precreateService: true
+ ...
+
Save the file and exit (:wq!)
+The output will look similar to the following:
+cluster.weblogic.oracle/governancedomain-oim-cluster edited
+
The OIG Managed Server pods will then automatically be restarted.
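You can watch the rolling restart of the pods with the following (a sketch using this example's namespace; press Ctrl+C to stop watching):
+$ kubectl get pods -n oigns -w
+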
+Before deploying HPA you must deploy the Kubernetes Metrics Server.
+Check to see if the Kubernetes Metrics Server is already deployed:
+$ kubectl get pods -n kube-system | grep metric
+
If a row is returned as follows, then the Kubernetes Metrics Server is deployed and you can move to Deploy HPA.
+metrics-server-d9694457-mf69d 1/1 Running 0 5m13s
+
If no rows are returned by the previous command, then the Kubernetes Metrics Server needs to be deployed. Run the following commands to get the components.yaml
:
$ mkdir $WORKDIR/kubernetes/hpa
+$ cd $WORKDIR/kubernetes/hpa
+$ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+
Deploy the Kubernetes Metrics Server by running the following command:
+$ kubectl apply -f components.yaml
+
The output will look similar to the following:
+serviceaccount/metrics-server created
+clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
+clusterrole.rbac.authorization.k8s.io/system:metrics-server created
+rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
+clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
+clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
+service/metrics-server created
+deployment.apps/metrics-server created
+apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
+
Run the following command to check that the Kubernetes Metrics Server is running:
+$ kubectl get pods -n kube-system | grep metric
+
Make sure the pod has a READY
status of 1/1
:
metrics-server-d9694457-mf69d 1/1 Running 0 39s
+
If the Kubernetes Metrics Server does not reach the READY 1/1
state, run the following commands:
$ kubectl describe pod <metrics-server-pod> -n kube-system
+$ kubectl logs <metrics-server-pod> -n kube-system
+
If you see errors such as:
+Readiness probe failed: HTTP probe failed with statuscode: 500
+
and:
+E0907 13:07:50.937308 1 scraper.go:140] "Failed to scrape node" err="Get \"https://100.105.18.113:10250/metrics/resource\": x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs" node="worker-node1"
+
then you may need to install a valid cluster certificate for your Kubernetes cluster.
+For testing purposes, you can resolve this issue by:
+Delete the Kubernetes Metrics Server by running the following command:
+$ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml
+
Edit the $WORKDIR/kubernetes/hpa/components.yaml
and locate the args:
section. Add --kubelet-insecure-tls
to the arguments. For example:
spec:
+ containers:
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --kubelet-insecure-tls
+ - --metric-resolution=15s
+ image: registry.k8s.io/metrics-server/metrics-server:v0.6.4
+ ...
+
Deploy the Kubernetes Metrics Server using the command:
+$ kubectl apply -f components.yaml
+
Run the following and make sure the READY status shows 1/1
:
$ kubectl get pods -n kube-system | grep metric
+
The output should look similar to the following:
+metrics-server-d9694457-mf69d 1/1 Running 0 40s
+
The steps below show how to configure and run an HPA to scale the governancedomain-oim-cluster
, based on the CPU or memory utilization resource metrics.
The default OIG deployment creates the cluster governancedomain-oim-cluster
which starts one OIG Managed Server (oim_server1
). The deployment also creates, but doesn’t start, four extra OIG Managed Servers (oim_server2 to oim_server5).
In the following example an HPA resource is created, targeted at the cluster resource governancedomain-oim-cluster
. This resource will autoscale OIG Managed Servers from a minimum of 1 cluster member up to 5 cluster members. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.
Navigate to the $WORKDIR/kubernetes/hpa directory and create an autoscalehpa.yaml file that contains the following:
#
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: governancedomain-oim-cluster-hpa
+ namespace: oigns
+spec:
+ scaleTargetRef:
+ apiVersion: weblogic.oracle/v1
+ kind: Cluster
+ name: governancedomain-oim-cluster
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 60
+ scaleUp:
+ stabilizationWindowSeconds: 60
+ minReplicas: 1
+ maxReplicas: 5
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 70
+
Note: minReplicas and maxReplicas should match your current domain settings.
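If you are unsure what the cluster is currently configured with, one way to check (a sketch, assuming the oigns namespace and the cluster resource name used in this guide) is to view the Cluster resource directly:
$ kubectl get clusters.weblogic.oracle governancedomain-oim-cluster -n oigns -o yaml

The spec section shows the current replicas value; together with the five configured Managed Servers described above, this indicates sensible bounds for minReplicas and maxReplicas.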
Note: To configure the HPA based on memory metrics instead, update the metrics block with the following content. It is recommended to use either CPU or memory metrics, not both.
+metrics:
+- type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: 70
+
Run the following command to create the autoscaler:
+$ kubectl apply -f autoscalehpa.yaml
+
The output will look similar to the following:
+horizontalpodautoscaler.autoscaling/governancedomain-oim-cluster-hpa created
+
Verify the status of the autoscaler by running the following:
+$ kubectl get hpa -n oigns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+governancedomain-oim-cluster-hpa Cluster/governancedomain-oim-cluster 16%/70% 1 5 1 20s
+
In the example above, this shows that CPU is currently running at 16% for the governancedomain-oim-cluster-hpa
.
Check the current status of the OIG Managed Servers:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 20m
+governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h
+governancedomain-oim-server1 1/1 Running 0 17m
+governancedomain-soa-server1 1/1 Running 0 17m
+helper 1/1 Running 0 2d18h
+
In the above only governancedomain-oim-server1
is running.
To test HPA can scale up the WebLogic cluster governancedomain-oim-cluster
, run the following commands:
$ kubectl exec --stdin --tty governancedomain-oim-server1 -n oigns -- /bin/bash
+
This will take you inside a bash shell inside the oim_server1
pod:
[oracle@governancedomain-oim-server1 oracle]$
+
Inside the bash shell, run the following command to increase the load on the CPU:
+[oracle@governancedomain-oim-server1 oracle]$ dd if=/dev/zero of=/dev/null
+
This command will continue to run in the foreground.
+In a command window outside the bash shell, run the following command to view the current CPU usage:
+$ kubectl get hpa -n oigns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+governancedomain-oim-cluster-hpa Cluster/governancedomain-oim-cluster 386%/70% 1 5 1 2m47s
+
In the above example the CPU has increased to 386%. As this is above the 70% limit, the autoscaler increases the replicas on the Cluster resource and the operator responds by starting additional cluster members.
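You can also inspect the autoscaler itself to see the scaling decisions it has made; a minimal check (assuming the HPA name and namespace used above) is:
$ kubectl describe hpa governancedomain-oim-cluster-hpa -n oigns

The Events section at the end of the output records when the HPA changed the replica count and why.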
+Run the following to see if any more OIG Managed Servers are started:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 30m
+governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h
+governancedomain-oim-server1 1/1 Running 0 27m
+governancedomain-oim-server2 1/1 Running 0 10m
+governancedomain-oim-server3 1/1 Running 0 10m
+governancedomain-oim-server4 1/1 Running 0 10m
+governancedomain-oim-server5 1/1 Running 0 10m
+governancedomain-soa-server1 1/1 Running 0 27m
+helper 1/1 Running 0 2d18h
+
In the example above, four more OIG Managed Servers have been started (oim-server2 to oim-server5).
Note: It may take some time for the servers to appear and start. Once the servers show a READY status of 1/1, they are started.
To stop the load on the CPU, press Ctrl-C in the bash shell, and then exit the bash shell:
+[oracle@governancedomain-oim-server1 oracle]$ dd if=/dev/zero of=/dev/null
+^C
+[oracle@governancedomain-oim-server1 oracle]$ exit
+
Run the following command to view the current CPU usage:
+$ kubectl get hpa -n oigns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+governancedomain-oim-cluster-hpa Cluster/governancedomain-oim-cluster 33%/70% 1 5 5 37m
+
In the above example CPU has dropped to 33%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 43m
+governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h
+governancedomain-oim-server1 1/1 Running 0 40m
+governancedomain-oim-server2 1/1 Running 0 13m
+governancedomain-oim-server3 1/1 Running 0 13m
+governancedomain-oim-server4 1/1 Running 0 13m
+governancedomain-oim-server5 0/1 Terminating 0 13m
+governancedomain-soa-server1 1/1 Running 0 40m
+helper 1/1 Running 0 2d19h
+
Eventually, all the servers except oim-server1
will disappear:
NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 44m
+governancedomain-create-fmw-infra-sample-domain-job-8wd2b 0/1 Completed 0 2d18h
+governancedomain-oim-server1 1/1 Running 0 41m
+governancedomain-soa-server1 1/1 Running 0 41m
+helper 1/1 Running 0 2d20h
+
If you need to delete the HPA, you can do so by running the following command:
+$ cd $WORKDIR/kubernetes/hpa
+$ kubectl delete -f autoscalehpa.yaml
+
Important considerations for Oracle Identity Governance domains in Kubernetes.
Learn about the domain lifecycle of an OIG domain.
Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OIG Domain.
Describes the steps for running OIG utilities in Kubernetes.
Describes the steps for logging and visualization with Elasticsearch and Kibana.
Describes the steps for Monitoring the OIG domain and Publishing the logs to Elasticsearch.
Describes the steps for implementing the Horizontal Pod Autoscaler.
Learn about the steps to cleanup the OIG domain home.
After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.
+If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow Installing Elasticsearch (ELK) Stack and Kibana.
+In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with values applicable to your environment.
+Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.
+The table below outlines the variables and values you must set:
| Variable | Sample Value | Description |
|---|---|---|
| <ELK_VER> | 8.3.1 | The version of logstash you want to install. |
| <ELK_SSL> | true | If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase. |
| <ELK_HOSTS> | https://elasticsearch.example.com:9200 | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. |
| <ELKNS> | oigns | The domain namespace. |
| <ELK_USER> | logstash_internal | The name of the user for logstash to access Elasticsearch. |
| <ELK_PASSWORD> | password | The password for ELK_USER. |
| <ELK_APIKEY> | apikey | The API key details. |
You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt.
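If you need a base64 encoded copy of that certificate, for example to paste it into a YAML file, one way to produce it (a sketch, assuming a Linux host with GNU coreutils and the file name elk.crt) is:
$ base64 -w 0 elk.crt > elk.crt.b64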
Create a Kubernetes secret for Elasticsearch using the API Key or Password.
+a) If ELK uses an API Key for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password=<ELK_APIKEY>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
b) If ELK uses a password for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password=<ELK_PASSWORD>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.
+Create a Kubernetes secret to access the required images on hub.docker.com:
+Note: Before executing the command below, you must first have a user account on hub.docker.com.
+kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
+--docker-username="<DOCKER_USER_NAME>" \
+--docker-password=<DOCKER_PASSWORD> --docker-email=<DOCKER_EMAIL_ID> \
+--namespace=<domain_namespace>
+
For example,
+kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \
+--docker-username="user@example.com" \
+--docker-password=password --docker-email=user@example.com \
+--namespace=oigns
+
The output will look similar to the following:
+secret/dockercred created
+
Run the following command to get the mountPath
of your domain:
$ kubectl describe domains <domain_uid> -n <domain_namespace> | grep "Mount Path"
+
For example:
+$ kubectl describe domains governancedomain -n oigns | grep "Mount Path"
+
The output will look similar to the following:
+Mount Path: /u01/oracle/user_projects/domains
+
Run the following command to get the OIG domain persistence volume details:
+$ kubectl get pv -n <domain_namespace>
+
For example:
+$ kubectl get pv -n oigns
+
The output will look similar to the following:
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+governancedomain-domain-pv 10Gi RWX Retain Bound oigns/governancedomain-domain-pvc governancedomain-oim-storage-class 28h
+
Make note of the CLAIM value, for example in this case governancedomain-domain-pvc.
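Alternatively, you can list the persistent volume claims in the domain namespace directly, which shows the same CLAIM name (assuming the oigns namespace):
$ kubectl get pvc -n oigns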
Copy the elk.crt
file to the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory.
Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory and run the following:
kubectl create configmap elk-cert --from-file=elk.crt -n <namespace>
+
For example:
+kubectl create configmap elk-cert --from-file=elk.crt -n oigns
+
The output will look similar to the following:
+configmap/elk-cert created
+
Create a logstash_cm.yaml
file in the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory as follows:
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: oig-logstash-configmap
+ namespace: <ELKNS>
+data:
+ logstash.yml: |
+ #http.host: "0.0.0.0"
+ logstash-config.conf: |
+ input {
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log"
+ tags => "Adminserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log"
+ tags => "soaserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log"
+ tags => "Oimserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
+ tags => "Adminserver_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log"
+ tags => "Soa_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log"
+ tags => "Oimserver_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log"
+ tags => "Access_logs"
+ start_position => beginning
+ }
+ }
+ filter {
+ grok {
+ match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc} > <%{DATA:log_number}> <%{DATA:log_message}>" ]
+ }
+ if "_grokparsefailure" in [tags] {
+ mutate {
+ remove_tag => [ "_grokparsefailure" ]
+ }
+ }
+ }
+ output {
+ elasticsearch {
+ hosts => ["<ELK_HOSTS>"]
+ cacert => '/usr/share/logstash/config/certs/elk.crt'
+ index => "oiglogs-000001"
+ ssl => <ELK_SSL>
+ ssl_certificate_verification => false
+ user => "<ELK_USER>"
+ password => "${ELASTICSEARCH_PASSWORD}"
+ api_key => "${ELASTICSEARCH_PASSWORD}"
+ }
+ }
+
Change the values in the above file as follows:
- Change the <ELKNS>, <ELK_HOSTS>, <ELK_SSL>, and <ELK_USER> to match the values for your environment.
- Change /u01/oracle/user_projects/domains to match the mountPath returned earlier.
- If your domainUID is anything other than governancedomain, change each instance of governancedomain to your domainUID.
- If using an API key for authentication, delete the user and password lines.
- If using a password for authentication, delete the api_key line.
- If no authentication is used, delete the user, password, and api_key lines.
For example:
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: oig-logstash-configmap
+ namespace: oigns
+data:
+ logstash.yml: |
+ #http.host: "0.0.0.0"
+ logstash-config.conf: |
+ input {
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log"
+ tags => "Adminserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log"
+ tags => "soaserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log"
+ tags => "Oimserver_log"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log"
+ tags => "Adminserver_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log"
+ tags => "Soa_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log"
+ tags => "Oimserver_diagnostic"
+ start_position => beginning
+ }
+ file {
+ path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log"
+ tags => "Access_logs"
+ start_position => beginning
+ }
+ }
+ filter {
+ grok {
+ match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc} > <%{DATA:log_number}> <%{DATA:log_message}>" ]
+ }
+ if "_grokparsefailure" in [tags] {
+ mutate {
+ remove_tag => [ "_grokparsefailure" ]
+ }
+ }
+ }
+ output {
+ elasticsearch {
+ hosts => ["https://elasticsearch.example.com:9200"]
+ cacert => '/usr/share/logstash/config/certs/elk.crt'
+ index => "oiglogs-000001"
+ ssl => true
+ ssl_certificate_verification => false
+ user => "logstash_internal"
+ password => "${ELASTICSEARCH_PASSWORD}"
+ }
+ }
+
Run the following command to create the configmap:
+$ kubectl apply -f logstash_cm.yaml
+
The output will look similar to the following:
+configmap/oig-logstash-configmap created
+
Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana
directory and create a logstash.yaml
file as follows:
apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: oig-logstash
+ namespace: <ELKNS>
+spec:
+ selector:
+ matchLabels:
+ k8s-app: logstash
+ template: # create pods using pod definition in this template
+ metadata:
+ labels:
+ k8s-app: logstash
+ spec:
+ imagePullSecrets:
+ - name: dockercred
+ containers:
+ - command:
+ - logstash
+ image: logstash:<ELK_VER>
+ imagePullPolicy: IfNotPresent
+ name: oig-logstash
+ env:
+ - name: ELASTICSEARCH_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-pw-elastic
+ key: password
+ resources:
+ ports:
+ - containerPort: 5044
+ name: logstash
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ - name: shared-logs
+ mountPath: /shared-logs
+ - mountPath: /usr/share/logstash/pipeline/
+ name: oig-logstash-pipeline
+ - mountPath: /usr/share/logstash/config/logstash.yml
+ subPath: logstash.yml
+ name: config-volume
+ - mountPath: /usr/share/logstash/config/certs
+ name: elk-cert
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: elk.crt
+ path: elk.crt
+ name: elk-cert
+ name: elk-cert
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash-config.conf
+ path: logstash-config.conf
+ name: oig-logstash-configmap
+ name: oig-logstash-pipeline
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash.yml
+ path: logstash.yml
+ name: oig-logstash-configmap
+ name: config-volume
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: governancedomain-domain-pvc
+ - name: shared-logs
+ emptyDir: {}
+
Change the values in the above file as follows:
- Change the <ELKNS> and <ELK_VER> to match the values for your environment.
- Change /u01/oracle/user_projects/domains to match the mountPath returned earlier.
- Change the claimName value to match the claimName returned earlier.
- If your cluster does not have access to the internet to pull external images, change image: logstash:<ELK_VER> to the location of the image in your container registry, for example: container-registry.example.com/logstash:8.3.1
For example:
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: oig-logstash
+ namespace: oigns
+spec:
+ selector:
+ matchLabels:
+ k8s-app: logstash
+ template: # create pods using pod definition in this template
+ metadata:
+ labels:
+ k8s-app: logstash
+ spec:
+ imagePullSecrets:
+ - name: dockercred
+ containers:
+ - command:
+ - logstash
+ image: logstash:8.3.1
+ imagePullPolicy: IfNotPresent
+ name: oig-logstash
+ env:
+ - name: ELASTICSEARCH_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elasticsearch-pw-elastic
+ key: password
+ resources:
+ ports:
+ - containerPort: 5044
+ name: logstash
+ volumeMounts:
+ - mountPath: /u01/oracle/user_projects/domains
+ name: weblogic-domain-storage-volume
+ - name: shared-logs
+ mountPath: /shared-logs
+ - mountPath: /usr/share/logstash/pipeline/
+ name: oig-logstash-pipeline
+ - mountPath: /usr/share/logstash/config/logstash.yml
+ subPath: logstash.yml
+ name: config-volume
+ - mountPath: /usr/share/logstash/config/certs
+ name: elk-cert
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: elk.crt
+ path: elk.crt
+ name: elk-cert
+ name: elk-cert
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash-config.conf
+ path: logstash-config.conf
+ name: oig-logstash-configmap
+ name: oig-logstash-pipeline
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: logstash.yml
+ path: logstash.yml
+ name: oig-logstash-configmap
+ name: config-volume
+ - name: weblogic-domain-storage-volume
+ persistentVolumeClaim:
+ claimName: governancedomain-domain-pvc
+ - name: shared-logs
+ emptyDir: {}
+
Deploy the logstash
pod by executing the following command:
$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml
+
The output will look similar to the following:
+deployment.apps/oig-logstash created
+
Run the following command to check the logstash
pod is created correctly:
$ kubectl get pods -n <namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output should look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 90m
+governancedomain-create-fmw-infra-sample-domain-job-fqgnr 0/1 Completed 0 2d19h
+governancedomain-oim-server1 1/1 Running 0 88m
+governancedomain-soa-server1 1/1 Running 0 88m
+helper 1/1 Running 0 2d20h
+oig-logstash-77fbbc66f8-lsvcw 1/1 Running 0 3m25s
+
Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:
+$ kubectl logs -f oig-logstash-<pod> -n oigns
+
Most errors occur due to misconfiguration of the logstash_cm.yaml
or logstash.yaml
. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.
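Before recreating anything, it can also help to render the configmaps back out and check that the pipeline configuration and the certificate were stored as you expect (assuming the names used above):
$ kubectl get configmap oig-logstash-configmap -n oigns -o yaml
$ kubectl get configmap elk-cert -n oigns -o yaml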
If the pod has errors, delete the pod and configmap as follows:
+$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml
+$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml
+
Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod.
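For convenience, the recreate steps are the same commands used earlier:
$ cd $WORKDIR/kubernetes/elasticsearch-and-kibana
$ kubectl apply -f logstash_cm.yaml
$ kubectl create -f logstash.yaml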
+To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.
+For Kibana 7.7.x and below:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Kibana > Index Patterns.
+In the Create Index Pattern page enter oiglogs*
for the Index pattern and click Next Step.
In the Configure settings page, from the Time Filter field name drop down menu select @timestamp
and click Create index pattern.
Once the index pattern is created click on Discover in the navigation menu to view the OIG logs.
+For Kibana version 7.8.X and above:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Stack Management.
+Click Data Views in the Kibana section.
+Click Create Data View and enter the following information:
- Name: oiglogs*
- Timestamp field: @timestamp
Click Create Data View.
+From the Navigation menu, click Discover to view the log file entries.
+From the drop down menu, select oiglogs*
to view the log file entries.
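If no log entries appear in Kibana, a quick way to confirm that logstash is writing to the index is to query Elasticsearch directly (a sketch, assuming password authentication, a user with read access, and the sample host used earlier):
$ curl -k -u logstash_internal:<password> "https://elasticsearch.example.com:9200/_cat/indices/oiglogs*?v"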
After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain.
+The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.
+There are two ways to set up monitoring and you should choose one method or the other:
The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh
script sets up the monitoring for the OIG domain. It installs Prometheus, Grafana, the WebLogic Monitoring Exporter and deploys the web applications to the OIG domain. It also deploys the WebLogic Server Grafana dashboard.
For usage details execute ./setup-monitoring.sh -h
.
Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml
and change the domainUID
, domainNamespace
, and weblogicCredentialsSecretName
to correspond to your deployment. Also change wlsMonitoringExporterTosoaCluster
, wlsMonitoringExporterTooimCluster
, exposeMonitoringNodePort
to true
. For example:
version: create-governancedomain-monitoring-inputs-v1
+
+# Unique ID identifying your domain.
+# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
+domainUID: governancedomain
+
+# Name of the domain namespace
+domainNamespace: oigns
+
+# Boolean value indicating whether to install kube-prometheus-stack
+setupKubePrometheusStack: true
+
+# Additional parameters for helm install kube-prometheus-stack
+# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
+# Sample :
+# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
+additionalParamForKubePrometheusStack:
+
+# Name of the monitoring namespace
+monitoringNamespace: monitoring
+
+# Name of the Admin Server
+adminServerName: AdminServer
+#
+# Port number for admin server
+adminServerPort: 7001
+
+# Cluster name
+soaClusterName: soa_cluster
+
+# Port number for managed server
+soaManagedServerPort: 8001
+
+# WebLogic Monitoring Exporter to Cluster
+wlsMonitoringExporterTosoaCluster: true
+
+# Cluster name
+oimClusterName: oim_cluster
+
+# Port number for managed server
+oimManagedServerPort: 14000
+
+# WebLogic Monitoring Exporter to Cluster
+wlsMonitoringExporterTooimCluster: true
+
+
+# Boolean to indicate if the adminNodePort will be exposed
+exposeMonitoringNodePort: true
+
+# NodePort to expose Prometheus
+prometheusNodePort: 32101
+
+# NodePort to expose Grafana
+grafanaNodePort: 32100
+
+# NodePort to expose Alertmanager
+alertmanagerNodePort: 32102
+
+# Name of the Kubernetes secret for the Admin Server's username and password
+weblogicCredentialsSecretName: oig-domain-credentials
+
Note: If your cluster does not have access to the internet to pull external images, such as grafana or prometheus, you must load the images in a local container registry. You must then set additionalParamForKubePrometheusStack
to set the location of the image in your local container registry, for example:
# Additional parameters for helm install kube-prometheus-stack
+# Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters
+# Sample :
+# additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false
+additionalParamForKubePrometheusStack: --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.3.4
+
Run the following command to setup monitoring:
+$ cd $WORKDIR/kubernetes/monitoring-service
+$ ./setup-monitoring.sh -i monitoring-inputs.yaml
+
The output should be similar to the following:
+Monitoring setup in monitoring in progress
+
+node/worker-node1 not labeled
+node/worker-node2 not labeled
+node/master-node not labeled
+Setup prometheus-community/kube-prometheus-stack started
+"prometheus-community" already exists with the same configuration, skipping
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+...Successfully got an update from the "prometheus" chart repository
+...Successfully got an update from the "prometheus-community" chart repository
+Update Complete. ⎈Happy Helming!⎈
+Setup prometheus-community/kube-prometheus-stack in progress
+NAME: monitoring
+LAST DEPLOYED: <DATE>
+NAMESPACE: monitoring
+STATUS: deployed
+REVISION: 1
+NOTES:
+kube-prometheus-stack has been installed. Check its status by running:
+ kubectl --namespace monitoring get pods -l "release=monitoring"
+
+Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
+Setup prometheus-community/kube-prometheus-stack completed
+Deploy WebLogic Monitoring Exporter started
+Deploying WebLogic Monitoring Exporter with domainNamespace[oigns], domainUID[governancedomain], adminServerPodName[governancedomain-adminserver]
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159
+100 2196k 100 2196k 0 0 1763k 0 0:00:01 0:00:01 --:--:-- 20.7M
+created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
+created /tmp/ci-GJSQsiXrFE
+/tmp/ci-GJSQsiXrFE $WORKDIR/kubernetes/monitoring-service
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service
+created /tmp/ci-KeyZrdouMD
+/tmp/ci-KeyZrdouMD $WORKDIR/kubernetes/monitoring-service
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service
+created /tmp/ci-QE9HawIIgT
+/tmp/ci-QE9HawIIgT $WORKDIR/kubernetes/monitoring-service
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service
+
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+
+Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ...
+Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-adminserver.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-soa.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-oim.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Disconnected from weblogic server: AdminServer
+
+
+Exiting WebLogic Scripting Tool.
+
+<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.>
+Deploy WebLogic Monitoring Exporter completed
+secret/basic-auth created
+servicemonitor.monitoring.coreos.com/wls-exporter created
+Deploying WebLogic Server Grafana Dashboard....
+{"id":25,"slug":"weblogic-server-dashboard","status":"success","uid":"5yUwzbZWz","url":"/d/5yUwzbZWz/weblogic-server-dashboard","version":1}
+Deployed WebLogic Server Grafana Dashboard successfully
+
+Grafana is available at NodePort: 32100
+Prometheus is available at NodePort: 32101
+Altermanager is available at NodePort: 32102
+==============================================================
+
After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.
+Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery
Click on serviceMonitor/oigns/wls-exporter/0
and then show more. Verify all the targets are mentioned.
Note: It may take several minutes for serviceMonitor/oigns/wls-exporter/0
to appear, so refresh the page until it does.
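You can also confirm from the command line that the ServiceMonitor was created in the domain namespace (assuming the oigns namespace):
$ kubectl get servicemonitor wls-exporter -n oigns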
Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100
and login with admin/admin
. Change your password when prompted.
In the Dashboards
panel, click on WebLogic Server Dashboard
. The dashboard for your OIG domain should be displayed. If it is not displayed, click the Search
icon in the left hand menu and search for WebLogic Server Dashboard
.
To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the $WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh
script. For usage details execute ./delete-monitoring.sh -h.
To uninstall run the following command:
+$ cd $WORKDIR/kubernetes/monitoring-service
+$ ./delete-monitoring.sh -i monitoring-inputs.yaml
+
Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OIG domain.
+Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux
. To check if your nodes are labelled, run the following:
$ kubectl get nodes --show-labels
+
If the nodes are labelled the output will look similar to the following:
+NAME STATUS ROLES AGE VERSION LABELS
+worker-node1 Ready <none> 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux
+worker-node2 Ready <none> 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux
+master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master=
+
If the nodes are not labelled, run the following command:
+$ kubectl label nodes --all kubernetes.io/os=linux
+
Clone Prometheus by running the following commands:
+$ cd $WORKDIR/kubernetes/monitoring-service
+$ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0
+
+Note: Refer to the compatibility matrix of Kube Prometheus and download the release of the repository that matches the Kubernetes version of your cluster.
+If your cluster does not have access to the internet to pull external images, such as grafana, you must load the images in a local container registry.
+For grafana, edit the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/grafana-deployment.yaml
and change image: grafana/grafana:7.3.4
to your local container registry image location, for example image: container-registry.example.com/grafana/grafana:8.3.4
.
For any other images check the $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/*deployment.yaml
files.
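One quick way to list every image referenced by the manifests, so you know which images to mirror into your local registry, is (a sketch):
$ grep -r "image:" $WORKDIR/kubernetes/monitoring-service/kube-prometheus/manifests/ | sort -u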
Run the following command to create the namespace and custom resource definitions:
+$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
+$ kubectl create -f manifests/setup
+
The output will look similar to the following:
+namespace/monitoring created
+customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
+customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created
+Warning: spec.template.spec.nodeSelector[beta.kubernetes.io/os]: deprecated since v1.14; use "kubernetes.io/os" instead
+clusterrole.rbac.authorization.k8s.io/prometheus-operator created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
+deployment.apps/prometheus-operator created
+service/prometheus-operator created
+serviceaccount/prometheus-operator created
+
Run the following command to create the rest of the resources:
+$ kubectl create -f manifests/
+
The output will look similar to the following:
+alertmanager.monitoring.coreos.com/main created
+secret/alertmanager-main created
+service/alertmanager-main created
+serviceaccount/alertmanager-main created
+servicemonitor.monitoring.coreos.com/alertmanager created
+secret/grafana-datasources created
+configmap/grafana-dashboard-apiserver created
+configmap/grafana-dashboard-cluster-total created
+configmap/grafana-dashboard-controller-manager created
+configmap/grafana-dashboard-k8s-resources-cluster created
+configmap/grafana-dashboard-k8s-resources-namespace created
+configmap/grafana-dashboard-k8s-resources-node created
+configmap/grafana-dashboard-k8s-resources-pod created
+configmap/grafana-dashboard-k8s-resources-workload created
+configmap/grafana-dashboard-k8s-resources-workloads-namespace created
+configmap/grafana-dashboard-kubelet created
+configmap/grafana-dashboard-namespace-by-pod created
+configmap/grafana-dashboard-namespace-by-workload created
+configmap/grafana-dashboard-node-cluster-rsrc-use created
+configmap/grafana-dashboard-node-rsrc-use created
+configmap/grafana-dashboard-nodes created
+configmap/grafana-dashboard-persistentvolumesusage created
+configmap/grafana-dashboard-pod-total created
+configmap/grafana-dashboard-prometheus-remote-write created
+configmap/grafana-dashboard-prometheus created
+configmap/grafana-dashboard-proxy created
+configmap/grafana-dashboard-scheduler created
+configmap/grafana-dashboard-statefulset created
+configmap/grafana-dashboard-workload-total created
+configmap/grafana-dashboards created
+Warning: spec.template.spec.nodeSelector[beta.kubernetes.io/os]: deprecated since v1.14; use "kubernetes.io/os" instead
+deployment.apps/grafana created
+service/grafana created
+serviceaccount/grafana created
+servicemonitor.monitoring.coreos.com/grafana created
+clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
+clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
+deployment.apps/kube-state-metrics created
+service/kube-state-metrics created
+serviceaccount/kube-state-metrics created
+servicemonitor.monitoring.coreos.com/kube-state-metrics created
+clusterrole.rbac.authorization.k8s.io/node-exporter created
+clusterrolebinding.rbac.authorization.k8s.io/node-exporter created
+daemonset.apps/node-exporter created
+service/node-exporter created
+serviceaccount/node-exporter created
+servicemonitor.monitoring.coreos.com/node-exporter created
+apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
+clusterrole.rbac.authorization.k8s.io/prometheus-adapter created
+clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created
+clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created
+clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created
+configmap/adapter-config created
+deployment.apps/prometheus-adapter created
+rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created
+service/prometheus-adapter created
+serviceaccount/prometheus-adapter created
+servicemonitor.monitoring.coreos.com/prometheus-adapter created
+clusterrole.rbac.authorization.k8s.io/prometheus-k8s created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+servicemonitor.monitoring.coreos.com/prometheus-operator created
+prometheus.monitoring.coreos.com/k8s created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s-config created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+prometheusrule.monitoring.coreos.com/prometheus-k8s-rules created
+service/prometheus-k8s created
+serviceaccount/prometheus-k8s created
+servicemonitor.monitoring.coreos.com/prometheus created
+servicemonitor.monitoring.coreos.com/kube-apiserver created
+servicemonitor.monitoring.coreos.com/coredns created
+servicemonitor.monitoring.coreos.com/kube-controller-manager created
+servicemonitor.monitoring.coreos.com/kube-scheduler created
+servicemonitor.monitoring.coreos.com/kubelet created
+
Provide external access for Grafana, Prometheus, and Alertmanager, by running the following commands:
+$ kubectl patch svc grafana -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32100 }]'
+
+$ kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32101 }]'
+
+$ kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32102 }]'
+
Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.
+The output will look similar to the following:
+service/grafana patched
+service/prometheus-k8s patched
+service/alertmanager-main patched
+
Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and the respective services have the exports configured correctly:
+$ kubectl get pods,services -o wide -n monitoring
+
The output should look similar to the following:
+pod/alertmanager-main-0 2/2 Running 0 40s 10.244.1.29 worker-node1 <none> <none>
+pod/alertmanager-main-1 2/2 Running 0 40s 10.244.2.68 worker-node2 <none> <none>
+pod/alertmanager-main-2 2/2 Running 0 40s 10.244.1.28 worker-node1 <none> <none>
+pod/grafana-f8cd57fcf-zpjh2 1/1 Running 0 40s 10.244.2.69 worker-node2 <none> <none>
+pod/kube-state-metrics-587bfd4f97-zw9zj 3/3 Running 0 38s 10.244.1.30 worker-node1 <none> <none>
+pod/node-exporter-2cgrm 2/2 Running 0 38s 10.196.54.36 master-node <none> <none>
+pod/node-exporter-fpl7f 2/2 Running 0 38s 10.247.95.26 worker-node1 <none> <none>
+pod/node-exporter-kvvnr 2/2 Running 0 38s 10.250.40.59 worker-node2 <none> <none>
+pod/prometheus-adapter-69b8496df6-9vfdp 1/1 Running 0 38s 10.244.2.70 worker-node2 <none> <none>
+pod/prometheus-k8s-0 2/2 Running 0 37s 10.244.2.71 worker-node2 <none> <none>
+pod/prometheus-k8s-1 2/2 Running 0 37s 10.244.1.31 worker-node1 <none> <none>
+pod/prometheus-operator-7649c7454f-g5b4l 2/2 Running 0 47s 10.244.2.67 worker-node2 <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/alertmanager-main NodePort 10.105.76.223 <none> 9093:32102/TCP 41s alertmanager=main,app=alertmanager
+service/alertmanager-operated ClusterIP None <none> 9093/TCP,9094/TCP,9094/UDP 40s app=alertmanager
+service/grafana NodePort 10.107.86.157 <none> 3000:32100/TCP 40s app=grafana
+service/kube-state-metrics ClusterIP None <none> 8443/TCP,9443/TCP 40s app.kubernetes.io/name=kube-state-metrics
+service/node-exporter ClusterIP None <none> 9100/TCP 39s app.kubernetes.io/name=node-exporter
+service/prometheus-adapter ClusterIP 10.102.244.224 <none> 443/TCP 39s name=prometheus-adapter
+service/prometheus-k8s NodePort 10.100.241.34 <none> 9090:32101/TCP 39s app=prometheus,prometheus=k8s
+service/prometheus-operated ClusterIP None <none> 9090/TCP 39s app=prometheus
+service/prometheus-operator ClusterIP None <none> 8443/TCP 47s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator
+
Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war
package needs to be updated and created for each listening port (Administration Server and Managed Servers) in the domain.
Set the below environment values and run the script get-wls-exporter.sh
to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy
:
$ cd $WORKDIR/kubernetes/monitoring-service/scripts
+$ export adminServerPort=7001
+$ export wlsMonitoringExporterTosoaCluster=true
+$ export soaManagedServerPort=8001
+$ export wlsMonitoringExporterTooimCluster=true
+$ export oimManagedServerPort=14000
+$ sh get-wls-exporter.sh
+
The output will look similar to the following:
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159
+100 2196k 100 2196k 0 0 1430k 0 0:00:01 0:00:01 --:--:-- 8479k
+created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir
+domainNamespace is empty, setting to default oimcluster
+domainUID is empty, setting to default oimcluster
+weblogicCredentialsSecretName is empty, setting to default "oimcluster-domain-credentials"
+adminServerPort is empty, setting to default "7001"
+soaClusterName is empty, setting to default "soa_cluster"
+oimClusterName is empty, setting to default "oim_cluster"
+created /tmp/ci-NEZy7NOfoz
+/tmp/ci-NEZy7NOfoz $WORKDIR/kubernetes/monitoring-service/scripts
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service/scripts
+created /tmp/ci-J7QJ4Nc1lo
+/tmp/ci-J7QJ4Nc1lo $WORKDIR/kubernetes/monitoring-service/scripts
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service/scripts
+created /tmp/ci-f4GbaxM2aJ
+/tmp/ci-f4GbaxM2aJ $WORKDIR/kubernetes/monitoring-service/scripts
+in temp dir
+ adding: WEB-INF/weblogic.xml (deflated 61%)
+ adding: config.yml (deflated 60%)
+$WORKDIR/kubernetes/monitoring-service/scripts
+
Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Identity Governance domain:
+$ cd $WORKDIR/kubernetes/monitoring-service/scripts
+$ kubectl cp wls-exporter-deploy <domain_namespace>/<domain_uid>-adminserver:/u01/oracle
+$ kubectl cp deploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName <domain_uid> -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true
+
For example:
+$ cd $WORKDIR/kubernetes/monitoring-service/scripts
+$ kubectl cp wls-exporter-deploy oigns/governancedomain-adminserver:/u01/oracle
+$ kubectl cp deploy-weblogic-monitoring-exporter.py oigns/governancedomain-adminserver:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n oigns governancedomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName governancedomain -adminServerName AdminServer -adminURL governancedomain-adminserver:7001 -username weblogic -password <password> -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true
+
The output will look similar to the following:
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+
+Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ...
+Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomaindomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-adminserver.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .>
+..Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-soa.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Deploying .........
+Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ...
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .>
+.Completed the deployment of Application with status completed
+Current Status of your Deployment:
+Deployment command type: deploy
+Deployment State : completed
+Deployment Message : no message
+Starting application wls-exporter-oim.
+<DATE> <Info> <J2EE Deployment SPI> <BEA-260121> <Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .>
+.Completed the start of Application with status completed
+Current Status of your Deployment:
+Deployment command type: start
+Deployment State : completed
+Deployment Message : no message
+Disconnected from weblogic server: AdminServer
+
+Exiting WebLogic Scripting Tool.
+
+<DATE> <Warning> <JNDI> <BEA-050001> <WLContext.close() was called in a different thread than the one in which it was created.>
+
Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.
+The exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml
has basicAuth with credentials as username: weblogic
and password: <password>
in base64 encoded format.
Run the following command to get the base64 encoded version of the weblogic password:
+$ echo -n "<password>" | base64
+
The output will look similar to the following:
+V2VsY29tZTE=
+
Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml
and change the password:
value to the value returned above. Also change any reference to the namespace
and weblogic.domainName:
values to match your OIG namespace and domain name. For example:
apiVersion: v1
+kind: Secret
+metadata:
+ name: basic-auth
+ namespace: oigns
+data:
+ password: V2VsY29tZTE=
+ user: d2VibG9naWM=
+type: Opaque
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: wls-exporter
+ namespace: oigns
+ labels:
+ k8s-app: wls-exporter
+ release: monitoring
+spec:
+ namespaceSelector:
+ matchNames:
+ - oigns
+ selector:
+ matchLabels:
+ weblogic.domainName: governancedomain
+ endpoints:
+ - basicAuth:
+ password:
+ name: basic-auth
+ key: password
+ username:
+ name: basic-auth
+ key: user
+ port: default
+ relabelings:
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ interval: 10s
+ honorLabels: true
+ path: /wls-exporter/metrics
+
Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml
and change the namespace
to match your OIG namespace. For example:
apiVersion: rbac.authorization.k8s.io/v1
+items:
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: Role
+ metadata:
+ name: prometheus-k8s
+ namespace: oigns
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+kind: RoleList
+
Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml
and change the namespace
to match your OIG namespace. For example:
apiVersion: rbac.authorization.k8s.io/v1
+items:
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: prometheus-k8s
+ namespace: oigns
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: prometheus-k8s
+ subjects:
+ - kind: ServiceAccount
+ name: prometheus-k8s
+ namespace: monitoring
+kind: RoleBindingList
+
Run the following command to enable Prometheus:
+$ cd $WORKDIR/kubernetes/monitoring-service/manifests
+$ kubectl apply -f .
+
The output will look similar to the following:
+rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
+role.rbac.authorization.k8s.io/prometheus-k8s created
+secret/basic-auth created
+servicemonitor.monitoring.coreos.com/wls-exporter created
+
After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.
+Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery
Click on oigns/wls-exporter/0
and then show more. Verify all the targets are mentioned.
Note: It may take several minutes for oigns/wls-exporter/0
to appear, so refresh the page until it does.
Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100
and login with admin/admin
. Change your password when prompted.
Import the Grafana dashboard by navigating on the left hand menu to Create > Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json
and paste. Then click Load and Import. The dashboard should be displayed.
To clean up a manual installation:
+Run the following commands:
+$ cd $WORKDIR/kubernetes/monitoring-service/manifests/
+$ kubectl delete -f .
+
Delete the deployments:
+$ cd $WORKDIR/kubernetes/monitoring-service/scripts/
+$ kubectl cp undeploy-weblogic-monitoring-exporter.py <domain_namespace>/<domain_uid>-adminserver:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n <domain_namespace> <domain_uid>-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName <domain_uid> -adminServerName AdminServer -adminURL <domain_uid>-adminserver:7001 -username weblogic -password <password> -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true
+
Delete Prometheus:
+$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus
+$ kubectl delete -f manifests
+$ kubectl delete -f manifests/setup
+
Run OIG utilities inside the OIG Kubernetes cluster.
+Access a bash shell inside the <domain_uid>-oim-server1
pod:
$ kubectl -n oigns exec -it <domain_uid>-oim-server1 -- bash
+
For example:
+$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash
+
This will take you into a bash shell in the running <domain_uid>-oim-server1
pod:
[oracle@governancedomain-oim-server1 oracle]$
+
Navigate to the /u01/oracle/idm/server/bin
directory and execute the utility as required. For example:
[oracle@governancedomain-oim-server1 oracle]$ cd /u01/oracle/idm/server/bin
+[oracle@governancedomain-oim-server1 bin]$ ./<filename>.sh
+
Note: Some utilities, such as PurgeCache.sh and GenerateSnapshot.sh, may prompt you to enter the t3 URL, for example:
+[oracle@governancedomain-oim-server1 bin]$ sh GenerateSnapshot.sh
+For running the Utilities the following environment variables need to be set
+APP_SERVER is weblogic
+OIM_ORACLE_HOME is /u01/oracle/idm/
+JAVA_HOME is /u01/jdk
+MW_HOME is /u01/oracle
+WL_HOME is /u01/oracle/wlserver
+DOMAIN_HOME is /u01/oracle/user_projects/domains/governancedomain
+Executing -Dweblogic.security.SSL.trustedCAKeyStore= in IPv4 mode
+[Enter Xellerate admin username :]xelsysadm
+[Enter password for xelsysadm :]
+[Threads to use [ 8 ]]
+[Enter serverURL :[t3://oimhostname:oimportno ]]
+
To find the t3 URL run:
+$ kubectl get services -n oigns | grep oim-cluster
+
The output will look similar to the following:
+governancedomain-cluster-oim-cluster ClusterIP 10.110.161.82 <none> 14002/TCP,14000/TCP 4d
+
In this case the t3 URL is: t3://governancedomain-cluster-oim-cluster:14000
.
To pass an input file to a utility, first place the input file in a directory of your choice on the local system.
+Run the following command to copy the input file to the running governancedomain-oim-server1
pod.
$ kubectl -n oigns cp /<path>/<inputFile> governancedomain-oim-server1:/u01/oracle/idm/server/bin/
+
Access a bash shell inside the governancedomain-oim-server1
pod:
$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash
+
This will take you into a bash shell in the running governancedomain-oim-server1
pod:
[oracle@governancedomain-oim-server1 oracle]$
+
Navigate to the /u01/oracle/idm/server/bin
directory and execute the utility as required, passing the input file. For example:
[oracle@governancedomain-oim-server1 oracle]$ cd /u01/oracle/idm/server/bin
+[oracle@governancedomain-oim-server1 bin]$ ./<filename>.sh -inputFile <inputFile>
+
Note: As pods are stateless, the copied input file will only remain until the pod restarts.
+To edit a property/profile file in the Kubernetes cluster:
+Copy the file from the pod to a directory on the local system, for example:
+$ kubectl -n oigns cp governancedomain-oim-server1:/u01/oracle/idm/server/bin/<file.properties_profile> /<path>/<file.properties_profile>
+
Note: If you see the message tar: Removing leading '/' from member names
this can be ignored.
Edit the /<path>/<file.properties_profile>
in an editor of your choice.
Copy the file back to the pod:
+$ kubectl -n oigns cp /<path>/<file.properties_profile> governancedomain-oim-server1:/u01/oracle/idm/server/bin/
+
Note: As pods are stateless the copied input file will remain until the pod restarts. Preserve a local copy in case you need to copy files back after pod restart.
+To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain.
+Check to see if the helper pod exists by running:
+$ kubectl get pods -n <domain_namespace> | grep helper
+
For example:
+$ kubectl get pods -n oigns | grep helper
+
The output should look similar to the following:
+helper 1/1 Running 0 26h
+
If the helper pod doesn’t exist then see Step 1 in Prepare your environment to create it.
+Run the following command to start a bash shell in the helper pod:
+$ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
+
For example:
+$ kubectl exec -it helper -n oigns -- /bin/bash
+
This will take you into a bash shell in the running helper pod:
+[oracle@helper ~]$
+
Connect to WLST using the following commands:
+[oracle@helper ~]$ cd $ORACLE_HOME/oracle_common/common/bin
+[oracle@helper ~]$ ./wlst.sh
+
The output will look similar to the following:
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away.
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+
+wls:/offline>
+
To access t3 for the Administration Server connect as follows:
+wls:/offline> connect('weblogic','<password>','t3://governancedomain-adminserver:7001')
+
The output will look similar to the following:
+Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ...
+Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+wls:/governancedomain/serverConfig/>
+
Or to access t3 for the OIG Cluster service, connect as follows:
+wls:/offline> connect('weblogic','<password>','t3://governancedomain-cluster-oim-cluster:14000')
+
The output will look similar to the following:
+Connecting to t3://governancedomain-cluster-oim-cluster:14000 with userid weblogic ...
+Successfully connected to managed Server "oim_server1" that belongs to domain "governancedomain".
+
+Warning: An insecure protocol was used to connect to the server.
+To ensure on-the-wire security, the SSL port or Admin port should be used instead.
+
+wls:/governancedomain/serverConfig/>
+
For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.
+wls:/governancedomain/serverConfig/> cd('/Servers')
+wls:/governancedomain/serverConfig/Servers> ls ()
+dr-- AdminServer
+dr-- oim_server1
+dr-- oim_server2
+dr-- oim_server3
+dr-- oim_server4
+dr-- oim_server5
+dr-- soa_server1
+dr-- soa_server2
+dr-- soa_server3
+dr-- soa_server4
+dr-- soa_server5
+
+wls:/governancedomain/serverConfig/Servers>
+
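For example, to check the runtime state of a server rather than just browse the configuration, you can switch to the domain runtime tree. A short sketch of typical WLST commands, assuming you are still connected to the Administration Server as above:
+wls:/governancedomain/serverConfig/> domainRuntime()
+wls:/governancedomain/domainRuntime/> state('oim_server1','Server')
+Current state of 'oim_server1' : RUNNING
+wls:/governancedomain/domainRuntime/> exit()
+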
By default the SSL port is not enabled for the Administration Server or OIG Managed Servers. To configure the SSL port for the Administration Server and Managed Servers, login to the WebLogic Administration console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
and navigate to Lock & Edit -> Environment -> Servers -> server_name -> Configuration -> General -> SSL Listen Port Enabled -> Provide SSL Port (for the Administration Server: 7002 and for the OIG Managed Server (oim_server1): 14101) -> Save -> Activate Changes.
Note: If configuring the OIG Managed Servers for SSL, you must enable SSL on the same port for all servers (oim_server1 through oim_server5).
+Create a myscripts
directory as follows:
$ cd $WORKDIR/kubernetes
+$ mkdir myscripts
+$ cd myscripts
+
Create a sample yaml template file in the myscripts
directory called <domain_uid>-adminserver-ssl.yaml
to create a Kubernetes service for the Administration Server:
Note: Update the domainName
, domainUID
and namespace
based on your environment.
apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ serviceType: SERVER
+ weblogic.domainName: governancedomain
+ weblogic.domainUID: governancedomain
+ weblogic.resourceVersion: domain-v2
+ weblogic.serverName: AdminServer
+ name: governancedomain-adminserver-ssl
+ namespace: oigns
+spec:
+ clusterIP: None
+ ports:
+ - name: default
+ port: 7002
+ protocol: TCP
+ targetPort: 7002
+ selector:
+ weblogic.createdByOperator: "true"
+ weblogic.domainUID: governancedomain
+ weblogic.serverName: AdminServer
+ type: ClusterIP
+
and create the following sample yaml template file <domain_uid>-oim-cluster-ssl.yaml
for the OIG Managed Server:
apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ serviceType: SERVER
+ weblogic.domainName: governancedomain
+ weblogic.domainUID: governancedomain
+ weblogic.resourceVersion: domain-v2
+ name: governancedomain-cluster-oim-cluster-ssl
+ namespace: oigns
+spec:
+ clusterIP: None
+ ports:
+ - name: default
+ port: 14101
+ protocol: TCP
+ targetPort: 14101
+ selector:
+ weblogic.clusterName: oim_cluster
+ weblogic.createdByOperator: "true"
+ weblogic.domainUID: governancedomain
+ type: ClusterIP
+
Apply the template using the following command for the Administration Server:
+$ kubectl apply -f governancedomain-adminserver-ssl.yaml
+service/governancedomain-adminserver-ssl created
+
or using the following command for the OIG Managed Server:
+$ kubectl apply -f governancedomain-oim-cluster-ssl.yaml
+service/governancedomain-cluster-oim-cluster-ssl created
+
Validate that the Kubernetes Services to access SSL ports are created successfully:
+$ kubectl get svc -n <domain_namespace> |grep ssl
+
For example:
+$ kubectl get svc -n oigns |grep ssl
+
The output will look similar to the following:
+governancedomain-adminserver-ssl ClusterIP None <none> 7002/TCP 74s
+governancedomain-cluster-oim-cluster-ssl ClusterIP None <none> 14101/TCP 21s
+
Connect to a bash shell of the helper pod:
+$ kubectl exec -it helper -n oigns -- /bin/bash
+
In the bash shell run the following:
+[oracle@helper bin]$ export WLST_PROPERTIES="-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust"
+[oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin
+[oracle@helper bin]$ ./wlst.sh
+Initializing WebLogic Scripting Tool (WLST) ...
+
+Welcome to WebLogic Server Administration Scripting Shell
+
+Type help() for help on available commands
+wls:/offline>
+
Connect to the Administration Server t3s service:
+wls:/offline> connect('weblogic','<password>','t3s://governancedomain-adminserver-ssl:7002')
+Connecting to t3s://governancedomain-adminserver-ssl:7002 with userid weblogic ...
+<DATE> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
+<DATE> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
+<DATE> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
+Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain".
+
+wls:/governancedomain/serverConfig/>
+
To connect to the OIG Managed Server t3s service:
+wls:/offline> connect('weblogic','<password>','t3s://governancedomain-cluster-oim-cluster-ssl:14101')
+Connecting to t3s://governancedomain-cluster-oim-cluster-ssl:14101 with userid weblogic ...
+<DATE> <Info> <Security> <BEA-090905> <Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.>
+<DATE> <Info> <Security> <BEA-090906> <Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.>
+<DATE> <Info> <Security> <BEA-090909> <Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.>
+Successfully connected to managed Server "oim_server1" that belongs to domain "governancedomain".
+
+wls:/governancedomain/serverConfig/>
+
This section shows you how to upgrade the WebLogic Kubernetes Operator, upgrade the OIG image, and patch the OIG domain. It also shows you how to upgrade the Elasticsearch and Kibana stack, and the Ingress.
+The upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to.
+Please refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.
+Instructions on how to update the WebLogic Kubernetes Operator version.
+Instructions on how to update your OIG Kubernetes cluster with a new OIG container image.
+Instructions on how to upgrade the ingress.
+Instructions on how to upgrade Elasticsearch and Kibana.
+The OIG domain patching script automatically performs the update of your OIG Kubernetes cluster with a new OIG container image.
+Note: Before following the steps below, you must have upgraded to WebLogic Kubernetes Operator 4.1.2.
+The script executes the following steps sequentially:
+The script stops the Admin, SOA and OIM servers by setting serverStartPolicy to Never in the domain definition yaml, patches the OIM database schemas, and then restarts the servers by setting serverStartPolicy to IfNeeded and image to the new image tag.
+The script exits with a failure if a configurable timeout is reached before the target pod count is reached, depending upon the domain configuration. It also exits if there is any failure while patching the database schema and domain.
+Note: The script execution will cause downtime while patching the OIG deployment and database schemas.
+Before you begin, perform the following steps:
+Review the Domain resource documentation.
+Ensure that you have a running OIG deployment in your cluster.
+Ensure that the database is up and running.
+Download the latest code repository as follows:
+Create a working directory to setup the source code.
+$ mkdir <workdir>
+
For example:
+$ mkdir /scratch/OIGK8Slatest
+
Download the latest OIG deployment scripts from the OIG repository.
+$ cd <workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/OIGK8Slatest
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleIdentityGovernance
+
For example:
+$ export WORKDIR=/scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance
+
Run the patch domain script as follows, specifying the inputs required by the script. If you need help understanding the inputs, run the script with the -h option.
$ cd $WORKDIR/kubernetes/domain-lifecycle
+$ ./patch_oig_domain.sh -h
+$ ./patch_oig_domain.sh -i <target_image_tag> -n <oig_namespace>
+
For example:
+$ cd $WORKDIR/kubernetes/domain-lifecycle
+$ ./patch_oig_domain.sh -h
+$ ./patch_oig_domain.sh -i 12.2.1.4-jdk8-ol7-<October`23> -n oigns
+
The output will look similar to the following:
+[INFO] Found domain name: governancedomain
+[INFO] Image Registry: container-registry.oracle.com/middleware/oig_cpu
+[INFO] Domain governancedomain is currently running with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<April`23>
+current no of pods under governancedomain are 3
+[INFO] The pod helper already exists in namespace oigns.
+[INFO] Deleting pod helper
+pod "helper" deleted
+[INFO] Fetched Image Pull Secret: orclcred
+[INFO] Creating new helper pod with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
+pod/helper created
+Checking helper Running
+[INFO] Stopping Admin, SOA and OIM servers in domain governancedomain. This may take some time, monitor log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/stop_servers.log for details
+[INFO] All servers are now stopped successfully. Proceeding with DB Schema changes
+[INFO] Patching OIM schemas...
+[INFO] DB schema update successful. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/patch_oim_wls.log for details
+[INFO] Starting Admin, SOA and OIM servers with new image container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
+[INFO] Waiting for 3 weblogic pods to be ready..This may take several minutes, do not close the window. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/monitor_weblogic_pods.log for progress
+[SUCCESS] All servers under governancedomain are now in ready state with new image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23>
+
The logs are available at $WORKDIR/kubernetes/domain-lifecycle
by default. A custom log location can also be provided to the script.
Note: If the patch domain script creation fails, refer to the Troubleshooting section.
+This section shows how to upgrade the ingress.
+To determine if this step is required for the version you are upgrading to, refer to the Release Notes.
+To upgrade the existing ingress rules, follow the steps below:
+List the existing ingress:
+$ helm list -n <domain_namespace>
+
For example:
+$ helm list -n oigns
+
The output will look similar to the following:
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+governancedomain-nginx oigns 1 <DATE> deployed ingress-per-domain-0.1.0 1.0
+
Make sure you have downloaded the latest code as per Download the latest code repository.
+Edit the $WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml
and change the domainUID
parameter to match your domainUID, for example domainUID: governancedomain
. Change sslType
to NONSSL
or SSL
depending on your existing configuration. For example:
# Load balancer type. Supported values are: NGINX
+type: NGINX
+
+# SSL configuration Type. Supported Values are : NONSSL,SSL
+sslType: SSL
+
+# domainType. Supported values are: oim
+domainType: oim
+
+#WLS domain as backend to the load balancer
+wlsDomain:
+ domainUID: governancedomain
+ adminServerName: AdminServer
+ adminServerPort: 7001
+ adminServerSSLPort:
+ soaClusterName: soa_cluster
+ soaManagedServerPort: 8001
+ soaManagedServerSSLPort:
+ oimClusterName: oim_cluster
+ oimManagedServerPort: 14000
+ oimManagedServerSSLPort:
+
+
+# Host specific values
+hostName:
+ enabled: false
+ admin:
+ runtime:
+ internal:
+
+# Ngnix specific values
+nginx:
+ nginxTimeOut: 180
+
Upgrade the governancedomain-nginx
with the following command:
$ cd $WORKDIR
+$ helm upgrade <ingress> kubernetes/charts/ingress-per-domain/ --namespace <domain_namespace> --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values
+
For example:
+$ cd $WORKDIR
+$ helm upgrade governancedomain-nginx kubernetes/charts/ingress-per-domain/ --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values
+
The output will look similar to the following:
+Release "governancedomain-nginx" has been upgraded. Happy Helming!
+NAME: governancedomain-nginx
+LAST DEPLOYED: <DATE>
+NAMESPACE: oigns
+STATUS: deployed
+REVISION: 2
+TEST SUITE: None
+
List the ingress:
+$ kubectl get ing -n oigns
+
The output will look similar to the following:
+NAME CLASS HOSTS ADDRESS PORTS AGE
+governancedomain-nginx <none> * 10.107.182.40 80 18s
+
Describe the ingress and make sure all the listed paths are accessible:
+$ kubectl describe ing governancedomain-nginx -n oigns
+
The output will look similar to the following:
+Name: governancedomain-nginx
+Namespace: oigns
+Address: 10.107.182.40
+Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
+Rules:
+ Host Path Backends
+ ---- ---- --------
+ *
+ /console governancedomain-adminserver:7001 (10.244.4.240:7001)
+ /consolehelp governancedomain-adminserver:7001 (10.244.4.240:7001)
+ /em governancedomain-adminserver:7001 (10.244.4.240:7001)
+ /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
+ /soa governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
+ /integration governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
+ /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001)
+ /identity governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /admin governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /oim governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /iam governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /ucs governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+ /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000)
+Annotations: kubernetes.io/ingress.class: nginx
+ meta.helm.sh/release-name: governancedomain-nginx
+ meta.helm.sh/release-namespace: oigns
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/affinity-mode: persistent
+ nginx.ingress.kubernetes.io/configuration-snippet:
+ more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL";
+ more_set_input_headers "X-Forwarded-Proto: https";
+ more_set_input_headers "WL-Proxy-SSL: true";
+ nginx.ingress.kubernetes.io/enable-access-log: false
+ nginx.ingress.kubernetes.io/ingress.allow-http: false
+ nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k
+ nginx.ingress.kubernetes.io/proxy-read-timeout: 180
+ nginx.ingress.kubernetes.io/proxy-send-timeout: 180
+ nginx.ingress.kubernetes.io/session-cookie-name: sticky
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Sync 51m (x3 over 54m) nginx-ingress-controller Scheduled for sync
+
These instructions apply to upgrading the operator from 3.X.X to 4.X, or to a later version within the 4.X release family as additional versions are released.
+On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:
+$ mkdir <workdir>/weblogic-kubernetes-operator-4.X.X
+$ cd <workdir>/weblogic-kubernetes-operator-4.X.X
+$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X
+
For example:
+$ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X
+$ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X
+$ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v4.X.X
+
This will create the directory <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator.
Run the following helm command to upgrade the operator:
+$ cd <workdir>/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
+$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace <sample-kubernetes-operator-ns> --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
+
For example:
+$ cd /scratch/OIGK8S/weblogic-kubernetes-operator-4.X.X/weblogic-kubernetes-operator
+$ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.X.X --namespace operator --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator
+
The output will look similar to the following:
+Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming!
+NAME: weblogic-kubernetes-operator
+LAST DEPLOYED: <DATE>
+NAMESPACE: operator
+STATUS: deployed
+REVISION: 2
+TEST SUITE: None
+
Verify that the operator’s pod and services are running by executing the following command:
+$ kubectl get all -n <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl get all -n opns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+pod/weblogic-operator-b7d6df78c-mfrc4 1/1 Running 0 40s
+pod/weblogic-operator-webhook-7996b8b58b-frtwp 1/1 Running 0 42s
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/weblogic-operator-webhook-svc ClusterIP 10.106.51.57 <none> 8083/TCP,8084/TCP 42s
+
+NAME READY UP-TO-DATE AVAILABLE AGE
+deployment.apps/weblogic-operator 1/1 1 1 6d
+deployment.apps/weblogic-operator-webhook 1/1 1 1 42s
+
+NAME DESIRED CURRENT READY AGE
+replicaset.apps/weblogic-operator-5884685f4f 0 0 0 6d
+replicaset.apps/weblogic-operator-b7d6df78c 1 1 1 40s
+replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 42s
+
Note: When you upgrade a 3.x WebLogic Kubernetes Operator to 4.x, the upgrade process creates a WebLogic Domain resource conversion webhook deployment, and associated resources in the same namespace. The webhook automatically and transparently upgrades the existing WebLogic Domains from the 3.x schema to the 4.x schema. For more information, see Domain Upgrade in the WebLogic Kubernetes Operator documentation.
+Note: In WebLogic Kubernetes Operator 4.X, changes are made to serverStartPolicy
that affect starting/stopping of the domain. Refer to the serverStartPolicy
entry in the create-domain-inputs.yaml for more information. Also see Domain Life Cycle.
This section shows how to upgrade Elasticsearch and Kibana.
+To determine if this step is required for the version you are upgrading to, refer to the Release Notes.
+From October 22 (22.4.1) onwards, OIG logs should be stored on a centralized Elasticsearch and Kibana stack.
+Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.
+If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below:
+Edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
and change all instances of namespace to correspond to your deployment.
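For example, rather than editing the file by hand you could update the namespace with sed. This is a sketch only; it assumes the file's existing namespace value is oigns, so check the file first and adjust the pattern if it differs:
+$ sed -i 's/namespace: oigns/namespace: <your_domain_namespace>/g' $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
+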
Delete the Elasticsearch and Kibana resources using the following command:
+$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml
+
Follow these post install configuration steps.
+Perform post install tasks.
+Install and Configure Connectors.
+Download the Connector you are interested in from Oracle Identity Manager Connector Downloads.
+Copy the connector zip file to a staging directory on the master node e.g. <workdir>/stage
and unzip it:
$ cp $HOME/Downloads/<connector>.zip <workdir>/<stage>/
+$ cd <workdir>/<stage>
+$ unzip <connector>.zip
+$ chmod -R 755 *
+
For example:
+$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/
+$ cd /scratch/OIGK8S/stage/
+$ unzip exchange-12.2.1.3.0.zip
+$ chmod -R 755 *
+
There are two options to copy OIG Connectors to your Kubernetes cluster:
+a. Copy the connector zip file directly to the persistent volume
+b. Use the kubectl cp command to copy the connector to the persistent volume
It is recommended to use option a), however there may be cases, for example when using a Managed Service such as Oracle Kubernetes Engine on Oracle Cloud Infrastructure, where it may not be feasible to directly mount the domain directory. In such cases option b) should be used.
+Copy the connector zip file to the persistent volume. For example:
+$ cp -R <path_to>/<connector> <persistent_volume>/governancedomainpv/ConnectorDefaultDirectory/
+
For example:
+$ cp -R /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 /scratch/shared/governancedomainpv/ConnectorDefaultDirectory/
+
Use the kubectl cp command to copy the connector to the persistent volume. Run the following command to copy over the connector:
+$ kubectl -n <domain_namespace> cp <path_to>/<connector> <oim_server_pod>:/u01/oracle/idm/server/ConnectorDefaultDirectory/
+
For example:
+$ kubectl -n oigns cp /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 governancedomain-oim-server1:/u01/oracle/idm/server/ConnectorDefaultDirectory/
+
The connectors are installed in the same way as on a standard on-premises setup, either via Application On Boarding or via the Connector Installer.
+Refer to your Connector specific documentation for instructions.
+Follow these post install configuration steps.
+Navigate to the following directory:
+cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain
+
Create a setUserOverrides.sh
with the following contents:
DERBY_FLAG=false
+JAVA_OPTIONS="${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true"
+MEM_ARGS="-Xms8192m -Xmx8192m"
+
Copy the setUserOverrides.sh
file to the Administration Server pod:
$ chmod 755 setUserOverrides.sh
+$ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh
+
Where oigns
is the OIG namespace and governancedomain
is the domain_UID
.
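You can optionally confirm the file was copied with the expected permissions, for example:
+$ kubectl exec -n oigns governancedomain-adminserver -- ls -l /u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh
+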
Stop the OIG domain using the following command:
+$ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
+
For example:
+$ kubectl -n oigns patch domains governancedomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "Never" }]'
+
The output will look similar to the following:
+domain.weblogic.oracle/governancedomain patched
+
Check that all the pods are stopped:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Terminating 0 18h
+governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Terminating 0 18h
+governancedomain-soa-server1 1/1 Terminating 0 18h
+helper 1/1 Running 0 41h
+
The Administration Server pod and Managed Server pods will move to a STATUS of Terminating
. After a few minutes, run the command again and the pods should have disappeared:
NAME READY STATUS RESTARTS AGE
+governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h
+helper 1/1 Running 0 41h
+
Start the domain using the following command:
+$ kubectl -n <domain_namespace> patch domains <domain_uid> --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
+
For example:
+$ kubectl -n oigns patch domains governancedomain --type='json' -p='[{"op": "replace", "path": "/spec/serverStartPolicy", "value": "IfNeeded" }]'
+
Run the following kubectl command to view the pods:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h
+governancedomain-introspect-domain-job-7qx29 1/1 Running 0 8s
+helper 1/1 Running 0 41h
+
The Administration Server pod will start followed by the OIG Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY
status 1/1
:
NAME READY STATUS RESTARTS AGE
+governancedomain-adminserver 1/1 Running 0 6m4s
+governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h
+governancedomain-oim-server1 1/1 Running 0 3m5s
+governancedomain-soa-server1 1/1 Running 0 3m5s
+helper 1/1 Running 0 41h
+
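Rather than re-running the command manually, you can also watch the pods until they reach the ready state, for example:
+$ kubectl get pods -n oigns -w
+
Press Ctrl-C to stop watching once all the pods show READY 1/1.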
Login to Oracle Enterprise Manager using the following URL:
+https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em
Click the Target Navigation icon in the top left of the screen and navigate to the following:
+Enter a new value for the OimFrontEndURL
attribute, in one of the following formats:
https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}
https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
If using HTTP instead of HTTPS for your ingress, change the URL appropriately.
+Then click Apply
.
To prepare for Oracle Identity Governance deployment in a Kubernetes environment, complete the following steps:
+Preparing the environment for domain creation:
+a. Creating Kubernetes secrets for the domain and RCU
+b. Creating a Kubernetes persistent volume and persistent volume claim
+As per the Prerequisites a Kubernetes cluster should have already been configured.
+Run the following command on the master node to check the cluster and worker nodes are running:
+$ kubectl get nodes,pods -n kube-system
+
The output will look similar to the following:
+NAME STATUS ROLES AGE VERSION
+node/worker-node1 Ready <none> 17h v1.26.6+1.el8
+node/worker-node2 Ready <none> 17h v1.26.6+1.el8
+node/master-node Ready master 23h v1.26.6+1.el8
+
+NAME READY STATUS RESTARTS AGE
+pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h
+pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h
+pod/etcd-master 1/1 Running 0 21h
+pod/kube-apiserver-master-node 1/1 Running 0 21h
+pod/kube-controller-manager-master-node 1/1 Running 0 21h
+pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h
+pod/kube-proxy-2kxv2 1/1 Running 0 17h
+pod/kube-proxy-82vvj 1/1 Running 0 17h
+pod/kube-proxy-nrgw9 1/1 Running 0 23h
+pod/kube-scheduler-master 1/1 Running 0 21h
+
The OIG Kubernetes deployment requires access to an OIG container image. The image can be obtained in the following ways:
+The latest prebuilt OIG October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
+Note: Before using this image you must login to Oracle Container Registry, navigate to Middleware
> oig_cpu
and accept the license agreement.
You can use this image in the following ways:
+You can build your own OIG container image using the WebLogic Image Tool. This is recommended if you need to apply one-off patches to a prebuilt OIG container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.
+You can use an image built with WebLogic Image Tool in the following ways:
+Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.
+Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OIG domains, you need to set up the deployment scripts on the master node as below:
+Create a working directory to setup the source code.
+$ mkdir <workdir>
+
For example:
+$ mkdir /scratch/OIGK8S
+
Download the latest OIG deployment scripts from the OIG repository.
+$ cd <workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/OIGK8S
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleIdentityGovernance
+
For example:
+$ export WORKDIR=/scratch/OIGK8S/fmw-kubernetes/OracleIdentityGovernance
+
Run the following command and see if the WebLogic custom resource definition name already exists:
+$ kubectl get crd
+
In the output you should see:
+No resources found in default namespace.
+
If you see any of the following:
+NAME AGE
+clusters.weblogic.oracle 5d
+domains.weblogic.oracle 5d
+
then run the following commands to delete the existing CRDs:
+$ kubectl delete crd clusters.weblogic.oracle
+$ kubectl delete crd domains.weblogic.oracle
+
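You can optionally confirm the CRDs were removed before installing the operator, for example:
+$ kubectl get crd | grep weblogic
+
If nothing is returned, the old CRDs have been deleted.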
On the master node run the following command to create a namespace for the operator:
+$ kubectl create namespace <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl create namespace opns
+
The output will look similar to the following:
+namespace/opns created
+
Create a service account for the operator in the operator’s namespace by running the following command:
+$ kubectl create serviceaccount -n <sample-kubernetes-operator-ns> <sample-kubernetes-operator-sa>
+
For example:
+$ kubectl create serviceaccount -n opns op-sa
+
The output will look similar to the following:
+serviceaccount/op-sa created
+
Run the following helm command to install and start the operator:
+$ cd $WORKDIR
+$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
+--namespace <sample-kubernetes-operator-ns> \
+--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
+--set serviceAccount=<sample-kubernetes-operator-sa> \
+--set "enableClusterRoleBinding=true" \
+--set "domainNamespaceSelectionStrategy=LabelSelector" \
+--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
+--set "javaLoggingLevel=FINE" --wait
+
For example:
+$ cd $WORKDIR
+$ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \
+--namespace opns \
+--set image=ghcr.io/oracle/weblogic-kubernetes-operator:4.1.2 \
+--set serviceAccount=op-sa \
+--set "enableClusterRoleBinding=true" \
+--set "domainNamespaceSelectionStrategy=LabelSelector" \
+--set "domainNamespaceLabelSelector=weblogic-operator\=enabled" \
+--set "javaLoggingLevel=FINE" --wait
+
The output will look similar to the following:
+NAME: weblogic-kubernetes-operator
+LAST DEPLOYED: <DATE>
+NAMESPACE: opns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
Verify that the operator’s pod and services are running by executing the following command:
+$ kubectl get all -n <sample-kubernetes-operator-ns>
+
For example:
+$ kubectl get all -n opns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+pod/weblogic-operator-b7d6df78c-vxnpt 1/1 Running 0 33s
+pod/weblogic-operator-webhook-7996b8b58b-68l8s 1/1 Running 0 33s
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/weblogic-operator-webhook-svc ClusterIP 10.109.163.130 <none> 8083/TCP,8084/TCP 34s
+
+NAME READY UP-TO-DATE AVAILABLE AGE
+deployment.apps/weblogic-operator 1/1 1 1 33s
+deployment.apps/weblogic-operator-webhook 1/1 1 1 33s
+
+NAME DESIRED CURRENT READY AGE
+replicaset.apps/weblogic-operator-b7d6df78c 1 1 1 33s
+replicaset.apps/weblogic-operator-webhook-7996b8b58b 1 1 1 33s
+
Verify the operator pod’s log:
+$ kubectl logs -n <sample-kubernetes-operator-ns> -c weblogic-operator deployments/weblogic-operator
+
For example:
+$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator
+
The output will look similar to the following:
+{"timestamp":"<DATE>","thread":37,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678902295852,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
+{"timestamp":"<DATE>","thread":42,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678902300853,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
+{"timestamp":"<DATE>","thread":21,"fiber":"","namespace":"","domainUID":"","level":"FINE","class":"oracle.kubernetes.operator.DeploymentLiveness","method":"run","timeInMillis":1678902305854,"message":"Liveness file last modified time set","exception":"","code":"","headers":{},"body":""}
+
Run the following command to create a namespace for the domain:
+$ kubectl create namespace <domain_namespace>
+
For example:
+$ kubectl create namespace oigns
+
The output will look similar to the following:
+namespace/oigns created
+
Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it:
+$ kubectl label namespaces <domain_namespace> weblogic-operator=enabled
+
For example:
+$ kubectl label namespaces oigns weblogic-operator=enabled
+
The output will look similar to the following:
+namespace/oigns labeled
+
Run the following command to check the label was created:
+$ kubectl describe namespace <domain_namespace>
+
For example:
+$ kubectl describe namespace oigns
+
The output will look similar to the following:
+Name: oigns
+Labels: kubernetes.io/metadata.name=oigns
+ weblogic-operator=enabled
+Annotations: <none>
+Status: Active
+
+No resource quota.
+
+No LimitRange resource.
+
In this section you create a secret that stores the credentials for the container registry where the OIG image is stored.
+If you are not using a container registry and have loaded the images on each of the master and worker nodes, then there is no need to create the registry secret.
+Run the following command to create the secret:
+kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
+--docker-username="<USER_NAME>" \
+--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
+--namespace=<domain_namespace>
+
For example, if using Oracle Container Registry:
+kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
+--docker-username="user@example.com" \
+--docker-password=password --docker-email=user@example.com \
+--namespace=oigns
+
Replace <USER_NAME>
and <PASSWORD>
with the credentials for the registry with the following caveats:
If using Oracle Container Registry to pull the OIG container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware
> oig_cpu
and accept the license agreement.
If using your own container registry to store the OIG container image, this is the username and password (or token) for your container registry.
+The output will look similar to the following:
+secret/orclcred created
+
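You can optionally verify the secret exists in the domain namespace, for example:
+$ kubectl get secret orclcred -n oigns
+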
In this section you create the RCU schemas in the Oracle Database.
+Before following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.
+If using Oracle Container Registry or your own container registry for your OIG container image, run the following command to create a helper pod to run RCU:
+$ kubectl run --image=<image_name-from-registry> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1", "spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n <domain_namespace> -- sleep infinity
+
For example:
+$ kubectl run --image=container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-<October`23> --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oigns -- sleep infinity
+
If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:
+$ kubectl run helper --image <image> -n oigns -- sleep infinity
+
For example:
+$ kubectl run helper --image oracle/oig:12.2.1.4-jdk8-ol7-<October`23> -n oigns -- sleep infinity
+
The output will look similar to the following:
+pod/helper created
+
Run the following command to check the pod is running:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+helper 1/1 Running 0 3m
+
Note: If you are pulling the image from a container registry it may take several minutes before the pod reaches a READY state of 1/1. While the pod is starting you can check its status by running the following command:
$ kubectl describe pod helper -n oigns
+
Run the following command to start a bash shell in the helper pod:
+$ kubectl exec -it helper -n <domain_namespace> -- /bin/bash
+
For example:
+$ kubectl exec -it helper -n oigns -- /bin/bash
+
This will take you into a bash shell in the running helper pod:
+[oracle@helper oracle]$
+
In the helper bash shell run the following commands to set the environment:
+[oracle@helper oracle]$ export DB_HOST=<db_host.domain>
+[oracle@helper oracle]$ export DB_PORT=<db_port>
+[oracle@helper oracle]$ export DB_SERVICE=<service_name>
+[oracle@helper oracle]$ export RCUPREFIX=<rcu_schema_prefix>
+[oracle@helper oracle]$ export RCU_SCHEMA_PWD=<rcu_schema_pwd>
+[oracle@helper oracle]$ echo -e <db_pwd>"\n"<rcu_schema_pwd> > /tmp/pwd.txt
+[oracle@helper oracle]$ cat /tmp/pwd.txt
+
where:
+<db_host.domain>
is the database server hostname
<db_port>
is the database listener port
<service_name>
is the database service name
<rcu_schema_prefix>
is the RCU schema prefix you want to set
<rcu_schema_pwd>
is the password you want to set for the <rcu_schema_prefix>
<db_pwd>
is the SYS password for the database
For example:
+[oracle@helper oracle]$ export DB_HOST=mydatabasehost.example.com
+[oracle@helper oracle]$ export DB_PORT=1521
+[oracle@helper oracle]$ export DB_SERVICE=orcl.example.com
+[oracle@helper oracle]$ export RCUPREFIX=OIGK8S
+[oracle@helper oracle]$ export RCU_SCHEMA_PWD=<password>
+[oracle@helper oracle]$ echo -e <password>"\n"<password> > /tmp/pwd.txt
+[oracle@helper oracle]$ cat /tmp/pwd.txt
+<password>
+<password>
+
In the helper bash shell run the following commands to create the RCU schemas in the database:
+[oracle@helper oracle]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \
+$DB_HOST:$DB_PORT/$DB_SERVICE -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \
+-selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component OIM -component MDS -component SOAINFRA -component OPSS \
+-f < /tmp/pwd.txt
+
The output will look similar to the following:
+RCU Logfile: /tmp/RCU<DATE>/logs/rcu.log
+
+Processing command line ....
+Repository Creation Utility - Checking Prerequisites
+Checking Global Prerequisites
+
+
+Repository Creation Utility - Checking Prerequisites
+Checking Component Prerequisites
+Repository Creation Utility - Creating Tablespaces
+Validating and Creating Tablespaces
+Create tablespaces in the repository database
+Repository Creation Utility - Create
+Repository Create in progress.
+ Percent Complete: 10
+Executing pre create operations
+ Percent Complete: 25
+ Percent Complete: 25
+ Percent Complete: 26
+ Percent Complete: 27
+ Percent Complete: 28
+ Percent Complete: 28
+ Percent Complete: 29
+ Percent Complete: 29
+Creating Common Infrastructure Services(STB)
+ Percent Complete: 36
+ Percent Complete: 36
+ Percent Complete: 44
+ Percent Complete: 44
+ Percent Complete: 44
+Creating Audit Services Append(IAU_APPEND)
+ Percent Complete: 51
+ Percent Complete: 51
+ Percent Complete: 59
+ Percent Complete: 59
+ Percent Complete: 59
+Creating Audit Services Viewer(IAU_VIEWER)
+ Percent Complete: 66
+ Percent Complete: 66
+ Percent Complete: 67
+ Percent Complete: 67
+ Percent Complete: 68
+ Percent Complete: 68
+Creating Metadata Services(MDS)
+ Percent Complete: 76
+ Percent Complete: 76
+ Percent Complete: 76
+ Percent Complete: 77
+ Percent Complete: 77
+ Percent Complete: 78
+ Percent Complete: 78
+ Percent Complete: 78
+Creating Weblogic Services(WLS)
+ Percent Complete: 82
+ Percent Complete: 82
+ Percent Complete: 83
+ Percent Complete: 84
+ Percent Complete: 86
+ Percent Complete: 88
+ Percent Complete: 88
+ Percent Complete: 88
+Creating User Messaging Service(UCSUMS)
+ Percent Complete: 92
+ Percent Complete: 92
+ Percent Complete: 95
+ Percent Complete: 95
+ Percent Complete: 100
+Creating Audit Services(IAU)
+Creating Oracle Platform Security Services(OPSS)
+Creating SOA Infrastructure(SOAINFRA)
+Creating Oracle Identity Manager(OIM)
+Executing post create operations
+
+Repository Creation Utility: Create - Completion Summary
+
+Database details:
+-----------------------------
+Host Name : mydatabasehost.example.com
+Port : 1521
+Service Name : ORCL.EXAMPLE.COM
+Connected As : sys
+Prefix for (prefixable) Schema Owners : OIGK8S
+RCU Logfile : /tmp/RCU<DATE>/logs/rcu.log
+
+Component schemas created:
+-----------------------------
+Component Status Logfile
+
+Common Infrastructure Services Success /tmp/RCU<DATE>/logs/stb.log
+Oracle Platform Security Services Success /tmp/RCU<DATE>/logs/opss.log
+SOA Infrastructure Success /tmp/RCU<DATE>/logs/soainfra.log
+Oracle Identity Manager Success /tmp/RCU<DATE>/logs/oim.log
+User Messaging Service Success /tmp/RCU<DATE>/logs/ucsums.log
+Audit Services Success /tmp/RCU<DATE>/logs/iau.log
+Audit Services Append Success /tmp/RCU<DATE>/logs/iau_append.log
+Audit Services Viewer Success /tmp/RCU<DATE>/logs/iau_viewer.log
+Metadata Services Success /tmp/RCU<DATE>/logs/mds.log
+WebLogic Services Success /tmp/RCU<DATE>/logs/wls.log
+
+Repository Creation Utility - Create : Operation Completed
+[oracle@helper oracle]$
+
Run the following command to patch schemas in the database:
+This command should be run if you are using an OIG image that contains OIG bundle patches. If using an OIG image without OIG bundle patches, then you can skip this step.
+[oracle@helper oracle]$ /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \
+-f /u01/oracle/idm/server/setup/deploy-files/automation.xml \
+run-patched-sql-files \
+-logger org.apache.tools.ant.NoBannerLogger \
+-logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \
+-DoperationsDB.host=$DB_HOST \
+-DoperationsDB.port=$DB_PORT \
+-DoperationsDB.serviceName=$DB_SERVICE \
+-DoperationsDB.user=${RCUPREFIX}_OIM \
+-DOIM.DBPassword=$RCU_SCHEMA_PWD \
+-Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar
+
The output will look similar to the following:
+Buildfile: /u01/oracle/idm/server/setup/deploy-files/automation.xml
+
Verify the database was patched successfully by viewing the patch_oim_wls.log
:
[oracle@helper oracle]$ cat /u01/oracle/idm/server/bin/patch_oim_wls.log
+
The output should look similar to the following:
+...
+[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_bkp.sql
+[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_fix.sql
+[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_restore_bkp.sql
+[sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_ddl_alter_pwr_add_column.sql
+[sql] 67 of 67 SQL statements executed successfully
+
+BUILD SUCCESSFUL
+Total time: 6 seconds
+
Exit the helper bash shell by issuing the command exit
.
In this section you prepare the environment for the OIG domain creation. This involves the following steps:
+a. Creating Kubernetes secrets for the domain and RCU
+b. Creating a Kubernetes persistent volume and persistent volume claim
+Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:
+$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
+$ ./create-weblogic-credentials.sh -u weblogic -p <pwd> -n <domain_namespace> -d <domain_uid> -s <kubernetes_domain_secret>
+
where:
+-u weblogic
is the WebLogic username
-p <pwd>
is the password for the WebLogic user
-n <domain_namespace>
is the domain namespace
-d <domain_uid>
is the domain UID to be created. The default is domain1 if not specified
-s <kubernetes_domain_secret>
is the name you want to create for the secret for this namespace. The default is to use the domainUID if not specified
For example:
+$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials
+$ ./create-weblogic-credentials.sh -u weblogic -p <password> -n oigns -d governancedomain -s oig-domain-credentials
+
The output will look similar to the following:
+secret/oig-domain-credentials created
+secret/oig-domain-credentials labeled
+The secret oig-domain-credentials has been successfully created in the oigns namespace.
+
Verify the secret is created using the following command:
+$ kubectl get secret <kubernetes_domain_secret> -o yaml -n <domain_namespace>
+
For example:
+$ kubectl get secret oig-domain-credentials -o yaml -n oigns
+
The output will look similar to the following:
+$ kubectl get secret oig-domain-credentials -o yaml -n oigns
+apiVersion: v1
+data:
+ password: V2VsY29tZTE=
+ username: d2VibG9naWM=
+kind: Secret
+metadata:
+ creationTimestamp: "<DATE>"
+ labels:
+ weblogic.domainName: governancedomain
+ weblogic.domainUID: governancedomain
+ name: oig-domain-credentials
+ namespace: oigns
+ resourceVersion: "3216738"
+ uid: c2ec07e0-0135-458d-bceb-c648d2a9ac54
+type: Opaque
+
Create a Kubernetes secret for RCU in the same Kubernetes namespace as the domain, using the create-rcu-credentials.sh
script:
$ cd $WORKDIR/kubernetes/create-rcu-credentials
+$ ./create-rcu-credentials.sh -u <rcu_prefix> -p <rcu_schema_pwd> -a sys -q <sys_db_pwd> -d <domain_uid> -n <domain_namespace> -s <kubernetes_rcu_secret>
+
where:
+-u <rcu_prefix>
is the name of the RCU schema prefix created previously
-p <rcu_schema_pwd>
is the password for the RCU schema prefix
-a <sys_db_user>
is the database user with SYSDBA privilege
-q <sys_db_pwd>
is the sys database password
-d <domain_uid>
is the domain_uid that you created earlier
-n <domain_namespace>
is the domain namespace
-s <kubernetes_rcu_secret>
is the name of the rcu secret to create
For example:
+$ cd $WORKDIR/kubernetes/create-rcu-credentials
+$ ./create-rcu-credentials.sh -u OIGK8S -p <password> -a sys -q <password> -d governancedomain -n oigns -s oig-rcu-credentials
+
The output will look similar to the following:
+secret/oig-rcu-credentials created
+secret/oig-rcu-credentials labeled
+The secret oig-rcu-credentials has been successfully created in the oigns namespace.
+
Verify the secret is created using the following command:
+$ kubectl get secret <kubernetes_rcu_secret> -o yaml -n <domain_namespace>
+
For example:
+$ kubectl get secret oig-rcu-credentials -o yaml -n oigns
+
The output will look similar to the following:
+apiVersion: v1
+data:
+ password: V2VsY29tZTE=
+ sys_password: V2VsY29tZTE=
+ sys_username: c3lz
+ username: T0lHSzhT
+kind: Secret
+metadata:
+ creationTimestamp: "<DATE>"
+ labels:
+ weblogic.domainName: governancedomain
+ weblogic.domainUID: governancedomain
+ name: oig-rcu-credentials
+ namespace: oigns
+ resourceVersion: "3217023"
+ uid: ce70b91a-fbbc-4839-9616-4cc2c1adeb4f
+type: Opaque
+
As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.
+A persistent volume is the same as a disk mount but is inside a container. A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk.
+When a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.
+The example below uses an NFS mounted volume (<persistent_volume>/governancedomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.
+Note: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example /scratch/shared/governancedomainpv
is accessible from all nodes via NFS.
Make a backup copy of the create-pv-pvc-inputs.yaml
file and create required directories:
$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
+$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
+$ mkdir output
+$ mkdir -p <persistent_volume>/governancedomainpv
+$ sudo chown -R 1000:0 <persistent_volume>/governancedomainpv
+
For example:
+$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
+$ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig
+$ mkdir output
+$ mkdir -p /scratch/shared/governancedomainpv
+$ sudo chown -R 1000:0 /scratch/shared/governancedomainpv
+
On the master node run the following command to ensure it is possible to read and write to the persistent volume:
+cd <persistent_volume>/governancedomainpv
+touch filemaster.txt
+ls filemaster.txt
+
For example:
+cd /scratch/shared/governancedomainpv
+touch filemaster.txt
+ls filemaster.txt
+
On the first worker node run the following to ensure it is possible to read and write to the persistent volume:
+cd /scratch/shared/governancedomainpv
+ls filemaster.txt
+touch fileworker1.txt
+ls fileworker1.txt
+
+Repeat the above for any other worker nodes, e.g. fileworker2.txt etc. Once you have proven that it is possible to read and write from each node to the persistent volume, delete the files created.
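For example, a minimal clean-up from any one node once the checks have passed (assuming only the test files shown above were created):
+$ cd /scratch/shared/governancedomainpv
+$ rm filemaster.txt fileworker1.txt fileworker2.txt
+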
+Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
:
$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc
+
and edit the create-pv-pvc-inputs.yaml
file and update the following parameters to reflect your settings. Save the file when complete:
baseName: <domain>
+domainUID: <domain_uid>
+namespace: <domain_namespace>
+weblogicDomainStorageType: NFS
+weblogicDomainStorageNFSServer: <nfs_server>
+weblogicDomainStoragePath: <physical_path_of_persistent_storage>
+weblogicDomainStorageSize: 10Gi
+
For example:
+# The base name of the pv and pvc
+baseName: domain
+
+# Unique ID identifying a domain.
+# If left empty, the generated pv can be shared by multiple domains
+# This ID must not contain an underscore ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster.
+domainUID: governancedomain
+
+# Name of the namespace for the persistent volume claim
+namespace: oigns
+
+# Persistent volume type for the persistent storage.
+# The value must be 'HOST_PATH' or 'NFS'.
+# If using 'NFS', weblogicDomainStorageNFSServer must be specified.
+weblogicDomainStorageType: NFS
+
+# The server name or ip address of the NFS server to use for the persistent storage.
+# The following line must be uncommented and customized if weblogicDomainStorageType is NFS:
+weblogicDomainStorageNFSServer: mynfsserver
+
+# Physical path of the persistent storage.
+# When weblogicDomainStorageType is set to HOST_PATH, this value should be set the to path to the
+# domain storage on the Kubernetes host.
+# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set
+# to the IP address or name of the NFS server, and this value should be set to the exported path
+# on that server.
+# Note that the path where the domain is mounted in the WebLogic containers is not affected by this
+# setting, that is determined when you create your domain.
+# The following line must be uncommented and customized:
+weblogicDomainStoragePath: /scratch/shared/governancedomainpv
+
+# Reclaim policy of the persistent storage
+# The valid values are: 'Retain', 'Delete', and 'Recycle'
+weblogicDomainStorageReclaimPolicy: Retain
+
+# Total storage allocated to the persistent storage.
+weblogicDomainStorageSize: 10Gi
+
Execute the create-pv-pvc.sh
script to create the PV and PVC configuration files:
$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output
+
The output will be similar to the following:
+Input parameters being used
+export version="create-weblogic-sample-domain-pv-pvc-inputs-v1"
+export baseName="domain"
+export domainUID="governancedomain"
+export namespace="oigns"
+export weblogicDomainStorageType="NFS"
+export weblogicDomainStorageNFSServer="mynfsserver"
+export weblogicDomainStoragePath="/scratch/shared/governancedomainpv"
+export weblogicDomainStorageReclaimPolicy="Retain"
+export weblogicDomainStorageSize="10Gi"
+
+Generating output/pv-pvcs/governancedomain-domain-pv.yaml
+Generating output/pv-pvcs/governancedomain-domain-pvc.yaml
+The following files were generated:
+ output/pv-pvcs/governancedomain-domain-pv.yaml
+ output/pv-pvcs/governancedomain-domain-pvc.yaml
+
+Completed
+
Run the following to show the files are created:
+$ ls output/pv-pvcs
+create-pv-pvc-inputs.yaml governancedomain-domain-pv.yaml governancedomain-domain-pvc.yaml
+
Run the following kubectl
command to create the PV and PVC in the domain namespace:
$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n <domain_namespace>
+$ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n <domain_namespace>
+
For example:
+$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n oigns
+$ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n oigns
+
The output will look similar to the following:
+persistentvolume/governancedomain-domain-pv created
+persistentvolumeclaim/governancedomain-domain-pvc created
+
Run the following commands to verify the PV and PVC were created successfully:
+$ kubectl describe pv <pv_name>
+$ kubectl describe pvc <pvc_name> -n <domain_namespace>
+
For example:
+$ kubectl describe pv governancedomain-domain-pv
+$ kubectl describe pvc governancedomain-domain-pvc -n oigns
+
The output will look similar to the following:
+$ kubectl describe pv governancedomain-domain-pv
+
+Name: governancedomain-domain-pv
+Labels: weblogic.domainUID=governancedomain
+Annotations: pv.kubernetes.io/bound-by-controller: yes
+Finalizers: [kubernetes.io/pv-protection]
+StorageClass: governancedomain-domain-storage-class
+Status: Bound
+Claim: oigns/governancedomain-domain-pvc
+Reclaim Policy: Retain
+Access Modes: RWX
+VolumeMode: Filesystem
+Capacity: 10Gi
+Node Affinity: <none>
+Message:
+Source:
+ Type: NFS (an NFS mount that lasts the lifetime of a pod)
+ Server: mynfsserver
+ Path: /scratch/shared/governancedomainpv
+ ReadOnly: false
+Events: <none>
+
$ kubectl describe pvc governancedomain-domain-pvc -n oigns
+
+Name: governancedomain-domain-pvc
+Namespace: oigns
+StorageClass: governancedomain-domain-storage-class
+Status: Bound
+Volume: governancedomain-domain-pv
+Labels: weblogic.domainUID=governancedomain
+Annotations: pv.kubernetes.io/bind-completed: yes
+ pv.kubernetes.io/bound-by-controller: yes
+Finalizers: [kubernetes.io/pvc-protection]
+Capacity: 10Gi
+Access Modes: RWX
+VolumeMode: Filesystem
+Mounted By: <none>
+Events: <none>
+
You are now ready to create the OIG domain as per Create OIG Domains.
+This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 4.1.2.
+A running Kubernetes cluster that meets the following requirements:
+You must have the cluster-admin
role to install the WebLogic Kubernetes Operator.
+The system clocks on the cluster nodes must be synchronized. Run the date
command simultaneously on all the nodes in each cluster and then synchronize accordingly.
+A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OIG as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases.
+Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor-specific documentation for this information. Also see Getting Started.
+Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OIG domains:
+Review the latest changes and known issues for Oracle Identity Governance on Kubernetes.
Date | Version | Change
---|---|---
October, 2023 | 23.4.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | This release contains the following changes:
 | | Support for WebLogic Kubernetes Operator 4.1.2.
 | | Ability to set resource requests and limits for CPU and memory on a cluster resource. See Setting the OIM server memory parameters.
 | | Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See Kubernetes Horizontal Pod Autoscaler.
 | | If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.1.2
 | | 2. Patch the OIG container image to October 23
 | | If upgrading to October 23 (23.4.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.1.2
 | | 2. Patch the OIG container image to October 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana
 | | See Patch and Upgrade for these instructions.
July, 2023 | 23.3.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | If upgrading to July 23 (23.3.1) from April 23 (23.2.1), upgrade as follows:
 | | 1. Patch the OIG container image to July 23
 | | If upgrading to July 23 (23.3.1) from October 22 (22.4.1), or January 23 (23.1.1) release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OIG container image to July 23
 | | If upgrading to July 23 (23.3.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OIG container image to July 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana
 | | See Patch and Upgrade for these instructions.
April, 2023 | 23.2.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | Support for WebLogic Kubernetes Operator 4.0.4.
 | | Changes to stopping/starting pods due to domain and cluster configuration being separated and parameter changes (IF_NEEDED, NEVER to IfNeeded, Never).
 | | If upgrading to April 23 (23.2.1) from October 22 (22.4.1) or later, you must upgrade in the following order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OIG container image to April 23
 | | If upgrading to April 23 (23.2.1) from a release prior to October 22 (22.4.1), you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 4.0.4
 | | 2. Patch the OIG container image to April 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana. See Patch and Upgrade for these instructions.
January, 2023 | 23.1.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | If upgrading to January 23 (23.1.1) from October 22 (22.4.1) release, you only need to patch the OIG container image to January 23.
 | | If upgrading to January 23 (23.1.1) from a release prior to October 22 (22.4.1) release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 3.4.2
 | | 2. Patch the OIG container image to January 23
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana. See Patch and Upgrade for these instructions.
October, 2022 | 22.4.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
 | | Support for WebLogic Kubernetes Operator 3.4.2.
 | | Additional Ingress mappings added.
 | | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.
 | | OIG container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support.
 | | If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order:
 | | 1. WebLogic Kubernetes Operator to 3.4.2
 | | 2. Patch the OIG container image to October 22
 | | 3. Upgrade the Ingress
 | | 4. Upgrade Elasticsearch and Kibana
 | | See Patch and Upgrade for these instructions.
July, 2022 | 22.3.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
April, 2022 | 22.2.1 | Updated for CRI-O support.
November, 2021 | 21.4.2 | Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.
October 2021 | 21.4.1 | A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) New section on how to start Design Console in a container. E) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific.
November 2020 | 20.4.1 | Initial release of Identity Governance on Kubernetes.
If the OIG domain creation fails when running create-domain.sh
, run the following to diagnose the issue:
Run the following command to diagnose the create domain job:
+$ kubectl logs <job_name> -n <domain_namespace>
+
For example:
+$ kubectl logs governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns
+
Also run:
+$ kubectl describe pod <job_domain> -n <domain_namespace>
+
For example:
+$ kubectl describe pod governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns
+
Using the output you should be able to diagnose the problem and resolve the issue.
+Clean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, Kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OIG domain creation steps again.
+If any of the above commands return the following error:
+Failed to start container "create-fmw-infra-sample-domain-job": Error response from daemon: error while creating mount source path
+'/scratch/shared/governancedomainpv ': mkdir /scratch/shared/governancedomainpv : permission denied
+
then there is a permissions error on the directory for the PV and PVC and the following should be checked:
+a) The directory has 777 permissions: chmod -R 777 <persistent_volume>/governancedomainpv
.
b) If it does have the permissions, check if an oracle
user exists and the uid
and gid
equal 1000
, for example:
$ uid=1000(oracle) gid=1000(spg) groups=1000(spg),59968(oinstall),8500(dba),100(users),1007(cgbudba)
+
Create the oracle
user if it doesn’t exist and set the uid
and gid
to 1000
.
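For example, a minimal sketch of creating such a user (the group name oracle shown here is an assumption; use the group mapping required by your environment):
+$ sudo groupadd -g 1000 oracle
+$ sudo useradd -u 1000 -g 1000 oracle
+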
c) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml
and add a slash to the end of the directory for the weblogicDomainStoragePath
parameter:
weblogicDomainStoragePath: /scratch/shared/governancedomainpv/
+
+Clean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, Kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OIG domain creation steps again.
+The instructions in this section relate to problems patching a deployment with a new image as per Patch an image.
+If the OIG domain patching fails when running patch_oig_domain.sh
, run the following to diagnose the issue:
$ kubectl describe domain <domain name> -n <domain_namespace>
+
For example:
+$ kubectl describe domain governancedomain -n oigns
+
Using the output you should be able to diagnose the problem and resolve the issue.
+If the domain is already patched successfully and the script failed at the last step of waiting for pods to come up with the new image, then you do not need to rerun the script again after issue resolution. The pods will come up automatically once you resolve the underlying issue.
+If the script is stuck at the following message for a long time:
+"[INFO] Waiting for weblogic pods to be ready..This may take several minutes, do not close the window. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-<DATE>/monitor_weblogic_pods.log for progress"
+
run the following command to diagnose the issue:
+$ kubectl get pods -n <domain_namespace>
+
For example:
+$ kubectl get pods -n oigns
+
Run the following to check the logs of the AdminServer, SOA server or OIM server pods, as there may be an issue that is not allowing the domain pods to start properly:
+$ kubectl logs <pod> -n oigns
+
If the above does not glean any information you can also run:
+$ kubectl describe pod <pod> -n oigns
+
Further diagnostic logs can also be found under the $WORKDIR/kubernetes/domain-lifecycle
directory.
Once any issue is resolved the pods will come up automatically without the need to rerun the script.
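To monitor the pods while they restart, you can watch the namespace, for example:
+$ kubectl get pods -n oigns -w
+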
+In this section you validate the OIG domain URLs that are accessible via the NGINX ingress.
+Make sure you know the master hostname and port before proceeding.
+Launch a browser and access the following URLs. Use http
or https
depending on whether you configured your ingress for non-SSL or SSL.
Login to the WebLogic Administration Console and Oracle Enterprise Manager Console with the WebLogic username and password (weblogic/<password>
).
Login to Oracle Identity Governance with the xelsysadm username and password (xelsysadm/<password>
).
Note: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}
with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}
.
Console or Page | URL
---|---
WebLogic Administration Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
Oracle Enterprise Manager Console | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em
Oracle Identity System Administration | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/sysadmin
Oracle Identity Self Service | https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity
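If preferred, the URLs can also be checked quickly from the command line before using a browser. For example (the -k option skips certificate verification for self-signed certificates; substitute your own hostname and port):
+$ curl -k -I https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console
+$ curl -k -I https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity
+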
Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OIG domain. To control the Administration Server and OIG Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.
+The browser will give certificate errors if you used a self-signed certificate and have not imported it into the browser's Certificate Authority store. If this occurs you can proceed with the connection and ignore the errors.
+After the URLs have been verified follow Post install configuration.
+Access to interfaces through ingress
+a. Changes in /etc/hosts to validate hostname based ingress rules
+The instructions below explain how to set up NGINX as an ingress for OUD.
+By default the ingress configuration only supports HTTP and HTTPS ports. To allow LDAP and LDAPS communication over TCP, configuration is required at the ingress controller/implementation level.
+Use Helm to install NGINX.
+Add the Helm chart repository for installing NGINX using the following command:
+$ helm repo add stable https://kubernetes.github.io/ingress-nginx
+
The output will look similar to the following:
+"stable" has been added to your repositories
+
Update the repository using the following command:
+$ helm repo update
+
The output will look similar to the following:
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+Update Complete. Happy Helming!
+
Create a Kubernetes namespace for NGINX:
+$ kubectl create namespace <namespace>
+
For example:
+$ kubectl create namespace mynginx
+
The output will look similar to the following:
+namespace/mynginx created
+
Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml
that contains the following:
Note: The configuration below assumes OUD is installed with value oud-ds-rs
as the deployment/release name in the namespace oudns
. If using a different deployment name and/or namespace change appropriately.
# Configuration for additional TCP ports to be exposed through Ingress
+# Format for each port would be like:
+# <PortNumber>: <Namespace>/<Service>
+tcp:
+ # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port
+ 1389: oudns/oud-ds-rs-lbr-ldap:ldap
+ # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port
+ 1636: oudns/oud-ds-rs-lbr-ldap:ldaps
+controller:
+ admissionWebhooks:
+ enabled: false
+ extraArgs:
+ # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server.
+ # If this flag is not provided NGINX will use a self-signed certificate.
+ # If the TLS Secret is in different namespace, name can be mentioned as <namespace>/<tlsSecretName>
+ default-ssl-certificate: oudns/oud-ds-rs-tls-cert
+ service:
+ # controller service external IP addresses
+ # externalIPs:
+ # - < External IP Address >
+ # To configure Ingress Controller Service as LoadBalancer type of Service
+ # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service
+ type: LoadBalancer
+ # Configuration for NodePort to be used for Ports exposed through Ingress
+ # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes
+ # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer.
+ nodePorts:
+ # For HTTP Interface exposed through LoadBalancer/Ingress
+ http: 30080
+ # For HTTPS Interface exposed through LoadBalancer/Ingress
+ https: 30443
+ tcp:
+ # For LDAP Interface
+ 1389: 31389
+ # For LDAPS Interface
+ 1636: 31636
+
To install and configure NGINX Ingress issue the following command:
+$ helm install --namespace <namespace> \
+--values nginx-ingress-values-override.yaml \
+lbr-nginx stable/ingress-nginx
+
Where:
+lbr-nginx
is your deployment namestable/ingress-nginx
is the chart referenceFor example:
+$ helm install --namespace mynginx \
+--values nginx-ingress-values-override.yaml \
+lbr-nginx stable/ingress-nginx
+
The output will look similar to the following:
+NAME: lbr-nginx
+LAST DEPLOYED: <DATE>
+NAMESPACE: mynginx
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The ingress-nginx controller has been installed.
+It may take a few minutes for the LoadBalancer IP to be available.
+You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller'
+
+An example Ingress that makes use of the controller:
+
+ apiVersion: networking.k8s.io/v1beta1
+ kind: Ingress
+ metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ name: example
+ namespace: foo
+ spec:
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - backend:
+ serviceName: exampleService
+ servicePort: 80
+ path: /
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
helm upgrade
to update nginx-ingressIf required, an nginx-ingress deployment can be updated/upgraded with following command. In this example, nginx-ingress configuration is updated with an additional TCP port and Node Port for accessing the LDAP/LDAPS port of a specific POD:
+Create a nginx-ingress-values-override.yaml
that contains the following:
# Configuration for additional TCP ports to be exposed through Ingress
+# Format for each port would be like:
+# <PortNumber>: <Namespace>/<Service>
+tcp:
+ # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port
+ 1389: oudns/oud-ds-rs-lbr-ldap:ldap
+ # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port
+ 1636: oudns/oud-ds-rs-lbr-ldap:ldaps
+ # Map specific ports for LDAP and LDAPS communication from individual Services/Pods
+ # To redirect requests on 3890 port to oudns/oud-ds-rs-ldap-0:ldap
+ 3890: oudns/oud-ds-rs-ldap-0:ldap
+ # To redirect requests on 6360 port to oudns/oud-ds-rs-ldaps-0:ldap
+ 6360: oudns/oud-ds-rs-ldap-0:ldaps
+ # To redirect requests on 3891 port to oudns/oud-ds-rs-ldap-1:ldap
+ 3891: oudns/oud-ds-rs-ldap-1:ldap
+ # To redirect requests on 6361 port to oudns/oud-ds-rs-ldaps-1:ldap
+ 6361: oudns/oud-ds-rs-ldap-1:ldaps
+ # To redirect requests on 3892 port to oudns/oud-ds-rs-ldap-2:ldap
+ 3892: oudns/oud-ds-rs-ldap-2:ldap
+ # To redirect requests on 6362 port to oudns/oud-ds-rs-ldaps-2:ldap
+ 6362: oudns/oud-ds-rs-ldap-2:ldaps
+ # Map 1444 TCP port to LBR Admin service to get requests handled through any available POD/Endpoint serving Admin LDAPS Port
+ 1444: oudns/oud-ds-rs-lbr-admin:adminldaps
+ # To redirect requests on 4440 port to oudns/oud-ds-rs-0:adminldaps
+ 4440: oudns/oud-ds-rs-0:adminldaps
+ # To redirect requests on 4441 port to oudns/oud-ds-rs-1:adminldaps
+ 4441: oudns/oud-ds-rs-1:adminldaps
+ # To redirect requests on 4442 port to oudns/oud-ds-rs-2:adminldaps
+ 4442: oudns/oud-ds-rs-2:adminldaps
+controller:
+ admissionWebhooks:
+ enabled: false
+ extraArgs:
+ # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server.
+ # If this flag is not provided NGINX will use a self-signed certificate.
+ # If the TLS Secret is in different namespace, name can be mentioned as <namespace>/<tlsSecretName>
+ default-ssl-certificate: oudns/oud-ds-rs-tls-cert
+ service:
+ # controller service external IP addresses
+ # externalIPs:
+ # - < External IP Address >
+ # To configure Ingress Controller Service as LoadBalancer type of Service
+ # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service
+ type: LoadBalancer
+ # Configuration for NodePort to be used for Ports exposed through Ingress
+ # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes
+ # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer.
+ nodePorts:
+ # For HTTP Interface exposed through LoadBalancer/Ingress
+ http: 30080
+ # For HTTPS Interface exposed through LoadBalancer/Ingress
+ https: 30443
+ tcp:
+ # For LDAP Interface referring to LBR LDAP services serving LDAP port
+ 1389: 31389
+ # For LDAPS Interface referring to LBR LDAP services serving LDAPS port
+ 1636: 31636
+ # For LDAP Interface from specific service oud-ds-rs-ldap-0
+ 3890: 30890
+ # For LDAPS Interface from specific service oud-ds-rs-ldap-0
+ 6360: 30360
+ # For LDAP Interface from specific service oud-ds-rs-ldap-1
+ 3891: 30891
+ # For LDAPS Interface from specific service oud-ds-rs-ldap-1
+ 6361: 30361
+ # For LDAP Interface from specific service oud-ds-rs-ldap-2
+ 3892: 30892
+ # For LDAPS Interface from specific service oud-ds-rs-ldap-2
+ 6362: 30362
+ # For LDAPS Interface referring to LBR Admin services serving adminldaps port
+ 1444: 31444
+ # For Admin LDAPS Interface from specific service oud-ds-rs-0
+ 4440: 30440
+ # For Admin LDAPS Interface from specific service oud-ds-rs-1
+ 4441: 30441
+ # For Admin LDAPS Interface from specific service oud-ds-rs-2
+ 4442: 30442
+
Run the following command to upgrade the ingress:
+$ helm upgrade --namespace <namespace> \
+--values nginx-ingress-values-override.yaml \
+lbr-nginx stable/ingress-nginx
+
Where:
+lbr-nginx
is your deployment namestable/ingress-nginx
is the chart referenceFor example:
+$ helm upgrade --namespace mynginx \
+--values nginx-ingress-values-override.yaml \
+lbr-nginx stable/ingress-nginx
+
Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Interfaces through ingress.
Port | NodePort | Host | Example Hostname | Path | Backend Service:Port | Example Service Name:Port
---|---|---|---|---|---|---
http/https | 30080/30443 | <deployment/release name>-admin-0 | oud-ds-rs-admin-0 | * | <deployment/release name>-0:adminhttps | oud-ds-rs-0:adminhttps
http/https | 30080/30443 | <deployment/release name>-admin-N | oud-ds-rs-admin-N | * | <deployment/release name>-N:adminhttps | oud-ds-rs-1:adminhttps
http/https | 30080/30443 | <deployment/release name>-admin | oud-ds-rs-admin | * | <deployment/release name>-lbr-admin:adminhttps | oud-ds-rs-lbr-admin:adminhttps
http/https | 30080/30443 | * | * | /rest/v1/admin | <deployment/release name>-lbr-admin:adminhttps | oud-ds-rs-lbr-admin:adminhttps
http/https | 30080/30443 | <deployment/release name>-http-0 | oud-ds-rs-http-0 | * | <deployment/release name>-http-0:http | oud-ds-rs-http-0:http
http/https | 30080/30443 | <deployment/release name>-http-N | oud-ds-rs-http-N | * | <deployment/release name>-http-N:http | oud-ds-rs-http-N:http
http/https | 30080/30443 | <deployment/release name>-http | oud-ds-rs-http | * | <deployment/release name>-lbr-http:http | oud-ds-rs-lbr-http:http
http/https | 30080/30443 | * | * | /rest/v1/directory | <deployment/release name>-lbr-http:http | oud-ds-rs-lbr-http:http
http/https | 30080/30443 | * | * | /iam/directory | <deployment/release name>-lbr-http:http | oud-ds-rs-lbr-http:http
++In the table above, example values are based on the value ‘oud-ds-rs’ as the deployment/release name for Helm chart installation. The NodePorts mentioned in the table are according to ingress configuration described in previous section. When External LoadBalancer is not available/configured, interfaces can be accessed through NodePort on a Kubernetes node.
+
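To see the ingress objects created by the chart, you can run, for example:
+$ kubectl get ingress -n oudns
+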
For LDAP/LDAPS access (based on the updated/upgraded configuration mentioned in previous section)
Port | NodePort | Backend Service:Port | Example Service Name:Port
---|---|---|---
1389 | 31389 | <deployment/release name>-lbr-ldap:ldap | oud-ds-rs-lbr-ldap:ldap
1636 | 31636 | <deployment/release name>-lbr-ldap:ldaps | oud-ds-rs-lbr-ldap:ldaps
1444 | 31444 | <deployment/release name>-lbr-admin:adminldaps | oud-ds-rs-lbr-admin:adminldaps
3890 | 30890 | <deployment/release name>-ldap-0:ldap | oud-ds-rs-ldap-0:ldap
6360 | 30360 | <deployment/release name>-ldap-0:ldaps | oud-ds-rs-ldap-0:ldaps
3891 | 30891 | <deployment/release name>-ldap-1:ldap | oud-ds-rs-ldap-1:ldap
6361 | 30361 | <deployment/release name>-ldap-1:ldaps | oud-ds-rs-ldap-1:ldaps
3892 | 30892 | <deployment/release name>-ldap-2:ldap | oud-ds-rs-ldap-2:ldap
6362 | 30362 | <deployment/release name>-ldap-2:ldaps | oud-ds-rs-ldap-2:ldaps
4440 | 30440 | <deployment/release name>-0:adminldaps | oud-ds-rs-0:adminldaps
4441 | 30441 | <deployment/release name>-1:adminldaps | oud-ds-rs-1:adminldaps
4442 | 30442 | <deployment/release name>-2:adminldaps | oud-ds-rs-2:adminldaps
If it is not possible to have a LoadBalancer configuration updated to have host names added for Oracle Unified Directory Interfaces then the following entries can be added in /etc/hosts
files on the host from where Oracle Unified Directory interfaces will be accessed.
<IP Address of External LBR or Kubernetes Node> oud-ds-rs-http oud-ds-rs-http-0 oud-ds-rs-http-1 oud-ds-rs-http-2 oud-ds-rs-http-N
+<IP Address of External LBR or Kubernetes Node> oud-ds-rs-admin oud-ds-rs-admin-0 oud-ds-rs-admin-1 oud-ds-rs-admin-2 oud-ds-rs-admin-N
+
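After adding the entries, you can confirm that a given hostname resolves on that host, for example:
+$ getent hosts oud-ds-rs-admin-0
+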
To use Oracle LDAP utilities such as ldapbind
, ldapsearch
, ldapmodify
etc. you can either:
Run the LDAP commands from an OUD installation outside the Kubernetes cluster. This requires access to an On-Premises OUD installation outside the Kubernetes cluster.
+Run the LDAP commands from inside the OUD Kubernetes pod.
+$ kubectl exec -ti <pod> -n <namespace> -- bash
+
For example:
+$ kubectl exec -ti oud-ds-rs-0 -n oudns -- bash
+
This will take you into a bash session in the pod:
+[oracle@oud-ds-rs-0 oracle]$
+
Inside the container navigate to /u01/oracle/oud/bin
to view the LDAP utilties:
[oracle@oud-ds-rs-0 oracle]$ cd /u01/oracle/oud/bin
+[oracle@oud-ds-rs-0 bin]$ ls ldap*
+ldapcompare ldapdelete ldapmodify ldappasswordmodify ldapsearch
+
Note: For commands that require an ldif file, copy the file into the <persistent_volume>/oud_user_projects
directory:
$ cp file.ldif <persistent_volume>/oud_user_projects
+
For example:
+$ cp file.ldif /scratch/shared/oud_user_projects
+
The file can then be viewed inside the pod:
+[oracle@oud-ds-rs-0 bin]$ cd /u01/oracle/user_projects
+[oracle@oud-ds-rs-0 user_projects]$ ls *.ldif
+file.ldif
+
Note: The examples assume sample data was installed when creating the OUD instance.
+Note: If your ingress is configured with type: LoadBalancer
then you cannot connect to the external LoadBalancer hostname and ports from inside the pod and must connect from an OUD installation outside the cluster.
Command to perform ldapsearch
against External LBR and LDAP port
$OUD_HOME/bin/ldapsearch --hostname <External LBR> --port 1389 \
+-D "<Root User DN>" -w <Password for Root User DN> \
+-b "" -s base "(objectClass=*)" "*"
+
The output will look similar to the following:
+dn:
+objectClass: top
+objectClass: ds-root-dse
+lastChangeNumber: 0
+firstChangeNumber: 0
+changelog: cn=changelog
+entryDN:
+pwdPolicySubentry: cn=Default Password Policy,cn=Password Policies,cn=config
+subschemaSubentry: cn=schema
+supportedAuthPasswordSchemes: SHA256
+supportedAuthPasswordSchemes: SHA1
+supportedAuthPasswordSchemes: SHA384
+supportedAuthPasswordSchemes: SHA512
+supportedAuthPasswordSchemes: MD5
+numSubordinates: 1
+supportedFeatures: 1.3.6.1.1.14
+supportedFeatures: 1.3.6.1.4.1.4203.1.5.1
+supportedFeatures: 1.3.6.1.4.1.4203.1.5.2
+supportedFeatures: 1.3.6.1.4.1.4203.1.5.3
+lastExternalChangelogCookie:
+vendorName: Oracle Corporation
+vendorVersion: Oracle Unified Directory 12.2.1.4.0
+componentVersion: 4
+releaseVersion: 1
+platformVersion: 0
+supportedLDAPVersion: 2
+supportedLDAPVersion: 3
+supportedControl: 1.2.826.0.1.3344810.2.3
+supportedControl: 1.2.840.113556.1.4.1413
+supportedControl: 1.2.840.113556.1.4.319
+supportedControl: 1.2.840.113556.1.4.473
+supportedControl: 1.2.840.113556.1.4.805
+supportedControl: 1.3.6.1.1.12
+supportedControl: 1.3.6.1.1.13.1
+supportedControl: 1.3.6.1.1.13.2
+supportedControl: 1.3.6.1.4.1.26027.1.5.2
+supportedControl: 1.3.6.1.4.1.26027.1.5.4
+supportedControl: 1.3.6.1.4.1.26027.1.5.5
+supportedControl: 1.3.6.1.4.1.26027.1.5.6
+supportedControl: 1.3.6.1.4.1.26027.2.3.1
+supportedControl: 1.3.6.1.4.1.26027.2.3.2
+supportedControl: 1.3.6.1.4.1.26027.2.3.4
+supportedControl: 1.3.6.1.4.1.42.2.27.8.5.1
+supportedControl: 1.3.6.1.4.1.42.2.27.9.5.2
+supportedControl: 1.3.6.1.4.1.42.2.27.9.5.8
+supportedControl: 1.3.6.1.4.1.4203.1.10.1
+supportedControl: 1.3.6.1.4.1.4203.1.10.2
+supportedControl: 2.16.840.1.113730.3.4.12
+supportedControl: 2.16.840.1.113730.3.4.16
+supportedControl: 2.16.840.1.113730.3.4.17
+supportedControl: 2.16.840.1.113730.3.4.18
+supportedControl: 2.16.840.1.113730.3.4.19
+supportedControl: 2.16.840.1.113730.3.4.2
+supportedControl: 2.16.840.1.113730.3.4.3
+supportedControl: 2.16.840.1.113730.3.4.4
+supportedControl: 2.16.840.1.113730.3.4.5
+supportedControl: 2.16.840.1.113730.3.4.9
+supportedControl: 2.16.840.1.113894.1.8.21
+supportedControl: 2.16.840.1.113894.1.8.31
+supportedControl: 2.16.840.1.113894.1.8.36
+maintenanceVersion: 2
+supportedSASLMechanisms: PLAIN
+supportedSASLMechanisms: EXTERNAL
+supportedSASLMechanisms: CRAM-MD5
+supportedSASLMechanisms: DIGEST-MD5
+majorVersion: 12
+orclGUID: D41D8CD98F003204A9800998ECF8427E
+entryUUID: d41d8cd9-8f00-3204-a980-0998ecf8427e
+ds-private-naming-contexts: cn=schema
+hasSubordinates: true
+nsUniqueId: d41d8cd9-8f003204-a9800998-ecf8427e
+structuralObjectClass: ds-root-dse
+supportedExtension: 1.3.6.1.4.1.4203.1.11.1
+supportedExtension: 1.3.6.1.4.1.4203.1.11.3
+supportedExtension: 1.3.6.1.1.8
+supportedExtension: 1.3.6.1.4.1.26027.1.6.3
+supportedExtension: 1.3.6.1.4.1.26027.1.6.2
+supportedExtension: 1.3.6.1.4.1.26027.1.6.1
+supportedExtension: 1.3.6.1.4.1.1466.20037
+namingContexts: cn=changelog
+namingContexts: dc=example,dc=com
+
Command to perform ldapsearch
against External LBR and LDAP port for specific Oracle Unified Directory Interface
$OUD_HOME/bin/ldapsearch --hostname <External LBR> --port 3890 \
+-D "<Root User DN>" -w <Password for Root User DN> \
+-b "" -s base "(objectClass=*)" "*"
+
In the example below LDAP utilities are executed from inside the oud-ds-rs-0
pod. If your ingress is configured with type: LoadBalancer
you can connect to the Kubernetes hostname where the ingress is deployed using the NodePorts.
Command to perform ldapsearch
against Kubernetes NodePort and LDAPS port
[oracle@oud-ds-rs-0 bin]$ ./ldapsearch --hostname <Kubernetes Node> --port 31636 \
+--useSSL --trustAll \
+-D "<Root User DN>" -w <Password for Root User DN> \
+-b "" -s base "(objectClass=*)" "*"
+
Note: In all the examples below:
+a) You need to have an external IP assigned at ingress level.
+b) | json_pp
is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp
library.
c) Base64 of userDN:userPassword
can be generated using echo -n "userDN:userPassword" | base64
.
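For example, assuming the root user DN is cn=Directory Manager (values shown are illustrative only):
+$ echo -n "cn=Directory Manager:<password>" | base64
+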
Command to invoke Data REST API:
+$ curl --noproxy "*" -k --location \
+--request GET 'https://<External LBR Host>/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub&attributes=*' \
+--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
+
The output will look similar to the following:
+{
+ "msgType" : "urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse",
+ "totalResults" : 1,
+ "searchResultEntries" : [
+ {
+ "dn" : "uid=user.1,ou=People,dc=example,dc=com",
+ "attributes" : {
+ "st" : "OH",
+ "employeeNumber" : "1",
+ "postalCode" : "93694",
+ "description" : "This is the description for Aaren Atp.",
+ "telephoneNumber" : "+1 390 103 6917",
+ "homePhone" : "+1 280 375 4325",
+ "initials" : "ALA",
+ "objectClass" : [
+ "top",
+ "inetorgperson",
+ "organizationalperson",
+ "person"
+ ],
+ "uid" : "user.1",
+ "sn" : "Atp",
+ "street" : "70110 Fourth Street",
+ "mobile" : "+1 680 734 6300",
+ "givenName" : "Aaren",
+ "mail" : "user.1@maildomain.net",
+ "l" : "New Haven",
+ "postalAddress" : "Aaren Atp$70110 Fourth Street$New Haven, OH 93694",
+ "pager" : "+1 850 883 8888",
+ "cn" : "Aaren Atp"
+ }
+ }
+ ]
+}
+
Command to invoke Data REST API against specific Oracle Unified Directory Interface:
+$ curl --noproxy "*" -k --location \
+--request GET 'https://oud-ds-rs-http-0/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub&attributes=*' \
+--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
+
Note: In all the examples below:
+a) | json_pp
is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp
library.
b) Base64 of userDN:userPassword
can be generated using echo -n "userDN:userPassword" | base64
.
c) It is assumed that the value ‘oud-ds-rs’ is used as the deployment/release name for helm chart installation.
+Command to invoke Data SCIM API:
+$ curl --noproxy "*" -k --location \
+--request GET 'https://<Kubernetes Node>:30443/iam/directory/oud/scim/v1/Users' \
+--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
+
The output will look similar to the following:
+{
+ "Resources" : [
+ {
+ "id" : "ad55a34a-763f-358f-93f9-da86f9ecd9e4",
+ "userName" : [
+ {
+ "value" : "user.0"
+ }
+ ],
+ "schemas" : [
+ "urn:ietf:params:scim:schemas:core:2.0:User",
+ "urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User",
+ "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User"
+ ],
+ "meta" : {
+ "location" : "http://<Kubernetes Node>:30443/iam/directory/oud/scim/v1/Users/ad55a34a-763f-358f-93f9-da86f9ecd9e4",
+ "resourceType" : "User"
+ },
+ "addresses" : [
+ {
+ "postalCode" : "50369",
+ "formatted" : "Aaccf Amar$01251 Chestnut Street$Panama City, DE 50369",
+ "streetAddress" : "01251 Chestnut Street",
+ "locality" : "Panama City",
+ "region" : "DE"
+ }
+ ],
+ "urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User" : {
+ "description" : [
+ {
+ "value" : "This is the description for Aaccf Amar."
+ }
+ ],
+ "mobile" : [
+ {
+ "value" : "+1 010 154 3228"
+ }
+ ],
+ "pager" : [
+ {
+ "value" : "+1 779 041 6341"
+ }
+ ],
+ "objectClass" : [
+ {
+ "value" : "top"
+ },
+ {
+ "value" : "organizationalperson"
+ },
+ {
+ "value" : "person"
+ },
+ {
+ "value" : "inetorgperson"
+ }
+ ],
+ "initials" : [
+ {
+ "value" : "ASA"
+ }
+ ],
+ "homePhone" : [
+ {
+ "value" : "+1 225 216 5900"
+ }
+ ]
+ },
+ "name" : [
+ {
+ "givenName" : "Aaccf",
+ "familyName" : "Amar",
+ "formatted" : "Aaccf Amar"
+ }
+ ],
+ "emails" : [
+ {
+ "value" : "user.0@maildomain.net"
+ }
+ ],
+ "phoneNumbers" : [
+ {
+ "value" : "+1 685 622 6202"
+ }
+ ],
+ "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User" : {
+ "employeeNumber" : [
+ {
+ "value" : "0"
+ }
+ ]
+ }
+ }
+ ,
+ .
+ .
+ .
+ }
+
Command to invoke Data SCIM API against specific Oracle Unified Directory Interface:
+$ curl --noproxy "*" -k --location \
+--request GET 'https://oud-ds-rs-http-0:30443/iam/directory/oud/scim/v1/Users' \
+--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
+
Note: In all the examples below:
+a) | json_pp
is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp
library.
b) Base64 of userDN:userPassword
can be generated using echo -n "userDN:userPassword" | base64
.
Command to invoke Admin REST API against External LBR:
+$ curl --noproxy "*" -k --insecure --location \
+--request GET 'https://<External LBR Host>/rest/v1/admin/?scope=base&attributes=vendorName&attributes=vendorVersion&attributes=ds-private-naming-contexts&attributes=subschemaSubentry' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
+
The output will look similar to the following:
+{
+ "totalResults" : 1,
+ "searchResultEntries" : [
+ {
+ "dn" : "",
+ "attributes" : {
+ "vendorVersion" : "Oracle Unified Directory 12.2.1.4.0",
+ "ds-private-naming-contexts" : [
+ "cn=admin data",
+ "cn=ads-truststore",
+ "cn=backups",
+ "cn=config",
+ "cn=monitor",
+ "cn=schema",
+ "cn=tasks",
+ "cn=virtual acis",
+ "dc=replicationchanges"
+ ],
+ "subschemaSubentry" : "cn=schema",
+ "vendorName" : "Oracle Corporation"
+ }
+ }
+ ],
+ "msgType" : "urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse"
+}
+
Command to invoke Admin REST API against specific Oracle Unified Directory Admin Interface:
+$ curl --noproxy "*" -k --insecure --location \
+--request GET 'https://oud-ds-rs-admin-0/rest/v1/admin/?scope=base&attributes=vendorName&attributes=vendorVersion&attributes=ds-private-naming-contexts&attributes=subschemaSubentry' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
+
Command to invoke Admin REST API against Kubernetes NodePort for Ingress Controller Service
+$ curl --noproxy "*" -k --insecure --location \
+--request GET 'https://oud-ds-rs-admin-0:30443/rest/v1/admin/?scope=base&attributes=vendorName&attributes=vendorVersion&attributes=ds-private-naming-contexts&attributes=subschemaSubentry' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Basic <Base64 of userDN:userPassword>' | json_pp
+
+As described in Prepare Your Environment you can create your own OUD container image. If you have access to My Oracle Support (MOS) and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory image for production deployments.
+Using the WebLogic Image Tool, you can create a new Oracle Unified Directory image with PSU’s and interim patches or update an existing image with one or more interim patches.
+++Recommendations:
++
+- Use create for creating a new Oracle Unified Directory image containing the Oracle Unified Directory binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUD patches because it optimizes the size of the image.
+- Use update for patching an existing Oracle Unified Directory image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
+
Verify that your environment meets the following prerequisites:
+To set up the WebLogic Image Tool:
+Create a working directory and change to it:
+$ mkdir <workdir>
+$ cd <workdir>
+
For example:
+$ mkdir /scratch/imagetool-setup
+$ cd /scratch/imagetool-setup
+
Download the latest version of the WebLogic Image Tool from the releases page.
+$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
+
where X.X.X is the latest release referenced on the releases page.
+Unzip the release ZIP file in the imagetool-setup
directory.
$ unzip imagetool.zip
+
Execute the following commands to set up the WebLogic Image Tool:
+$ cd <workdir>/imagetool-setup/imagetool/bin
+$ source setup.sh
+
For example:
+$ cd /scratch/imagetool-setup/imagetool/bin
+$ source setup.sh
+
To validate the setup of the WebLogic Image Tool:
+Enter the following command to retrieve the version of the WebLogic Image Tool:
+$ imagetool --version
+
Enter imagetool
then press the Tab key to display the available imagetool
commands:
$ imagetool <TAB>
+cache create help rebase update
+
The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp
, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR
:
+$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache
directory. Under this directory, the lookup information is stored in the .metadata
file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR
:
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
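You can list the current contents of the cache at any time with the cache listItems sub-command, for example:
+$ imagetool cache listItems
+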
Creating an Oracle Unified Directory container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory domains.
+Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO
:
$ cd <workdir>/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
For example:
+$ cd /scratch/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
++Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.
+
After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create
a new Oracle Unified Directory image.
You must download the required Oracle Unified Directory installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.
+The installation binaries and patches required are:
+Oracle Unified Directory 12.2.1.4.0
+OUD Patches:
+Container Image Download/Patch Details
section, locate the Oracle Unified Directory (OUD)
table. For the latest PSU click the README
link in the Documentation
column. In the README, locate the “Installed Software” section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support.
Oracle JDK v8
+The following files in the code repository location <imagetool-setup-location>/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0
are used for creating the image:
additionalBuildCmds.txt
buildArgs
Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs
file and change %DOCKER_REPO%
,%JDK_VERSION%
and %BUILDTAG%
appropriately.
For example:
+create
+--jdkVersion=8u321
+--type oud
+--version=12.2.1.4.0
+--tag=oud-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts
+
The <workdir>/imagetool-setup/imagetool/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt
contains additional build commands. You may edit this file if you want to customize the image further.
Add a JDK package to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
+
where XXX
is the JDK version downloaded
Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_oud.jar
+
Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
+
Add the rest of the downloaded product patches to the WebLogic Image Tool cache:
+$ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
+
For example:
+$ imagetool cache addEntry --key 33448950_12.2.1.4.0 --value <download location>/p33448950_122140_Generic.zip
+
Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs
file and append the product patches and opatch patch as follows:
--patches 33448950_12.2.1.4.0
+--opatchBugNumber=28186730_13.9.4.2.8
+
An example buildArgs
file is now as follows:
create
+--jdkVersion=8u321
+--type oud
+--version=12.2.1.4.0
+--tag=oud-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts
+--patches 33448950_12.2.1.4.0
+--opatchBugNumber=28186730_13.9.4.2.8
+
++Note: In the
+buildArgs
file:+
+- +
--jdkVersion
value must match the--version
value used in theimagetool cache addInstaller
command for--type jdk
.- +
--version
value must match the--version
value used in theimagetool cache addInstaller
command for--type OUD
.
Refer to this page for the complete list of options available with the WebLogic Image Tool create
command.
Create the Oracle Unified Directory image:
+$ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
++Note: Make sure that the absolute path to the
+buildargs
file is prepended with a@
character, as shown in the example above.
For example:
+$ imagetool @<imagetool-setup-location>/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
Check the created image using the docker images
command:
$ docker images | grep oud
+
The output will look similar to the following:
+oud-latestpsu 12.2.1.4.0 30b02a692fa3 About a minute ago 1.04GB
+
Run the following command to save the container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oud-latestpsu.tar oud-latestpsu:12.2.1.4.0
+
The steps below show how to update an existing Oracle Unified Directory image with an interim patch.
+The container image to be patched must be loaded in the local docker images repository before attempting these steps.
+In the examples below the image oracle/oud:12.2.1.4.0
is updated with an interim patch.
$ docker images
+
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oud 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB
+
+Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.
+Add the OPatch patch to the WebLogic Image Tool cache, for example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
+
Execute the imagetool cache addEntry
command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip
:
$ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value <downloaded-patches-location>/p33521773_12214211008_Generic.zip
+
Provide the following arguments to the WebLogic Image Tool update
command:
--fromImage
- Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oud:12.2.1.4.0
.
--patches
- Multiple patches can be specified as a comma-separated list.
--tag
- Specify the new tag to be applied for the image being built.
Refer here for the complete list of options available with the WebLogic Image Tool update
command.
++Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.
+
For example:
+$ imagetool update --fromImage oracle/oud:12.2.1.4.0 --tag=oracle/oud-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8
+
++Note: If the command fails because the files in the image being upgraded are not owned by
+oracle:oracle
, then add the parameter--chown <userid>:<groupid>
to correspond with the values returned in the error.
Check the built image using the docker images
command:
$ docker images | grep oud
+
The output will look similar to the following:
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oud-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 1.11GB
+oracle/oud 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB
+
Run the following command to save the patched container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oud-new.tar oracle/oud-new:12.2.1.4.0
+
This chapter demonstrates how to deploy Oracle Unified Directory (OUD) 12c instance(s) and replicated instances using the Helm package manager for Kubernetes.
+The helm chart can be used to deploy an Oracle Unified Directory instance as a base, with configured sample entries, and multiple replicated Oracle Unified Directory instances/pods/services based on the specified replicaCount
.
Based on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.
+Note: From July 22 (22.3.1) onwards OUD deployment is performed using StatefulSets.
+Create a Kubernetes namespace for the OUD deployment by running the following command:
+$ kubectl create namespace <namespace>
+
For example:
+$ kubectl create namespace oudns
+
The output will look similar to the following:
+namespace/oudns created
+
+Create a Kubernetes secret that stores the credentials for the container registry where the OUD image is stored. This step must be followed if using Oracle Container Registry or your own private container registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.
+Run the following command to create the secret:
+kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
+--docker-username="<USER_NAME>" \
+--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
+--namespace=<domain_namespace>
+
For example, if using Oracle Container Registry:
+$ kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
+--docker-username="user@example.com" \
+--docker-password=password --docker-email=user@example.com \
+--namespace=oudns
+
Replace <USER_NAME>
and <PASSWORD>
with the credentials for the registry with the following caveats:
If using Oracle Container Registry to pull the OUD container image, this is the username and password used to log in to Oracle Container Registry. Before you can use this image you must log in to Oracle Container Registry, navigate to Middleware
> oud_cpu
and accept the license agreement.
If using your own container registry to store the OUD container image, this is the username and password (or token) for your container registry.
+The output will look similar to the following:
+secret/orclcred created
+
Once OUD is deployed, if the Kubernetes node where the OUD pod(s) is/are running goes down after the pod eviction time-out, the pod(s) don’t get evicted but move to a Terminating
state. The pod(s) will then remain in that state forever. To avoid this problem a cron-job is created during OUD deployment that checks for any pods in Terminating
state. If there are any pods in Terminating
state, the cron job will delete them. The pods will then start again automatically. This cron job requires access to images on hub.docker.com. A Kubernetes secret must therefore be created to enable access to these images.
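+For illustration only, the clean-up performed by the cron job is equivalent to finding any pods stuck in a Terminating state and force deleting them (the pod name below is a placeholder):
+# list pods stuck in a Terminating state
+$ kubectl get pods -n <namespace> | grep Terminating
+# force delete a stuck pod so the StatefulSet can recreate it
+$ kubectl delete pod <pod_name> -n <namespace> --grace-period=0 --force
+
+You do not need to run these commands manually; the cron job created during the OUD deployment performs this check on a schedule.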
Create a Kubernetes secret to access the required images on hub.docker.com:
+Note: You must first have a user account on hub.docker.com:
+$ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="<docker_username>" --docker-password=<password> --docker-email=<docker_email_credentials> --namespace=<domain_namespace>
+
For example:
+$ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="username" --docker-password=<password> --docker-email=user@example.com --namespace=oudns
+
The output will look similar to the following:
+secret/dockercred created
+
The oud-ds-rs
Helm chart allows you to create or deploy a group of replicated Oracle Unified Directory instances along with Kubernetes objects in a specified namespace.
The deployment can be initiated by running the following Helm command with reference to the oud-ds-rs
Helm chart, along with configuration parameters according to your environment.
$ cd $WORKDIR/kubernetes/helm
+$ helm install --namespace <namespace> \
+<Configuration Parameters> \
+<deployment/release name> \
+<Helm Chart Path/Name>
+
Configuration Parameters (override values in chart) can be passed on with --set
arguments on the command line and/or with -f / --values
arguments when referring to files.
Note: The examples in Create OUD instances below provide values which allow the user to override the default values provided by the Helm chart. A full list of configuration parameters and their default values is shown in Appendix A: Configuration parameters.
+For more details about the helm
command and parameters, please execute helm --help
and helm install --help
.
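+To review the default values supplied by the chart before overriding them, you can also run helm show values against the chart directory, for example (assuming you are in the $WORKDIR/kubernetes/helm directory):
+$ helm show values oud-ds-rs
+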
You can create OUD instances using one of the following methods:
+Note: While it is possible to install sample data during the OUD deployment, it is not possible to load your own data via an ldif file. In order to load data in OUD, create the OUD deployment and then use ldapmodify after the ingress deployment. See Using LDAP utilities.
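+As an illustration only (the hostname, port, password and ldif file below are placeholders; see Using LDAP utilities for the supported approach), loading data once the ingress is available would look similar to:
+$ ldapmodify -h <ingress_host> -p <ldap_port> -D "cn=Directory Manager" -w <password> -a -f <data>.ldif
+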
+Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create an oud-ds-rs-values-override.yaml
as follows:
image:
+ repository: <image_location>
+ tag: <image_tag>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudConfig:
+ # memory, cpu parameters for both requests and limits for oud instances
+ resources:
+ limits:
+ cpu: "1"
+ memory: "4Gi"
+ requests:
+ cpu: "500m"
+ memory: "4Gi"
+ rootUserPassword: <password>
+ sampleData: "200"
+persistence:
+ type: filesystem
+ filesystem:
+ hostPath:
+ path: <persistent_volume>/oud_user_projects
+cronJob:
+ kubectlImage:
+ repository: bitnami/kubectl
+ tag: <version>
+ pullPolicy: IfNotPresent
+
+ imagePullSecrets:
+ - name: dockercred
+
For example:
+image:
+ repository: container-registry.oracle.com/middleware/oud_cpu
+ tag: 12.2.1.4-jdk8-ol7-<October`23>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudConfig:
+ # memory, cpu parameters for both requests and limits for oud instances
+ resources:
+ limits:
+ cpu: "1"
+ memory: "8Gi"
+ requests:
+ cpu: "500m"
+ memory: "4Gi"
+ rootUserPassword: <password>
+ sampleData: "200"
+persistence:
+ type: filesystem
+ filesystem:
+ hostPath:
+ path: /scratch/shared/oud_user_projects
+cronJob:
+ kubectlImage:
+ repository: bitnami/kubectl
+ tag: 1.26.6
+ pullPolicy: IfNotPresent
+
+ imagePullSecrets:
+ - name: dockercred
+
The following caveats exist:
+Replace <password>
with the relevant password.
sampleData: "200"
will load 200 sample users into the default baseDN dc=example,dc=com
. If you do not want sample data, remove this entry. If sampleData
is set to 1,000,000
users or greater, then you must add the following entries to the yaml file to prevent inconsistencies in dsreplication:
deploymentConfig:
+ startupTime: 720
+ period: 120
+ timeout: 60
+
The <version>
in kubectlImage tag:
should be set to the same version as your Kubernetes version (kubectl version
). For example, if your Kubernetes version is 1.26.6
set it to 1.26.6
.
If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:
+imagePullSecrets:
+ - name: orclcred
+
If your cluster does not have access to the internet to pull external images, such as bitnami/kubectl or busybox, you must load the images in a local container registry. You must then set the following:
+cronJob:
+ kubectlImage:
+ repository: container-registry.example.com/bitnami/kubectl
+ tag: 1.26.6
+ pullPolicy: IfNotPresent
+
+busybox:
+ image: container-registry.example.com/busybox
+
If using NFS for your persistent volume then change the persistence
section as follows:
Note: If you want to use NFS you should ensure that you have a default Kubernetes storage class defined for your environment that allows network storage.
+For more information on storage classes, see Storage Classes.
+persistence:
+ type: networkstorage
+ networkstorage:
+ nfs:
+ path: <persistent_volume>/oud_user_projects
+ server: <NFS IP address>
+ # if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used.
+ storageClassCreate: true
+ storageClass: oud-sc
+ # if storageClassCreate is true, please provide the custom provisioner if any to use. If you do not have a custom provisioner, delete this line, and it will use the default class kubernetes.io/is-default-class.
+ provisioner: kubernetes.io/is-default-class
+
The following caveats exist:
+* If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor.
+* If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in "`kubectl get storageclass`". The `provisioner` can be ignored.
If using Block Device storage for your persistent volume then change the persistence
section as follows:
Note: If you want to use block devices you should ensure that you have a default Kubernetes storage class defined for your environment that allows dynamic storage. Each vendor has its own storage provider but it may not be configured to provide dynamic storage allocation.
+For more information on storage classes, see Storage Classes.
+persistence:
+ type: blockstorage
+ # Specify Accessmode ReadWriteMany for NFS and for block ReadWriteOnce
+ accessMode: ReadWriteOnce
+ # if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used.
+ storageClassCreate: true
+ storageClass: oud-sc
+ # if storageClassCreate is true, please provide the custom provisioner if any to use or else it will use default.
+ provisioner: oracle.com/oci
+
The following caveats exist:
+* If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor.
+* If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in "`kubectl get storageclass`". The `provisioner` can be ignored.
For resources, limits and requests, the example CPU and memory values shown are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.
Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m
” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.
Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.
+If you plan on integrating OUD with other Oracle components then you must specify the following under the oudConfig:
section:
integration: <Integration option>
+
+For example:
+
oudConfig:
+ etc...
+ integration: <Integration option>
+
+It is recommended to choose the option covering your minimal requirements. Allowed values include: `no-integration` (no integration), `basic` (Directory Integration Platform), `generic` (Directory Integration Platform, Database Net Services and E-Business Suite integration), `eus` (Directory Integration Platform, Database Net Services, E-Business Suite and Enterprise User Security integration). The default value is `no-integration`
+
+
+**Note**: This will enable the integration type only. To integrate OUD with the Oracle component referenced, refer to the relevant product component documentation.
+
+
If you want to enable Assured Replication, see Enabling Assured Replication (Optional).
+Run the following command to deploy OUD:
+$ helm install --namespace <namespace> \
+--values oud-ds-rs-values-override.yaml \
+<release_name> oud-ds-rs
+
For example:
+$ helm install --namespace oudns \
+--values oud-ds-rs-values-override.yaml \
+oud-ds-rs oud-ds-rs
+
Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.
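+While waiting for the deployment to complete, you can optionally watch the pods start, for example:
+# watch the OUD pods until all show READY 1/1
+$ kubectl get pods -n oudns -w
+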
+Create OUD instances using the --set argument
+Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Run the following command to create OUD instances:
+$ helm install --namespace <namespace> \
+--set oudConfig.rootUserPassword=<password> \
+--set persistence.filesystem.hostPath.path=<persistent_volume>/oud_user_projects \
+--set image.repository=<image_location>,image.tag=<image_tag> \
+--set oudConfig.sampleData="200" \
+--set oudConfig.resources.limits.cpu="1",oudConfig.resources.limits.memory="8Gi",oudConfig.resources.requests.cpu="500m",oudConfig.resources.requests.memory="4Gi" \
+--set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=<version> \
+--set cronJob.imagePullSecrets[0].name="dockercred" \
+--set imagePullSecrets[0].name="orclcred" \
+<release_name> oud-ds-rs
+
For example:
+$ helm install --namespace oudns \
+--set oudConfig.rootUserPassword=<password> \
+--set persistence.filesystem.hostPath.path=/scratch/shared/oud_user_projects \
+--set image.repository=container-registry.oracle.com/middleware/oud_cpu,image.tag=12.2.1.4-jdk8-ol7-<October`23> \
+--set oudConfig.sampleData="200" \
+--set oudConfig.resources.limits.cpu="1",oudConfig.resources.limits.memory="8Gi",oudConfig.resources.requests.cpu="500m",oudConfig.resources.requests.memory="4Gi" \
+--set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=1.26.6 \
+--set cronJob.imagePullSecrets[0].name="dockercred" \
+--set imagePullSecrets[0].name="orclcred" \
+oud-ds-rs oud-ds-rs
+
The following caveats exist:
+Replace <password>
with the relevant password.
sampleData: "200"
will load 200 sample users into the default baseDN dc=example,dc=com
. If you do not want sample data, remove this entry. If sampleData
is set to 1,000,000
users or greater, then you must add the following to the command to prevent inconsistencies in dsreplication: --set deploymentConfig.startupTime=720,deploymentConfig.period=120,deploymentConfig.timeout=60
.
The <version>
in kubectlImage tag:
should be set to the same version as your Kubernetes version (kubectl version
). For example, if your Kubernetes version is 1.26.6
set it to 1.26.6
.
If using NFS for your persistent volume then use:
+--set persistence.networkstorage.nfs.path=<persistent_volume>/oud_user_projects,persistence.networkstorage.nfs.server=<NFS IP address> \
+--set persistence.storageClassCreate="true",persistence.storageClass="oud-sc",persistence.provisioner="kubernetes.io/is-default-class" \
+
* If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor.
+* If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in "`kubectl get storageclass`". The `provisioner` can be ignored.
+
+
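+To see which storage classes and provisioners already exist in your cluster, as referenced in the caveats above, run:
+$ kubectl get storageclass
+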
If using block storage for your persistent volume then use:
+--set persistence.type="blockstorage",persistence.accessMode="ReadWriteOnce" \
+--set persistence.storageClassCreate="true",persistence.storageClass="oud-sc",persistence.provisioner="oracle.com/oci" \
+
* If you want to create your own storage class, set `storageClassCreate: true`. If `storageClassCreate: true` it is recommended to set `storageClass` to a value of your choice, and `provisioner` to the provisioner supported by your cloud vendor.
+* If you have an existing storageClass that supports dynamic storage, set `storageClassCreate: false` and `storageClass` to the NAME value returned in "`kubectl get storageclass`". The `provisioner` can be ignored.
+
+
If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following: --set imagePullSecrets[0].name="orclcred"
.
For resources
, limits
and requests, the example CPU and memory values shown are for development environments only. For Enterprise Deployments, please review the performance recommendations and sizing requirements in Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster.
Note: Limits and requests for CPU resources are measured in CPU units. One CPU in Kubernetes is equivalent to 1 vCPU/Core for cloud providers, and 1 hyperthread on bare-metal Intel processors. An “m
” suffix in a CPU attribute indicates ‘milli-CPU’, so 500m is 50% of a CPU. Memory can be expressed in various units, where one Mi is one IEC unit mega-byte (1024^2), and one Gi is one IEC unit giga-byte (1024^3). For more information, see Resource Management for Pods and Containers, Assign Memory Resources to Containers and Pods, and Assign CPU Resources to Containers and Pods.
Note: The parameters above are also utilized by the Kubernetes Horizontal Pod Autoscaler (HPA). For more details on HPA, see Kubernetes Horizontal Pod Autoscaler.
+If you plan on integrating OUD with other Oracle components then you must specify the following:
+--set oudConfig.integration=<Integration option>
+
+It is recommended to choose the option covering your minimal requirements. Allowed values include: `no-integration` (no integration), `basic` (Directory Integration Platform), `generic` (Directory Integration Platform, Database Net Services and E-Business Suite integration), `eus` (Directory Integration Platform, Database Net Services, E-Business Suite and Enterprise User Security integration). The default value is `no-integration`
+
+**Note**: This will enable the integration type only. To integrate OUD with the Oracle component referenced, refer to the relevant product component documentation.
+
+
If you want to enable Assured Replication, see Enabling Assured Replication (Optional).
+Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.
+If you want to enable assured replication, perform the following steps:
+Create a directory on the persistent volume as follows:
+$ cd <persistent_volume>
+$ mkdir oud-repl-config
+$ sudo chown -R 1000:0 oud-repl-config
+
For example:
+$ cd /scratch/shared
+$ mkdir oud-repl-config
+$ sudo chown -R 1000:0 oud-repl-config
+
Add the following section in the oud-ds-rs-values-override.yaml
:
replOUD:
+ envVars:
+ - name: post_dsreplication_dsconfig_3
+ value: set-replication-domain-prop --domain-name ${baseDN} --advanced --set assured-type:safe-read --set assured-sd-level:2 --set assured-timeout:5s
+ - name: execCmd_1
+ value: /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/OUD/bin/dsconfig --no-prompt --hostname ${sourceHost} --port ${adminConnectorPort} --bindDN "${rootUserDN}" --bindPasswordFile /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/admin/rootPwdFile.txt --trustAll set-replication-domain-prop --domain-name ${baseDN} --advanced --set assured-type:safe-read --set assured-sd-level:2 --set assured-timeout:5s --provider-name "Multimaster Synchronization"
+configVolume:
+ enabled: true
+ type: networkstorage
+ storageClassCreate: true
+ storageClass: oud-config
+ provisioner: kubernetes.io/is-default-class
+ networkstorage:
+ nfs:
+ server: <IP_address>
+ path: <persistent_volume>/oud-repl-config
+ mountPath: /u01/oracle/config-input
+
For more information on OUD Assured Replication, and other options and levels, see, Understanding the Oracle Unified Directory Replication Model.
+The following caveats exist:
+post_dsreplication_dsconfig_N
and execCmd_N
should be a unique key - change the suffix accordingly. For more information on the environment variable and respective keys, see, Appendix B: Environment Variables.
For configVolume the storage can be networkstorage (NFS) or filesystem (hostPath), as the config volume path has to be accessible from all the Kubernetes nodes. Please note that block storage is not supported for configVolume.
+If you want to create your own storage class, set storageClassCreate: true
. If storageClassCreate: true
it is recommended to set storageClass
to a value of your choice, and provisioner
to the provisioner supported by your cloud vendor.
If you have an existing storageClass that supports network storage, set storageClassCreate: false
and storageClass
to the NAME value returned in “kubectl get storageclass
”. Please note that the storage-class should not be the one you used for the persistent volume earlier. The provisioner
can be ignored.
In all the examples above, the following output is shown following a successful execution of the helm install
command.
NAME: oud-ds-rs
+LAST DEPLOYED: <DATE>
+NAMESPACE: oudns
+STATUS: deployed
+REVISION: 4
+NOTES:
+#
+# Copyright (c) 2020, Oracle and/or its affiliates.
+#
+# Licensed under the Universal Permissive License v 1.0 as shown at
+# https://oss.oracle.com/licenses/upl
+#
+#
+Since "nginx" has been chosen, follow the steps below to configure nginx ingress controller.
+Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation.
+command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+
+Command helm install to install nginx-ingress related objects like pod, service, deployment, etc.
+# helm install --namespace <namespace for ingress> --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx
+
+For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart.
+
+Run these commands to check port mapping and services:
+# kubectl --namespace <namespace for ingress> get services -o wide -w lbr-nginx-ingress-controller
+# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-http-ingress-nginx
+# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-admin-ingress-nginx
+
+Accessible interfaces through ingress:
+(External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller)
+
+1. OUD Admin REST:
+ Port: http/https
+
+2. OUD Data REST:
+ Port: http/https
+
+3. OUD Data SCIM:
+ Port: http/https
+
+4. OUD LDAP/LDAPS:
+ Port: ldap/ldaps
+
+5. OUD Admin LDAPS:
+ Port: ldaps
+
+Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters.
+
Run the following command to verify the OUD deployment:
+$ kubectl --namespace <namespace> get pod,service,secret,pv,pvc,ingress -o wide
+
For example:
+$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oud-ds-rs-0 1/1 Running 0 14m 10.244.1.180 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Running 0 8m26s 10.244.1.181 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 0/1 Running 0 2m24s 10.244.1.182 <Worker Node> <none> <none>
+pod/oud-pod-cron-job-27586680-p5d8q 0/1 Completed 0 50s 10.244.1.183 <Worker Node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/oud-ds-rs ClusterIP None <none> 1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-0 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-1 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-2 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-http-0 ClusterIP 10.104.112.93 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-http-1 ClusterIP 10.103.105.70 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-http-2 ClusterIP 10.110.160.107 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-lbr-admin ClusterIP 10.99.238.222 <none> 1888/TCP,1444/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-http ClusterIP 10.101.250.196 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-ldap ClusterIP 10.104.149.90 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-ldap-0 ClusterIP 10.109.255.221 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-ldap-1 ClusterIP 10.111.135.142 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-ldap-2 ClusterIP 10.100.8.145 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+
+NAME TYPE DATA AGE
+secret/dockercred kubernetes.io/dockerconfigjson 1 4h24m
+secret/orclcred kubernetes.io/dockerconfigjson 1 14m
+secret/oud-ds-rs-creds opaque 8 14m
+secret/oud-ds-rs-tls-cert kubernetes.io/tls 2 14m
+secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 14m
+
+
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
+persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Bound oudns/oud-ds-rs-pvc manual 14m Filesystem
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 14m Filesystem
+
+NAME CLASS HOSTS ADDRESS PORTS AGE
+ingress.networking.k8s.io/oud-ds-rs-admin-ingress-nginx <none> oud-ds-rs-admin-0,oud-ds-rs-admin-0,oud-ds-rs-admin-1 + 3 more... 80, 443 14m
+ingress.networking.k8s.io/oud-ds-rs-http-ingress-nginx <none> oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more... 80, 443 14m
+
+
Note: If you are using block storage you will see slightly different entries for PV and PVC, for example:
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
+persistentvolume/ocid1.volume.oc1.iad.<unique_ID> 50Gi RWO Delete Bound oudns/oud-ds-rs-pv-oud-ds-rs-2 oud-sc 60m Filesystem
+persistentvolume/ocid1.volume.oc1.iad.<unique_ID> 50Gi RWO Delete Bound oudns/oud-ds-rs-pv-oud-ds-rs-1 oud-sc 67m Filesystem
+persistentvolume/ocid1.volume.oc1.iad.<unique_ID> 50Gi RWO Delete Bound oudns/oud-ds-rs-pv-oud-ds-rs-3 oud-sc 45m Filesystem
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-1 Bound ocid1.volume.oc1.iad.<unique_ID> 50Gi RWO oud-sc 67m Filesystem
+persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-2 Bound ocid1.volume.oc1.iad.<unique_ID> 50Gi RWO oud-sc 60m Filesystem
+persistentvolumeclaim/oud-ds-rs-pv-oud-ds-rs-3 Bound ocid1.volume.oc1.iad.<unique_ID> 50Gi RWO oud-sc 45m Filesystem
+
Note: Initially pod/oud-ds-rs-0
will appear with a STATUS
of 0/1
and it will take approximately 5 minutes before OUD is started (1/1
). Once pod/oud-ds-rs-0
has a STATUS
of 1/1
, pod/oud-ds-rs-1
will appear with a STATUS
of 0/1
. Once pod/oud-ds-rs-1
is started (1/1
), pod/oud-ds-rs-2
will appear. It will take around 15 minutes for all the pods to be fully started.
While the oud-ds-rs pods have a STATUS
of 0/1
the pod is running but the OUD server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:
$ kubectl logs <pod> -n oudns
+
For example:
+$ kubectl logs oud-ds-rs-0 -n oudns
+
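+If you prefer to wait on pod readiness rather than polling the logs, the following is a minimal sketch using kubectl wait (adjust the pod name and timeout to suit your environment):
+$ kubectl wait --for=condition=ready pod/oud-ds-rs-0 -n oudns --timeout=900s
+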
Note: If the OUD deployment fails, additionally refer to Troubleshooting for instructions on how to describe the failing pod(s). Once the problem is identified, follow Undeploy an OUD deployment to clean down the deployment before deploying again.
+Kubernetes objects created by the Helm chart are detailed in the table below:
Type | Name | Example Name | Purpose
---|---|---|---
Service Account | <deployment/release name> | oud-ds-rs | Kubernetes Service Account for the Helm Chart deployment
Secret | <deployment/release name>-creds | oud-ds-rs-creds | Secret object for Oracle Unified Directory related critical values like passwords
Persistent Volume | <deployment/release name>-pv | oud-ds-rs-pv | Persistent Volume for user_projects mount.
Persistent Volume Claim | <deployment/release name>-pvc | oud-ds-rs-pvc | Persistent Volume Claim for user_projects mount.
Persistent Volume | <deployment/release name>-pv-config | oud-ds-rs-pv-config | Persistent Volume for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc.
Persistent Volume Claim | <deployment/release name>-pvc-config | oud-ds-rs-pvc-config | Persistent Volume Claim for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc.
Pod | <deployment/release name>-0 | oud-ds-rs-0 | Pod/Container for base Oracle Unified Directory Instance which would be populated first with base configuration (like number of sample entries)
Pod | <deployment/release name>-N | oud-ds-rs-1, oud-ds-rs-2, … | Pod(s)/Container(s) for Oracle Unified Directory Instances - each would have replication enabled against base Oracle Unified Directory instance <deployment/release name>-0
Service | <deployment/release name>-0 | oud-ds-rs-0 | Service for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance <deployment/release name>-0
Service | <deployment/release name>-http-0 | oud-ds-rs-http-0 | Service for HTTP and HTTPS interfaces from base Oracle Unified Directory instance <deployment/release name>-0
Service | <deployment/release name>-ldap-0 | oud-ds-rs-ldap-0 | Service for LDAP and LDAPS interfaces from base Oracle Unified Directory instance <deployment/release name>-0
Service | <deployment/release name>-N | oud-ds-rs-1, oud-ds-rs-2, … | Service(s) for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance <deployment/release name>-N
Service | <deployment/release name>-http-N | oud-ds-rs-http-1, oud-ds-rs-http-2, … | Service(s) for HTTP and HTTPS interfaces from base Oracle Unified Directory instance <deployment/release name>-N
Service | <deployment/release name>-ldap-N | oud-ds-rs-ldap-1, oud-ds-rs-ldap-2, … | Service(s) for LDAP and LDAPS interfaces from base Oracle Unified Directory instance <deployment/release name>-N
Service | <deployment/release name>-lbr-admin | oud-ds-rs-lbr-admin | Service for LDAPS Admin, REST Admin and Replication interfaces from all Oracle Unified Directory instances
Service | <deployment/release name>-lbr-http | oud-ds-rs-lbr-http | Service for HTTP and HTTPS interfaces from all Oracle Unified Directory instances
Service | <deployment/release name>-lbr-ldap | oud-ds-rs-lbr-ldap | Service for LDAP and LDAPS interfaces from all Oracle Unified Directory instances
Ingress | <deployment/release name>-admin-ingress-nginx | oud-ds-rs-admin-ingress-nginx | Ingress Rules for HTTP Admin interfaces.
Ingress | <deployment/release name>-http-ingress-nginx | oud-ds-rs-http-ingress-nginx | Ingress Rules for HTTP (Data/REST) interfaces.
Once all the PODs created are visible as READY
(i.e. 1/1
), you can verify your replication across multiple Oracle Unified Directory instances.
To verify the replication group, connect to the container and issue an OUD administration command to show the details. The name of the container can be found by issuing the following:
+$ kubectl get pods -n <namespace> -o jsonpath='{.items[*].spec.containers[*].name}'
+
For example:
+$ kubectl get pods -n oudns -o jsonpath='{.items[*].spec.containers[*].name}'
+
The output will look similar to the following:
+oud-ds-rs oud-ds-rs oud-ds-rs
+
Once you have the container name you can verify the replication status in the following ways:
+Run the following command to create a bash shell in the pod:
+$ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- bash
+
For example:
+$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash
+
This will take you into the pod:
+[oracle@oud-ds-rs-0 oracle]$
+
From the prompt, use the dsreplication
command to check the status of your replication group:
$ cd /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin
+
+$ ./dsreplication status --trustAll \
+--hostname oud-ds-rs-0 --port 1444 --adminUID admin \
+--dataToDisplay compat-view --dataToDisplay rs-connections
+
The output will look similar to the following. Enter credentials where prompted:
+>>>> Specify Oracle Unified Directory LDAP connection parameters
+
+Password for user 'admin':
+
+Establishing connections and reading configuration ..... Done.
+
+dc=example,dc=com - Replication Enabled
+=======================================
+
+Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
+---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
+oud-ds-rs-0:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-1:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-2:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898
+ : : : : : : : : : : : (GID=1)
+
+Replication Server [11] : RS #1 : RS #2 : RS #3
+-------------------------------:-------:-------:------
+oud-ds-rs-0:1898 : -- : Yes : Yes
+(#1) : : :
+oud-ds-rs-1:1898 : Yes : -- : Yes
+(#2) : : :
+oud-ds-rs-2:1898 : Yes : Yes : --
+(#3) : : :
+
+[1] The number of changes that are still missing on this element (and that have been applied to at least one other server).
+[2] Age of oldest missing change: the age (in seconds) of the oldest change that has not yet arrived on this element.
+[3] The replication port used to communicate between the servers whose contents are being replicated.
+[4] Whether the replication communication initiated by this element is encrypted or not.
+[5] Whether the directory server is trusted or not. Updates coming from an untrusted server are discarded and not propagated.
+[6] The number of untrusted changes. These are changes generated on this server while it is untrusted. Those changes are not propagated to the rest of the topology but are effective on the untrusted server.
+[7] The status of the replication on this element.
+[8] Whether the external change log is enabled for the base DN on this server or not.
+[9] The ID of the replication group to which the server belongs.
+[10] The replication server this server is connected to with its group ID between brackets.
+[11] This table represents the connections between the replication servers. The headers of the columns use a number as identifier for each replication server. See the values of the first column to identify the corresponding replication server for each number.
+
Type exit
to exit the pod.
The dsreplication status
command can be invoked using the following kubectl command:
$ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- \
+/u01/oracle/user_projects/<OUD Instance/Pod Name>/OUD/bin/dsreplication status \
+--trustAll --hostname <OUD Instance/Pod Name> --port 1444 --adminUID admin \
+--dataToDisplay compat-view --dataToDisplay rs-connections
+
For example:
+$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \
+/u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \
+--trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \
+--dataToDisplay compat-view --dataToDisplay rs-connections
+
The output will be the same as per Run dsreplication inside the pod.
+Note: This section only needs to be followed if you enabled assured replication as per Enabling Assured Replication (Optional).
+Run the following command to create a bash shell in the pod:
+$ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- bash
+
For example:
+$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash
+
This will take you into the pod:
+[oracle@oud-ds-rs-0 oracle]$
+
At the prompt, enter the following commands:
+$ echo $bindPassword1 > /tmp/pwd.txt
+$ /u01/oracle/user_projects/${OUD_INSTANCE_NAME}/OUD/bin/dsconfig --no-prompt --hostname ${OUD_INSTANCE_NAME} --port ${adminConnectorPort} --bindDN "${rootUserDN}" --bindPasswordFile /tmp/pwd.txt --trustAll get-replication-domain-prop --domain-name ${baseDN} --advanced --property assured-type --property assured-sd-level --property assured-timeout --provider-name "Multimaster Synchronization"
+
The output will look similar to the following:
+Property : Value(s)
+-----------------:----------
+assured-sd-level : 2
+assured-timeout : 5 s
+assured-type : safe-read
+
Run the following command to make sure the cronjob is created:
+$ kubectl get cronjob -n <namespace>
+
For example:
+$ kubectl get cronjob -n oudns
+
The output will look similar to the following:
+NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
+oud-pod-cron-job */30 * * * * False 0 5m18s 19m
+
Run the following command to make sure the job(s) is created:
+$ kubectl get job -n <namespace> -o wide
+
For example:
+$ kubectl get job -n oudns -o wide
+
The output will look similar to the following:
+NAME COMPLETIONS DURATION AGE CONTAINERS IMAGES SELECTOR
+oud-pod-cron-job-27586680 1/1 1s 5m36s cron-kubectl bitnami/kubectl:1.26.6 controller-uid=700ab9f7-6094-488a-854d-f1b914de5f61
+
If you need to disable the job, for example if maintenance needs to be performed on the node, you can disable the job as follows:
+Run the following command to edit the cronjob:
+$ kubectl edit cronjob pod-cron-job -n <namespace>
+
For example:
+$ kubectl edit cronjob oud-pod-cron-job -n oudns
+
Note: This opens an edit session for the cronjob where parameters can be changed using standard vi
commands.
In the edit session search for suspend
and change the value from false
to true
:
...
+schedule: '*/30 * * * *'
+successfulJobsHistoryLimit: 3
+suspend: true
+...
+
Save the file and exit (wq!)
.
Run the following to make sure the cronjob is suspended:
+$ kubectl get cronjob -n <namespace>
+
For example:
+$ kubectl get cronjob -n oudns
+
The output will look similar to the following:
+NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
+oud-pod-cron-job */30 * * * * True 0 7m47s 21m
+
To enable the cronjob again, repeat the above steps and set suspend
to false
.
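+Alternatively, the suspend flag can be toggled without an interactive edit session by using kubectl patch, for example:
+# suspend the cron job (use "suspend":false to re-enable it)
+$ kubectl patch cronjob oud-pod-cron-job -n oudns -p '{"spec":{"suspend":true}}'
+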
+With an OUD instance now deployed, you are ready to configure an ingress controller to direct traffic to OUD as per Configure an ingress for an OUD.
+Find the deployment release name:
+$ helm --namespace <namespace> list
+
For example:
+$ helm --namespace oudns list
+
The output will look similar to the following:
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+oud-ds-rs oudns 1 <DATE> deployed oud-ds-rs-0.2 12.2.1.4.0
+
Delete the deployment using the following command:
+$ helm uninstall --namespace <namespace> <release>
+
For example:
+$ helm uninstall --namespace oudns oud-ds-rs
+release "oud-ds-rs" uninstalled
+
Run the following command to view the status:
+$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
+
Initially the pods and persistent volume (PV) and persistent volume claim (PVC) will move to a Terminating
status:
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+
+pod/oud-ds-rs-0 1/1 Terminating 0 24m 10.244.1.180 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Terminating 0 18m 10.244.1.181 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 1/1 Terminating 0 12m 10.244.1.182 <Worker Node> <none> <none>
+
+NAME TYPE DATA AGE
+secret/default-token-msmmd kubernetes.io/service-account-token 3 3d20h
+secret/dockercred kubernetes.io/dockerconfigjson 1 3d20h
+secret/orclcred kubernetes.io/dockerconfigjson 1 3d20h
+
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
+persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Terminating oudns/oud-ds-rs-pvc manual 24m Filesystem
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pvc Terminating oud-ds-rs-pv 20Gi RWX manual 24m Filesystem
+
Run the command again until the pods, PV and PVC disappear.
+If the PV or PVCs don’t delete, remove them manually:
+$ kubectl delete pvc oud-ds-rs-pvc -n oudns
+$ kubectl delete pv oud-ds-rs-pv -n oudns
+
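+If a PVC remains stuck in a Terminating state after the delete command, this is usually because a finalizer is still set on it. As a last resort the finalizer can be cleared, for example:
+$ kubectl patch pvc oud-ds-rs-pvc -n oudns -p '{"metadata":{"finalizers":null}}'
+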
Note: If using block storage, you will see a PV and PVC for each pod. Delete all of the PVCs and PVs using the above commands.
+Note: The steps below are not relevant for block storage.
+Delete the contents of the oud_user_projects
directory in the persistent volume:
$ cd <persistent_volume>/oud_user_projects
+$ rm -rf *
+
For example:
+$ cd /scratch/shared/oud_user_projects
+$ rm -rf *
+
The following table lists the configurable parameters of the oud-ds-rs
chart and their default values.
Parameter | +Description | +Default Value | +
---|---|---|
replicaCount | +Number of DS+RS instances/pods/services to be created with replication enabled against a base Oracle Unified Directory instance/pod. | +3 | +
restartPolicyName | +restartPolicy to be configured for each POD containing Oracle Unified Directory instance | +OnFailure | +
image.repository | +Oracle Unified Directory Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers | +oracle/oud | +
image.tag | +Oracle Unified Directory Image Tag. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers | +12.2.1.4.0 | +
image.pullPolicy | +policy to pull the image | +IfnotPresent | +
imagePullSecrets.name | +name of Secret resource containing private registry credentials | +regcred | +
nameOverride | +override the fullname with this name | ++ |
fullnameOverride | +Overrides the fullname with the provided string | ++ |
serviceAccount.create | +Specifies whether a service account should be created | +true | +
serviceAccount.name | +If not set and create is true, a name is generated using the fullname template | +oud-ds-rs-< fullname >-token-< randomalphanum > | +
podSecurityContext | +Security context policies to add to the controller pod | ++ |
securityContext | +Security context policies to add by default | ++ |
service.type | +type of controller service to create | +ClusterIP | +
nodeSelector | +node labels for pod assignment | ++ |
tolerations | +node taints to tolerate | ++ |
affinity | +node/pod affinities | ++ |
ingress.enabled | ++ | true | +
ingress.type | +Supported value: nginx | +nginx | +
ingress.nginx.http.host | +Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as < fullname >-http.< domain >, < fullname >-http-0.< domain >, < fullname >-http-1.< domain >, etc. | ++ |
ingress.nginx.http.domain | +Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. | ++ |
ingress.nginx.http.backendPort | ++ | http | +
ingress.nginx.http.nginxAnnotations | ++ | { kubernetes.io/ingress.class: “nginx" } | +
ingress.nginx.admin.host | +Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as < fullname >-admin.< domain >, < fullname >-admin-0.< domain >, < fullname >-admin-1.< domain >, etc. | ++ |
ingress.nginx.admin.domain | +Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. | ++ |
ingress.nginx.admin.nginxAnnotations | ++ | { kubernetes.io/ingress.class: “nginx” nginx.ingress.kubernetes.io/backend-protocol: “https"} | +
ingress.ingress.tlsSecret | +Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name < fullname >-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as < namespace >/< tlsSecretName > | ++ |
ingress.certCN | +Subject’s common name (cn) for SelfSigned Cert. | +< fullname > | +
ingress.certValidityDays | +Validity of Self-Signed Cert in days | +365 | +
secret.enabled | +If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through –set, –values, etc.) would be used while creation of pods. | +true | +
secret.name | +secret name to use an already created Secret | +oud-ds-rs-< fullname >-creds | +
secret.type | +Specifies the type of the secret | +Opaque | +
persistence.enabled | +If enabled, it will use the persistent volume. if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. | +true | +
persistence.pvname | +pvname to use an already created Persistent Volume , If blank will use the default name | +oud-ds-rs-< fullname >-pv | +
persistence.pvcname | +pvcname to use an already created Persistent Volume Claim , If blank will use default name | +oud-ds-rs-< fullname >-pvc | +
persistence.type | +supported values: either filesystem or networkstorage or blockstorage or custom | +filesystem | +
persistence.filesystem.hostPath.path | +The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. | +/scratch/shared/oud_user_projects | +
persistence.networkstorage.nfs.path | +Path of NFS Share location | +/scratch/shared/oud_user_projects | +
persistence.networkstorage.nfs.server | +IP or hostname of NFS Server | +0.0.0.0 | +
persistence.custom.* | +Based on values/data, YAML content would be included in PersistenceVolume Object | ++ |
persistence.accessMode | +Specifies the access mode of the location provided. ReadWriteMany for Filesystem/NFS, ReadWriteOnce for block storage. | +ReadWriteMany | +
persistence.size | +Specifies the size of the storage | +10Gi | +
persistence.storageClassCreate | +if true, it will create the storageclass. if value is false, please provide existing storage class (storageClass) to be used. | +empty | +
persistence.storageClass | +Specifies the storageclass of the persistence volume. | +empty | +
persistence.provisioner | +If storageClassCreate is true, provide the custom provisioner if any . | +kubernetes.io/is-default-class | +
persistence.annotations | +specifies any annotations that will be used | +{ } | +
configVolume.enabled | +If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. | +true | +
configVolume.mountPath | +If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and there would not be any mount point available for config | +false | +
configVolume.pvname | +pvname to use an already created Persistent Volume , If blank will use the default name | +oud-ds-rs-< fullname >-pv-config | +
configVolume.pvcname | +pvcname to use an already created Persistent Volume Claim , If blank will use default name | +oud-ds-rs-< fullname >-pvc-config | +
configVolume.type | +supported values: either filesystem or networkstorage or custom | +filesystem | +
configVolume.filesystem.hostPath.path | +The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. | +/scratch/shared/oud_user_projects | +
configVolume.networkstorage.nfs.path | +Path of NFS Share location | +/scratch/shared/oud_config | +
configVolume.networkstorage.nfs.server | +IP or hostname of NFS Server | +0.0.0.0 | +
configVolume.custom.* | +Based on values/data, YAML content would be included in PersistenceVolume Object | ++ |
configVolume.accessMode | +Specifies the access mode of the location provided | +ReadWriteMany | +
configVolume.size | +Specifies the size of the storage | +10Gi | +
configVolume.storageClass | +Specifies the storageclass of the persistence volume. | +empty | +
configVolume.annotations | +Specifies any annotations that will be used | +{ } | +
configVolume.storageClassCreate | +If true, it will create the storageclass. if value is false, provide existing storage class (storageClass) to be used. | +true | +
configVolume.provisioner | +If configVolume.storageClassCreate is true, please provide the custom provisioner if any. | +kubernetes.io/is-default-class | +
oudPorts.adminldaps | +Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over LDAPS Protocol | +1444 | +
oudPorts.adminhttps | +Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over HTTPS Protocol. | +1888 | +
oudPorts.ldap | +Port on which Oracle Unified Directory Instance in the container should listen for LDAP Communication. | +1389 | +
oudPorts.ldaps | +Port on which Oracle Unified Directory Instance in the container should listen for LDAPS Communication. | +1636 | +
oudPorts.http | +Port on which Oracle Unified Directory Instance in the container should listen for HTTP Communication. | +1080 | +
oudPorts.https | +Port on which Oracle Unified Directory Instance in the container should listen for HTTPS Communication. | +1081 | +
oudPorts.replication | +Port value to be used while setting up replication server. | +1898 | +
oudConfig.baseDN | +BaseDN for Oracle Unified Directory Instances | +dc=example,dc=com | +
oudConfig.rootUserDN | +Root User DN for Oracle Unified Directory Instances | +cn=Directory Manager | +
oudConfig.rootUserPassword | +Password for Root User DN | +RandomAlphanum | +
oudConfig.sampleData | +To specify that the database should be populated with the specified number of sample entries. | +0 | +
oudConfig.sleepBeforeConfig | +Based on the value for this parameter, initialization/configuration of each Oracle Unified Directory replica would be delayed. | +120 | +
oudConfig.adminUID | +AdminUID to be configured with each replicated Oracle Unified Directory instance | +admin | +
oudConfig.adminPassword | +Password for AdminUID. If the value is not passed, value of rootUserPassword would be used as password for AdminUID. | +rootUserPassword | +
baseOUD.envVarsConfigMap | +Reference to ConfigMap which can contain additional environment variables to be passed on to POD for Base Oracle Unified Directory Instance. Following are the environment variables which would not be honored from the ConfigMap. instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData. | +- | +
baseOUD.envVars | +Environment variables in Yaml Map format. This is helpful when its requried to pass environment variables through –values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see Appendix B: Environment Variables. | +- | +
replOUD.envVarsConfigMap | +Reference to ConfigMap which can contain additional environment variables to be passed on to PODs for Replicated Oracle Unified Directory Instances. Following are the environment variables which would not be honored from the ConfigMap. instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData, sourceHost, sourceServerPorts, sourceAdminConnectorPort, sourceReplicationPort, dsreplication_1, dsreplication_2, dsreplication_3, dsreplication_4, post_dsreplication_dsconfig_1, post_dsreplication_dsconfig_2 | +- | +
replOUD.envVars | +Environment variables in Yaml Map format. This is helpful when its required to pass environment variables through –values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see Appendix B: Environment Variables. | +- | +
podManagementPolicy | +Defines the policy for pod management within the statefulset. Typical values are OrderedReady/Parallel | +OrderedReady | +
updateStrategy | +Allows you to configure and disable automated rolling updates for containers, labels, resource request/limits, and annotations for the Pods in a StatefulSet. Typical values are OnDelete/RollingUpdate | +RollingUpdate | +
busybox.image | +busy box image name. Used for initcontainers | +busybox | +
oudConfig.cleanupbeforeStart | +Used to remove the individual pod directories during restart. Recommended value is false. Note: Do not change the default value (false) as it will delete the existing data and clone it from base pod again. | +false | +
oudConfig.disablereplicationbeforeStop | +This parameter is used to disable replication when a pod is restarted. Recommended value is false. Note Do not change the default value (false), as changing the value will result in an issue where the pod won’t join the replication topology after a restart. | +false | +
oudConfig.resources.requests.memory | +This parameter is used to set the memory request for the OUD pod | +4Gi | +
oudConfig.resources.requests.cpu | +This parameter is used to set the cpu request for the OUD pod | +0.5 | +
oudConfig.resources.limits.memory | +This parameter is used to set the memory limit for the OUD pod | +4Gi | +
oudConfig.resources.limits.cpu | +This parameter is used to set the cpu limit for the OUD pod | +1 | +
replOUD.groupId | +Group ID to be used/configured with each Oracle Unified Directory instance in replicated topology. | +1 | +
service.lbrtype | +Type of load balancer Service to be created for admin, http,ldap services. Values allowed: ClusterIP/NodePort | +ClusterIP | +
oudPorts.nodePorts.adminldaps | +Public port on which the OUD instance in the container should listen for administration communication over LDAPS Protocol. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. | ++ |
oudPorts.nodePorts.adminhttps | +Public port on which the OUD instance in the container should listen for administration communication over HTTPS Protocol. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. | ++ |
oudPorts.nodePorts.ldap | +Public port on which the OUD instance in the container should listen for LDAP communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. | ++ |
oudPorts.nodePorts.ldaps | +Public port on which the OUD instance in the container should listen for LDAPS communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. | ++ |
oudPorts.nodePorts.http | +Public port on which the OUD instance in the container should listen for HTTP communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. | ++ |
oudPorts.nodePorts.https | +Public port on which the OUD instance in the container should listen for HTTPS communication. The port number should be between 30000-32767. No duplicate values are allowed. Note: Set only if service.lbrtype is set as NodePort. If left blank then k8s will assign random ports in between 30000 and 32767. | ++ |
oudConfig.integration | +Specifies which Oracle components the server can be integrated with. It is recommended to choose the option covering your minimal requirements. Allowed values: no-integration (no integration), basic (Directory Integration Platform), generic (Directory Integration Platform, Database Net Services and E-Business Suite integration), eus (Directory Integration Platform, Database Net Services, E-Business Suite and Enterprise User Security integration) | +no-integration | +
elk.logStashImage | +The version of logstash you want to install | +logstash:8.3.1 | +
elk.sslenabled | +If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase. | +true | +
elk.eshosts | +The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used | +https://elasticsearch.example.com:9200 | +
elk.esuser | +The name of the user for logstash to access Elasticsearch | +logstash_internal | +
elk.espassword | +The password for ELK_USER | +password | +
elk.esapikey | +The API key details | +apikey | +
elk.esindex | +The log name | +oudlogs-00001 | +
elk.imagePullSecrets | +secret to be used for pulling logstash image | +dockercred | +
Environment Variable | +Description | +Default Value | +
---|---|---|
ldapPort | +Port on which the Oracle Unified Directory instance in the container should listen for LDAP communication. Use ‘disabled’ if you do not want to enable it. | +1389 | +
ldapsPort | +Port on which the Oracle Unified Directory instance in the container should listen for LDAPS communication. Use ‘disabled’ if you do not want to enable it. | +1636 | +
rootUserDN | +DN for the Oracle Unified Directory instance root user. | +—— | +
rootUserPassword | +Password for the Oracle Unified Directory instance root user. | +—— | +
adminConnectorPort | +Port on which the Oracle Unified Directory instance in the container should listen for administration communication over LDAPS. Use ‘disabled’ if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. | +1444 | +
httpAdminConnectorPort | +Port on which the Oracle Unified Directory Instance in the container should listen for Administration Communication over HTTPS Protocol. Use ‘disabled’ if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. | +1888 | +
httpPort | +Port on which the Oracle Unified Directory Instance in the container should listen for HTTP Communication. Use ‘disabled’ if you do not want to enable it. | +1080 | +
httpsPort | +Port on which the Oracle Unified Directory Instance in the container should listen for HTTPS Communication. Use ‘disabled’ if you do not want to enable it. | +1081 | +
sampleData | +Specifies the number of sample entries to populate the Oracle Unified Directory instance with on creation. If this parameter has a non-numeric value, the parameter addBaseEntry is added to the command instead of sampleData. Similarly, when the ldifFile_n parameter is specified sampleData will not be considered and ldifFile entries will be populated. | +0 | +
adminUID | +User ID of the Global Administrator to use to bind to the server. This parameter is primarily used with the dsreplication command. | +—— | +
adminPassword | +Password for adminUID | +—— | +
bindDN1 | +BindDN to be used while setting up replication using dsreplication to connect to First Directory/Replication Instance. |
+—— | +
bindPassword1 | +Password for bindDN1 | +—— | +
bindDN2 | +BindDN to be used while setting up replication using dsreplication to connect to Second Directory/Replication Instance. |
+—— | +
bindPassword2 | +Password for bindDN2 | +—— | +
replicationPort | +Port value to be used while setting up a replication server. This variable is used to substitute values in dsreplication parameters. |
+1898 | +
sourceHost | +Value for the hostname to be used while setting up a replication server. This variable is used to substitute values in dsreplication parameters. |
+—— | +
initializeFromHost | +Value for the hostname to be used while initializing data on a new Oracle Unified Directory instance replicated from an existing instance. This variable is used to substitute values in dsreplication parameters. It is possible to have a different value for sourceHost and initializeFromHost while setting up replication with Replication Server, sourceHost can be used for the Replication Server and initializeFromHost can be used for an existing Directory instance from which data will be initialized. |
+$sourceHost | +
serverTuning | +Values to be used to tune JVM settings. The default value is jvm-default. If specific tuning parameters are required, they can be added using this variable. | +jvm-default | +
offlineToolsTuning | +Values to be used to specify the tuning for offline tools. If this variable is not specified, jvm-default is used. To apply specific tuning, specify the complete set of values with options. | +jvm-default | +
generateSelfSignedCertificate | +Set to “true” if the requirement is to generate a self signed certificate when creating an Oracle Unified Directory instance. If no value is provided this value takes the default, “true”. If using a certificate generated separately this value should be set to “false”. | +true | +
usePkcs11Keystore | +Use a certificate in a PKCS#11 token that the replication gateway will use as server certificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. Set to “true” if the requirement is to use the usePkcs11Keystore parameter when creating an Oracle Unified Directory instance. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to “false”. | +—— | +
enableStartTLS | +Enable StartTLS to allow secure communication with the directory server by using the LDAP port. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to “false”. | +—— | +
useJCEKS | +Specifies the path of a JCEKS that contains a certificate that the replication gateway will use as server certificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. If required this should specify the keyStorePath, for example, /u01/oracle/config/keystore . |
+—— | +
useJavaKeystore | +Specify the path to the Java Keystore (JKS) that contains the server certificate. If required this should specify the path to the JKS, for example, /u01/oracle/config/keystore . By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to “false”. |
+—— | +
usePkcs12keyStore | +Specify the path to the PKCS#12 keystore that contains the server certificate. If required this should specify the path, for example, /u01/oracle/config/keystore.p12 . By default this parameter is not set. |
+—— | +
keyStorePasswordFile | +Use the password in the specified file to access the certificate keystore. A password is required when you specify an existing certificate (JKS, JCEKS, PKCS#11, or PKCS#12) as a server certificate. If required this should specify the path of the password file, for example, /u01/oracle/config/keystorepassword.txt . By default this parameter is not set. |
+—— | +
eusPasswordScheme | +Set password storage scheme, if configuring Oracle Unified Directory for Enterprise User Security. Set this to a value of either “sha1” or “sha2”. By default this parameter is not set. | +—— | +
jmxPort | +Port on which the Directory Server should listen for JMX communication. Use ‘disabled’ if you do not want to enable it. | +disabled | +
javaSecurityFile | +Specify the path to the Java security file. If required this should specify the path, for example, /u01/oracle/config/new_security_file . By default this parameter is not set. |
+—— | +
schemaConfigFile_n | +‘n’ in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for schema configuration/extension. If required this should specify the path, for example, schemaConfigFile_1=/u01/oracle/config/00_test.ldif . |
+—— | +
ldifFile_n | +‘n’ in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for initial data population. If required this should specify the path, for example, ldifFile_1=/u01/oracle/config/test1.ldif . |
+—— | +
dsconfigBatchFile_n | +‘n’ in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for batch processing by the dsconfig command. If required this should specify the path, for example, dsconfigBatchFile_1=/u01/oracle/config/dsconfig_1.txt . When executing the dsconfig command the following values are added implicitly to the arguments contained in the batch file : ${hostname}, ${adminConnectorPort}, ${bindDN} and ${bindPasswordFile} |
+—— | +
dstune_n | +‘n’ in the variable name represents a numeric value between 1 and 50. Allows commands and options to be passed to the dstune utility as a full command. |
+—— | +
dsconfig_n | +‘n’ in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the dsconfig command. For each dsconfig execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. |
+—— | +
dsreplication_n | +‘n’ in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the dsreplication command. For each dsreplication execution, the following variables are added implicitly : ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}, ${replicationPort}, ${sourceHost}, ${initializeFromHost}, and ${baseDN}. Depending on the dsreplication sub-command, the following variables are added implicitly : ${bindDN1}, ${bindPasswordFile1}, ${bindDN2}, ${bindPasswordFile2}, ${adminUID}, and ${adminPasswordFile}. |
+—— | +
post_dsreplication_dsconfig_n | +‘n’ in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the dsconfig command to be run following execution of the dsreplication command. For each dsconfig execution, the following variables/values are added implicitly : –provider-name “Multimaster Synchronization”, ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. |
+—— | +
rebuildIndex_n | +‘n’ in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the rebuild-index command. For each rebuild-index execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}, and ${baseDN}. |
+—— | +
manageSuffix_n | +‘n’ in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the manage-suffix command. For each manage-suffix execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. |
+—— | +
importLdif_n | +‘n’ in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the import-ldif command. For each import-ldif execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. |
+—— | +
execCmd_n | +‘n’ in the variable name represents a numeric value between 1 and 300. Each file represents a command to be executed in the container. For each command execution, the following variables are replaced, if present in the command : ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}. | +—— | +
restartAfterRebuildIndex | +Specifies whether to restart the server after building the index. | +false | +
restartAfterSchemaConfig | +Specifies whether to restart the server after configuring the schema. | +false | +
Note: For the parameters above, the following applies:
+If values are provided, the following variables will be substituted with their values: ${hostname},${ldapPort},${ldapsPort},${adminConnectorPort},${replicationPort},${sourceHost},${initializeFromHost},${sourceAdminConnectorPort},${sourceReplicationPort},${baseDN},${rootUserDN},${adminUID},${rootPwdFile},${bindPasswordFile},${adminPwdFile},${bindPwdFile1},${bindPwdFile2}
+Oracle supports the deployment of Oracle Unified Directory on Kubernetes. See the following sections:
+Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management. Oracle Unified Directory is an all-in-one directory solution with storage, proxy, synchronization and virtualization capabilities. While unifying the approach, it provides all the services required for high-performance Enterprise and carrier-grade environments. Oracle Unified Directory ensures scalability to billions of entries, ease of installation, elastic deployments, enterprise manageability and effective monitoring.
+This project supports deployment of Oracle Unified Directory (OUD) container images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The OUD container image refers to binaries for OUD Release 12.2.1.4.0 and it has the capability to create different types of OUD Instances (Directory Service, Proxy, Replication) in containers.
+This project has several key features to assist you with deploying and managing Oracle Unified Directory in a Kubernetes environment. You can:
+The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 23.4.1.
+See the Release Notes for recent changes and known issues for Oracle Unified Directory deployment on Kubernetes.
+This documentation explains how to configure OUD on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.
+If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. +Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OUD and no other Oracle Identity Management products.
+Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Unified Directory deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:
+To view documentation for an earlier release, see:
+Kubernetes Horizontal Pod Autoscaler (HPA) allows automatic scaling (up and down) of the OUD servers. If load increases then extra OUD servers will be started as required. Similarly, if load decreases, OUD servers will be automatically shut down.
+For more information on HPA, see Horizontal Pod Autoscaling.
+The instructions below show you how to configure and run an HPA to scale OUD servers, based on CPU utilization or memory resource metrics.
+Note: If you enable HPA and then decide you want to start/stop/scale OUD servers manually as per Scaling Up/Down OUD Pods, it is recommended to delete HPA beforehand as per Delete the HPA.
+In order to use HPA, OUD must have been created with the required resources
parameter as per Create OUD instances. For example:
oudConfig:
+ # memory, cpu parameters for both requests and limits for oud instances
+ resources:
+ limits:
+ cpu: "1"
+ memory: "8Gi"
+ requests:
+ cpu: "500m"
+ memory: "4Gi"
+
If you created the OUD servers at any point since July 22 (22.3.1) then these values are the defaults. You can check using the following command:
+$ helm show values oud-ds-rs -n oudns
+
The output will look similar to the following:
+...
+# memory, cpu parameters for both requests and limits for oud instances
+ resources:
+ requests:
+ memory: "4Gi"
+ cpu: "500m"
+ limits:
+ memory: "8Gi"
+ cpu: "2"
+ ...
+
Before deploying HPA you must deploy the Kubernetes Metrics Server.
+Check to see if the Kubernetes Metrics Server is already deployed:
+$ kubectl get pods -n kube-system | grep metric
+
If a row is returned as follows, then Kubernetes Metric Server is deployed and you can move to Deploy HPA.
+metrics-server-d9694457-mf69d 1/1 Running 0 5m13s
+
If no rows are returned by the previous command, then the Kubernetes Metric Server needs to be deployed. Run the following commands to get the components.yaml
:
$ mkdir $WORKDIR/kubernetes/hpa
+$ cd $WORKDIR/kubernetes/hpa
+$ wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+
Deploy the Kubernetes Metrics Server by running the following command:
+$ kubectl apply -f components.yaml
+
The output will look similar to the following:
+serviceaccount/metrics-server created
+clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
+clusterrole.rbac.authorization.k8s.io/system:metrics-server created
+rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
+clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
+clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
+service/metrics-server created
+deployment.apps/metrics-server created
+apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
+
Run the following command to check Kubernetes Metric Server is running:
+$ kubectl get pods -n kube-system | grep metric
+
Make sure the pod has a READY
status of 1/1
:
metrics-server-d9694457-mf69d 1/1 Running 0 39s
+
If the Kubernetes Metric Server does not reach the READY 1/1
state, run the following commands:
$ kubectl describe pod <metrics-server-pod> -n kube-system
+$ kubectl logs <metrics-server-pod> -n kube-system
+
If you see errors such as:
+Readiness probe failed: HTTP probe failed with statuscode: 500
+
and:
+E0907 13:07:50.937308 1 scraper.go:140] "Failed to scrape node" err="Get \"https://X.X.X.X:10250/metrics/resource\": x509: cannot validate certificate for 100.105.18.113 because it doesn't contain any IP SANs" node="worker-node1"
+
then you may need to install a valid cluster certificate for your Kubernetes cluster.
+For testing purposes, you can resolve this issue by:
+Delete the Kubernetes Metrics Server by running the following command:
+$ kubectl delete -f $WORKDIR/kubernetes/hpa/components.yaml
+
Edit the $WORKDIR/kubernetes/hpa/components.yaml
and locate the args:
section. Add --kubelet-insecure-tls
to the arguments. For example:
spec:
+ containers:
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --kubelet-insecure-tls
+ - --metric-resolution=15s
+ image: registry.k8s.io/metrics-server/metrics-server:v0.6.4
+ ...
+
Deploy the Kubernetes Metrics Server using the command:
+$ kubectl apply -f components.yaml
+
Run the following and make sure the READY status shows 1/1
:
$ kubectl get pods -n kube-system | grep metric
+
The output should look similar to the following:
+metrics-server-d9694457-mf69d 1/1 Running 0 40s
+
The steps below show how to configure and run an HPA to scale OUD, based on the CPU or memory utilization resource metrics.
+Assuming the example OUD configuration in Create OUD instances, three OUD servers are started by default (oud-ds-rs-0
, oud-ds-rs-1
, oud-ds-rs-2
).
In the following example an HPA resource is created, targeted at the statefulset oud-ds-rs
. This resource will autoscale OUD servers from a minimum of 3 OUD servers up to 5 OUD servers. Scaling up will occur when the average CPU is consistently over 70%. Scaling down will occur when the average CPU is consistently below 70%.
Navigate to the $WORKDIR/kubernetes/hpa
and create an autoscalehpa.yaml
file that contains the following.
#
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: oud-sts-hpa
+ namespace: oudns
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: StatefulSet
+ name: oud-ds-rs #statefulset name of oud
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 60
+ scaleUp:
+ stabilizationWindowSeconds: 60
+ minReplicas: 3
+ maxReplicas: 5
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 70
+
Note : minReplicas
should match the number of OUD servers started by default. Set maxReplicas
to the maximum amount of OUD servers that can be started.
Note: To find the statefulset name, in this example oud-ds-rs
, run “kubectl get statefulset -n oudns
”.
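For reference, the output of that command should show the statefulset used by this chart. A sketch of what it might look like, assuming the default three pod deployment used in this document (the AGE value is illustrative):
$ kubectl get statefulset -n oudns

NAME        READY   AGE
oud-ds-rs   3/3     22h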
Note: For setting HPA based on Memory Metrics, update the metrics block with the following content. Please note we recommend using only CPU or Memory, not both.
+metrics:
+- type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: 70
+
Run the following command to create the autoscaler:
+$ kubectl apply -f autoscalehpa.yaml
+
The output will look similar to the following:
+horizontalpodautoscaler.autoscaling/oud-sts-hpa created
+
Verify the status of the autoscaler by running the following:
+$ kubectl get hpa -n oudns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+oud-sts-hpa StatefulSet/oud-ds-rs 5%/70% 3 5 3 33s
+
In the example above, CPU is currently running at 5% for oud-sts-hpa.
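If you need more detail than this summary view, a kubectl describe sketch (using the HPA name and namespace created above) shows the current metrics, the configured limits, and an Events section that records each scale up or scale down decision:
$ kubectl describe hpa oud-sts-hpa -n oudns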
Check the current status of the OUD servers:
+$ kubectl get pods -n oudns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 5h15m
+oud-ds-rs-1 1/1 Running 0 5h9m
+oud-ds-rs-2 1/1 Running 0 5h2m
+oud-pod-cron-job-28242120-bwtcz 0/1 Completed 0 61m
+oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 31m
+oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 92s
+
In the above, oud-ds-rs-0, oud-ds-rs-1 and oud-ds-rs-2 are running.
To test HPA can scale up the OUD servers, run the following commands:
+$ kubectl exec --stdin --tty oud-ds-rs-0 -n oudns -- /bin/bash
+
This will take you inside a bash shell inside the oud-ds-rs-0
pod:
[oracle@oud-ds-rs-0 oracle]$
+
Inside the bash shell, run the following command to increase the load on the CPU:
+[oracle@oud-ds-rs-0 oracle]$ dd if=/dev/zero of=/dev/null
+
This command will continue to run in the foreground.
+Repeat the step above for the oud-ds-rs-1 pod:
+$ kubectl exec --stdin --tty oud-ds-rs-1 -n oudns -- /bin/bash
+[oracle@oud-ds-rs-1 oracle]$
+[oracle@oud-ds-rs-1 oracle]$ dd if=/dev/zero of=/dev/null
+
In a command window outside the bash shells, run the following command to view the current CPU usage:
+$ kubectl get hpa -n oudns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+oud-sts-hpa StatefulSet/oud-ds-rs 125%/70% 3 5 3 5m15s
+
In the above example the CPU has increased to 125%. As this is above the 70% limit, the autoscaler increases the replicas by starting additional OUD servers.
+Run the following to see if any more OUD servers are started:
+$ kubectl get pods -n oudns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 5h50m
+oud-ds-rs-1 1/1 Running 0 5h44m
+oud-ds-rs-2 1/1 Running 0 5h37m
+oud-ds-rs-3 1/1 Running 0 9m29s
+oud-ds-rs-4 1/1 Running 0 5m17s
+oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 66m
+oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 36m
+oud-pod-cron-job-28242210-kn7sv 0/1 Completed 0 6m28s
+
In the example above one more OUD server has started (oud-ds-rs-4
).
Note: It may take some time for the server to appear and start. Once the server is at READY
status of 1/1
, the server is started.
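If you want to follow the scale up as it happens, one option (an illustrative convenience, assuming the oudns namespace) is to watch the pods until the new server reaches READY 1/1, then press Ctrl+C to stop watching:
$ kubectl get pods -n oudns -w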
To stop the load on the CPU, in both bash shells, issue a Control C, and then exit the bash shell:
+[oracle@oud-ds-rs-0 oracle]$ dd if=/dev/zero of=/dev/null
+^C
+[oracle@oud-ds-rs-0 oracle]$ exit
+
Run the following command to view the current CPU usage:
+$ kubectl get hpa -n oudns
+
The output will look similar to the following:
+NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
+oud-sts-hpa StatefulSet/oud-ds-rs 4%/70% 3 5 5 40m
+
In the above example CPU has dropped to 4%. As this is below the 70% threshold, you should see the autoscaler scale down the servers:
+$ kubectl get pods -n oudns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 5h54m
+oud-ds-rs-1 1/1 Running 0 5h48m
+oud-ds-rs-2 1/1 Running 0 5h41m
+oud-ds-rs-3 1/1 Running 0 13m
+oud-ds-rs-4 1/1 Terminating 0 8m27s
+oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 70m
+oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 40m
+oud-pod-cron-job-28242210-kn7sv 0/1 Completed 0 10m
+
Eventually, the extra server will disappear:
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 5h57m
+oud-ds-rs-1 1/1 Running 0 5h51m
+oud-ds-rs-2 1/1 Running 0 5h44m
+oud-ds-rs-3 1/1 Running 0 16m
+oud-pod-cron-job-28242150-qf8fg 0/1 Completed 0 73m
+oud-pod-cron-job-28242180-q69lm 0/1 Completed 0 43m
+oud-pod-cron-job-28242210-kn7sv 0/1 Completed 0 13m
+
If you need to delete the HPA, you can do so by running the following command:
+$ cd $WORKDIR/kubernetes/hpa
+$ kubectl delete -f autoscalehpa.yaml
+
Important considerations for Oracle Unified Directory instances in Kubernetes.
+Describes the steps for scaling up/down for OUD pods.
+Describes the steps for logging and visualization with Elasticsearch and Kibana.
+Describes the steps for Monitoring the Oracle Unified Directory environment.
+Describes the steps for implementing the Horizontal Pod Autoscaler.
+This section describes how to install and configure logging and visualization for the oud-ds-rs Helm chart deployment.
+The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain real-time insights from the log data generated by your applications.
+If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +Installing Elasticsearch (ELK) Stack and Kibana
+In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with variables applicable to your ELK environment.
+Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.
+The table below outlines the variables and values you must set:
+Variable | +Sample Value | +Description | +
---|---|---|
<ELK_VER> |
+8.3.1 |
+The version of logstash you want to install. | +
<ELK_SSL> |
+true |
+If SSL is enabled for ELK set the value to true , or if NON-SSL set to false . This value must be lowercase. |
+
<ELK_HOSTS> |
+https://elasticsearch.example.com:9200 |
+The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. | +
<ELK_USER> |
+logstash_internal |
+The name of the user for logstash to access Elasticsearch. | +
<ELK_PASSWORD> |
+password |
+The password for ELK_USER. | +
<ELK_APIKEY> |
+apikey |
+The API key details. | +
You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt
.
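For example, a minimal sketch of staging the certificate for the steps that follow; the source path /tmp/elk.crt is a placeholder for wherever you saved your CA certificate, and the openssl command is only a quick sanity check that the file is a readable PEM certificate:
$ cp /tmp/elk.crt $WORKDIR/kubernetes/helm/oud-ds-rs/certs/elk.crt
$ openssl x509 -in $WORKDIR/kubernetes/helm/oud-ds-rs/certs/elk.crt -noout -subject -issuer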
Create a Kubernetes secret for Elasticsearch using the API Key or Password.
+a) If ELK uses an API Key for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password=<ELK_APIKEY>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
b) If ELK uses a password for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password=<ELK_PASSWORD>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.
+Check that the dockercred
secret that was created previously in Create a Kubernetes secret for cronjob images exists:
$ kubectl get secret -n <domain_namespace> | grep dockercred
+
For example,
+$ kubectl get secret -n oudns | grep dockercred
+
The output will look similar to the following:
+dockercred kubernetes.io/dockerconfigjson 1 149m
+
If the secret does not exist, create it as per Create a Kubernetes secret for cronjob images.
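If you need to recreate it, a hedged sketch of the command is shown below; the registry server, user name, and password are placeholders, and the exact values should be taken from Create a Kubernetes secret for cronjob images:
$ kubectl create secret docker-registry dockercred \
--docker-server=<registry_url> \
--docker-username=<user> \
--docker-password=<password> \
-n oudns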
+Navigate to the $WORKDIR/kubernetes/helm
directory and create a logging-override-values.yaml
file as follows:
elk:
+ imagePullSecrets:
+ - name: dockercred
+ IntegrationEnabled: true
+ logStashImage: logstash:<ELK_VER>
+ logstashConfigMap: false
+ esindex: oudlogs-00001
+ sslenabled: <ELK_SSL>
+ eshosts: <ELK_HOSTS>
+ # Note: We need to provide either esuser,espassword or esapikey
+ esuser: <ELK_USER>
+ espassword: elasticsearch-pw-elastic
+ esapikey: elasticsearch-pw-elastic
+
Change the <ELK_VER>, <ELK_SSL>, <ELK_HOSTS>, and <ELK_USER> values to match your environment.
Replace the elk.crt in $WORKDIR/kubernetes/helm/oud-ds-rs/certs/ with the elk.crt for your Elasticsearch server.
If using an API key for ELK authentication, leave esuser: and espassword: with no value.
If using a password for ELK authentication, leave esapikey: but delete elasticsearch-pw-elastic.
If no authentication is used, leave esuser, espassword, and esapikey with no value assigned.
For example:
+elk:
+ imagePullSecrets:
+ - name: dockercred
+ IntegrationEnabled: true
+ logStashImage: logstash:8.3.1
+ logstashConfigMap: false
+ esindex: oudlogs-00001
+ sslenabled: true
+ eshosts: https://elasticsearch.example.com:9200
+ # Note: We need to provide either esuser,espassword or esapikey
+ esuser: logstash_internal
+ espassword: elasticsearch-pw-elastic
+ esapikey:
+
Run the following command to upgrade the OUD deployment with the ELK configuration:
+$ helm upgrade --namespace <namespace> --values <valuesfile.yaml> <releasename> oud-ds-rs --reuse-values
+
For example:
+$ helm upgrade --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs --reuse-values
+
The output should look similar to the following:
+Release "oud-ds-rs" has been upgraded. Happy Helming!
+NAME: oud-ds-rs
+LAST DEPLOYED: <DATE>
+NAMESPACE: oudns
+STATUS: deployed
+REVISION: 2
+NOTES:
+#
+# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
+#
+# Licensed under the Universal Permissive License v 1.0 as shown at
+# https://oss.oracle.com/licenses/upl
+#
+#
+Since "nginx" has been chosen, follow the steps below to configure nginx ingress controller.
+Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation.
+command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+
+Command helm install to install nginx-ingress related objects like pod, service, deployment, etc.
+# helm install --namespace <namespace for ingress> --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx
+
+For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart.
+
+Run these commands to check port mapping and services:
+# kubectl --namespace <namespace for ingress> get services -o wide -w lbr-nginx-ingress-controller
+# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-http-ingress-nginx
+# kubectl describe --namespace <namespace for oud-ds-rs chart> ingress.extensions/oud-ds-rs-admin-ingress-nginx
+
+Accessible interfaces through ingress:
+ (External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller)
+
+1. OUD Admin REST:
+ Port: http/https
+
+2. OUD Data REST:
+ Port: http/https
+
+3. OUD Data SCIM:
+ Port: http/https
+
+4. OUD LDAP/LDAPS:
+ Port: ldap/ldaps
+
+5. OUD Admin LDAPS:
+ Port: ldaps
+
+Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters.
+
+
+Accessible interfaces through ingress:
+
+1. OUD Admin REST:
+ Port: http/https
+
+2. OUD Data REST:
+ Port: http/https
+
+3. OUD Data SCIM:
+ Port: http/https
+
+Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters.
+
Run the following command to check the logstash
pod is created correctly:
$ kubectl get pods -n <namespace>
+
For example:
+$ kubectl get pods -n oudns
+
The output should look similar to the following:
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 150m
+oud-ds-rs-1 1/1 Running 0 143m
+oud-ds-rs-2 1/1 Running 0 137m
+oud-ds-rs-logstash-5dc8d94597-knk8g 1/1 Running 0 2m12s
+oud-pod-cron-job-27758370-wpfq7 0/1 Completed 0 66m
+oud-pod-cron-job-27758400-kd6pn 0/1 Completed 0 36m
+oud-pod-cron-job-27758430-ndmgj 0/1 Completed 0 6m33s
+
Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:
+$ kubectl logs -f oud-ds-rs-logstash-<pod> -n oudns
+
Most errors occur due to misconfiguration of the logging-override-values.yaml
. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.
If the pod has errors, view the helm history to find the last working revision, for example:
+$ helm history oud-ds-rs -n oudns
+
The output will look similar to the following:
+REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
+1 Tue Jan 10 14:06:01 2023 superseded oud-ds-rs-0.2 12.2.1.4.0 Install complete
+2 Tue Jan 10 16:34:21 2023 deployed oud-ds-rs-0.2 12.2.1.4.0 Upgrade complete
+
Rollback to the previous working revision by running:
+$ helm rollback <release> <revision> -n <domain_namespace>
+
For example:
+helm rollback oud-ds-rs 1 -n oudns
+
Once you have resolved the issue in the yaml files, run the helm upgrade
command outlined earlier to recreate the logstash pod.
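For example, re-running the upgrade used earlier in this section:
$ helm upgrade --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs --reuse-values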
To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.
+For Kibana 7.7.x and below:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Kibana > Index Patterns.
+In the Create Index Pattern page enter oudlogs*
for the Index pattern and click Next Step.
In the Configure settings page, from the Time Filter field name drop down menu select @timestamp
and click Create index pattern.
Once the index pattern is created click on Discover in the navigation menu to view the OUD logs.
+For Kibana version 7.8.X and above:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Stack Management.
+Click Data Views in the Kibana section.
+Click Create Data View and enter the following information:
+oudlogs*
@timestamp
Click Create Data View.
+From the Navigation menu, click Discover to view the log file entries.
+From the drop down menu, select oudlogs*
to view the log file entries.
After the Oracle Unified Directory instance (OUD) is set up you can monitor it using Prometheus and Grafana.
+Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:
+$ kubectl create namespace <namespace>
+
For example:
+$ kubectl create namespace monitoring
+
The output will look similar to the following:
+namespace/monitoring created
+
Add the Prometheus and Grafana Helm repositories by issuing the following command:
+$ helm repo add prometheus https://prometheus-community.github.io/helm-charts
+
The output will look similar to the following:
+"prometheus" has been added to your repositories
+
Run the following command to update the repositories:
+$ helm repo update
+
The output will look similar to the following:
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+...Successfully got an update from the "prometheus" chart repository
+...Successfully got an update from the "prometheus-community" chart repository
+
+Update Complete. Happy Helming!
+
Install the Prometheus operator using the helm
command:
$ helm install <release_name> prometheus/kube-prometheus-stack -n <namespace>
+
For example:
+$ helm install monitoring prometheus/kube-prometheus-stack -n monitoring
+
The output should look similar to the following:
+NAME: monitoring
+LAST DEPLOYED: <DATE>
+NAMESPACE: monitoring
+STATUS: deployed
+REVISION: 1
+NOTES:
+kube-prometheus-stack has been installed. Check its status by running:
+ kubectl --namespace monitoring get pods -l "release=monitoring"
+
+Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
+
Note: If your cluster does not have access to the internet to pull external images, such as prometheus or grafana, you must load the images in a local container registry. You must then install as follows:
helm install --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.4.2 monitoring prometheus/kube-prometheus-stack -n monitoring
+
View the objects created for Prometheus and Grafana by issuing the following command:
+$ kubectl get all,service,pod -o wide -n <namespace>
+
For example:
+$ kubectl get all,service,pod -o wide -n monitoring
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 36s 10.244.1.78 <worker-node> <none> <none>
+pod/monitoring-grafana-578f79599c-qc9gd 3/3 Running 0 47s 10.244.2.200 <worker-node> <none> <none>
+pod/monitoring-kube-prometheus-operator-65cdf7995-kndgg 1/1 Running 0 47s 10.244.2.199 <worker-node> <none> <none>
+pod/monitoring-kube-state-metrics-56bfd4f44f-85l4p 1/1 Running 0 47s 10.244.1.76 <worker-node> <none> <none>
+pod/monitoring-prometheus-node-exporter-g2x9g 1/1 Running 0 47s 100.102.48.121 <master-node> <none> <none>
+pod/monitoring-prometheus-node-exporter-p9kkq 1/1 Running 0 47s 100.102.48.84 <worker-node> <none> <none>
+pod/monitoring-prometheus-node-exporter-rzhrd 1/1 Running 0 47s 100.102.48.28 <worker-node> <none> <none>
+pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 35s 10.244.1.79 <worker-node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/alertmanager-operated ClusterIP None <none> 9093/TCP,9094/TCP,9094/UDP 36s app.kubernetes.io/name=alertmanager
+service/monitoring-grafana ClusterIP 10.110.193.30 <none> 80/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+service/monitoring-kube-prometheus-alertmanager ClusterIP 10.104.2.37 <none> 9093/TCP 47s alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager
+service/monitoring-kube-prometheus-operator ClusterIP 10.99.162.229 <none> 443/TCP 47s app=kube-prometheus-stack-operator,release=monitoring
+service/monitoring-kube-prometheus-prometheus ClusterIP 10.108.161.46 <none> 9090/TCP 47s app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus
+service/monitoring-kube-state-metrics ClusterIP 10.111.162.185 <none> 8080/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+service/monitoring-prometheus-node-exporter ClusterIP 10.109.21.136 <none> 9100/TCP 47s app=prometheus-node-exporter,release=monitoring
+service/prometheus-operated ClusterIP None <none> 9090/TCP 35s app.kubernetes.io/name=prometheus
+
+NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
+daemonset.apps/monitoring-prometheus-node-exporter 3 3 3 3 3 <none> 47s node-exporter quay.io/prometheus/node-exporter:v1.3.1 app=prometheus-node-exporter,release=monitoring
+
+NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
+deployment.apps/monitoring-grafana 1/1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+deployment.apps/monitoring-kube-prometheus-operator 1/1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,release=monitoring
+deployment.apps/monitoring-kube-state-metrics 1/1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+
+NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
+replicaset.apps/monitoring-grafana-578f79599c 1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c
+replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995 1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring
+replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f 1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f
+
+NAME READY AGE CONTAINERS IMAGES
+statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager 1/1 36s alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 35s prometheus,config-reloader quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+
Edit the grafana
service to add the NodePort:
$ kubectl edit service/<deployment_name>-grafana -n <namespace>
+
For example:
+$ kubectl edit service/monitoring-grafana -n monitoring
+
Note: This opens an edit session for the service where parameters can be changed using standard vi
commands.
Change the ports entry and add nodePort: 30091
and type: NodePort
:
ports:
+ - name: http-web
+ nodePort: 30091
+ port: 80
+ protocol: TCP
+ targetPort: 3000
+ selector:
+ app.kubernetes.io/instance: monitoring
+ app.kubernetes.io/name: grafana
+ sessionAffinity: None
+ type: NodePort
+
Save the file and exit (:wq)
.
Access the Grafana GUI using http://<HostIP>:<nodeport>
and login with admin/prom-operator
. Change the password when prompted.
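If you are unsure of the node port, you can confirm it from the service; a sketch assuming the monitoring namespace and the nodePort: 30091 value set above (the AGE value is illustrative):
$ kubectl get service monitoring-grafana -n monitoring

NAME                 TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
monitoring-grafana   NodePort   10.110.193.30   <none>        80:30091/TCP   10m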
Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.
Import the Grafana dashboard by navigating on the left hand menu to Dashboards > Import. Click Upload JSON file and select the downloaded json file. In the Prometheus
drop down box select Prometheus
. Click Import. The dashboard should be displayed.
Verify your installation by viewing some of the customized dashboard views.
+This section describes how to increase or decrease the number of OUD pods in the Kubernetes deployment.
+Note: The instructions below are for scaling servers up or down manually. If you wish to use autoscaling, see Kubernetes Horizontal Pod Autoscaler. Please note, if you have enabled autoscaling, it is recommended to delete the autoscaler before running the commands below.
+By default the oud-ds-rs
helm chart deployment starts three pods: oud-ds-rs-0
and two replica pods oud-ds-rs-1
and oud-ds-rs-2
.
The number of pods started is determined by the replicaCount
, which is set to 3
by default. A value of 3
starts the three pods above.
To scale up or down the number of OUD pods, set replicaCount
accordingly.
Run the following command to view the number of pods in the OUD deployment:
+$ kubectl --namespace <namespace> get pods -o wide
+
For example:
+$ kubectl --namespace oudns get pods -o wide
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 <Worker Node> <none> <none>
+
In this example, replicaCount
is increased to 4
which creates a new OUD pod oud-ds-rs-3
along with its associated services.
You can scale up the number of OUD pods using one of the following methods:
+ +Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create a oud-scaleup-override.yaml
file that contains:
replicaCount: 4
+
Run the following command to scale up the OUD pods:
+$ helm upgrade --namespace <namespace> \
+--values oud-scaleup-override.yaml \
+<release_name> oud-ds-rs --reuse-values
+
For example:
+$ helm upgrade --namespace oudns \
+--values oud-scaleup-override.yaml \
+oud-ds-rs oud-ds-rs --reuse-values
+
--set
argumentRun the following command to scale up the OUD pods:
+$ helm upgrade --namespace <namespace> \
+--set replicaCount=4 \
+<release_name> oud-ds-rs --reuse-values
+
For example:
+$ helm upgrade --namespace oudns \
+--set replicaCount=4 \
+oud-ds-rs oud-ds-rs --reuse-values
+
Verify the new OUD pod oud-ds-rs-3
has started:
$ kubectl get pod,service -o wide -n <namespace>
+
For example:
+$ kubectl get pods,service -n oudns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 <Worker Node> <none> <none>
+pod/oud-ds-rs-3 1/1 Running 0 17m 10.244.0.193 <Worker Node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/oud-ds-rs ClusterIP None <none> 1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-0 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-1 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-2 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-3 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 9m9s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3
+service/oud-ds-rs-http-0 ClusterIP 10.104.112.93 <none> 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-http-1 ClusterIP 10.103.105.70 <none> 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-http-2 ClusterIP 10.110.160.107 <none> 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-http-3 ClusterIP 10.102.93.179 <none> 1080/TCP,1081/TCP 9m9s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3
+service/oud-ds-rs-lbr-admin ClusterIP 10.99.238.222 <none> 1888/TCP,1444/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-http ClusterIP 10.101.250.196 <none> 1080/TCP,1081/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-ldap ClusterIP 10.104.149.90 <none> 1389/TCP,1636/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-ldap-0 ClusterIP 10.109.255.221 <none> 1389/TCP,1636/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-ldap-1 ClusterIP 10.111.135.142 <none> 1389/TCP,1636/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-ldap-2 ClusterIP 10.100.8.145 <none> 1389/TCP,1636/TCP 22h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-ldap-3 ClusterIP 10.111.177.46 <none> 1389/TCP,1636/TCP 9m9s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-3
+
Note: It will take several minutes before all the services listed above show. While the oud-ds-rs-3
pod has a READY status of 0/1, the pod is started but the OUD server associated with it is still starting. While the pod is starting you can check the startup status in the pod log by running the following command:
$ kubectl logs oud-ds-rs-3 -n oudns
+
Scaling down OUD pods is performed in exactly the same way as in Scaling up OUD pods, except that the replicaCount
is reduced to the required number of pods.
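For example, to scale back down from four to three pods using the --set method shown above (an override file containing replicaCount: 3 would work equally well):
$ helm upgrade --namespace oudns \
--set replicaCount=3 \
oud-ds-rs oud-ds-rs --reuse-values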
Once the helm command is executed the pod(s) will move to a Terminating state. In the example below replicaCount was reduced from 4 to 3 and hence oud-ds-rs-3 has moved to Terminating:
$ kubectl get pods -n oudns
+
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 <Worker Node> <none> <none>
+pod/oud-ds-rs-3 1/1 Terminating 0 21m 10.244.0.193 <Worker Node> <none> <none>
+
The pod will take a minute or two to stop and then will disappear:
+$ kubectl get pods -n oudns
+
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oud-ds-rs-0 1/1 Running 0 22h 10.244.0.195 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Running 0 22h 10.244.0.194 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 1/1 Running 0 22h 10.244.0.193 <Worker Node> <none> <none>
+
In this section you learn how to upgrade OUD from a previous version. Follow the section relevant to the version you are upgrading from.
+Note: If on July 22 (22.3.1) or later, and have Kubernetes Horizontal Pod Autoscaler (HPA) enabled, you must disable HPA before performing the steps in the relevant upgrade section. See Delete the HPA.
+The instructions below are for upgrading from April 23 (23.2.1) or later to October 23 (23.4.1).
+Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.
+Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create a oud-patch-override.yaml
file that contains:
image:
+ repository: <image_location>
+ tag: <image_tag>
+ imagePullSecrets:
+ - name: orclcred
+
For example:
+image:
+ repository: container-registry.oracle.com/middleware/oud_cpu
+ tag: 12.2.1.4-jdk8-ol7-<October'23>
+imagePullSecrets:
+ - name: orclcred
+
The following caveats exist:
+If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:
+imagePullSecrets:
+ - name: orclcred
+
Run the following command to upgrade the deployment:
+$ helm upgrade --namespace <namespace> \
+--values oud-patch-override.yaml \
+<release_name> oud-ds-rs --reuse-values
+
For example:
+$ helm upgrade --namespace oudns \
+--values oud-patch-override.yaml \
+oud-ds-rs oud-ds-rs --reuse-values
+
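Once the upgrade completes the pods restart with the new image. A quick verification sketch, assuming the oudns namespace (the same checks are shown in more detail in the next upgrade path below):
$ kubectl get pods -n oudns
$ kubectl describe pod oud-ds-rs-0 -n oudns | grep Image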
The instructions below are for upgrading from October 22 (22.4.1) or January 23 (23.1.1), to October 23 (23.4.1).
+Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.
+Make sure the base pod (oud-ds-rs-0
) is running and healthy (READY 1/1
) by running the following command:
$ kubectl get pods -n <namespace>
+
For example:
+$ kubectl get pods -n oudns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 21h
+oud-ds-rs-1 1/1 Running 0 20h
+oud-ds-rs-2 1/1 Running 0 20h
+
Ensure dsreplication is healthy by running the following command:
+$ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- \
+/u01/oracle/user_projects/<OUD Instance/Pod Name>/OUD/bin/dsreplication status \
+--trustAll --hostname <OUD Instance/Pod Name> --port 1444 --adminUID admin \
+--dataToDisplay compat-view --dataToDisplay rs-connections
+
For example:
+$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \
+/u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \
+--trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \
+--dataToDisplay compat-view --dataToDisplay rs-connections
+
The output will look similar to the following:
+
+>>>> Specify Oracle Unified Directory LDAP connection parameters
+
+Password for user 'admin':
+
+Establishing connections and reading configuration ..... Done.
+
+dc=example,dc=com - Replication Enabled
+=======================================
+
+Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
+---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
+oud-ds-rs-0:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-1:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-2:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898
+ : : : : : : : : : : : (GID=1)
+
+Replication Server [11] : RS #1 : RS #2 : RS #3
+-------------------------------:-------:-------:------
+oud-ds-rs-0:1898 : -- : Yes : Yes
+(#1) : : :
+oud-ds-rs-1:1898 : Yes : -- : Yes
+(#2) : : :
+oud-ds-rs-2:1898 : Yes : Yes : --
+(#3) : : :
+
+etc...
+
Scale down OUD by reducing the replicas to 1
:
$ cd $WORKDIR/kubernetes/helm
+$ helm upgrade -n oudns --set replicaCount=1 oud-ds-rs oud-ds-rs --reuse-values
+
Note: The $WORKDIR
is the directory for your existing release, not October 23.
The output will be similar to the following:
+Release "oud-ds-rs" has been upgraded. Happy Helming!
+NAME: oud-ds-rs
+LAST DEPLOYED: <DATE>
+NAMESPACE: oudns
+STATUS: deployed
+REVISION: 2
+NOTES:
+
+etc..
+
Make sure the replica pods are shut down before proceeding:
+$ kubectl get pods -n oudns
+
+
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 21h
+
Note: It will take several minutes before the replica pods disappear.
+Take a backup of the OUD data for every pod in the NFS shared volume:
+$ kubectl exec -it -n oudns oud-ds-rs-0 -- bash
+[oracle@oud-ds-rs-0 oracle]$ cd user_projects
+[oracle@oud-ds-rs-0 user_projects]$ mkdir OUD_backup_<DATE>
+[oracle@oud-ds-rs-0 user_projects]$ cp -r oud-ds-rs-* OUD_backup_<DATE>/
+
Make sure the backup was created successfully:
+[oracle@oud-ds-rs-0 user_projects]$ ls -l OUD_backup_<date>
+total 2
+drwxr-x---. 5 oracle root 3 <DATE> oud-ds-rs-0
+drwxr-x---. 5 oracle root 3 <DATE> oud-ds-rs-1
+drwxr-x---. 5 oracle root 3 <DATE> oud-ds-rs-2
+
Remove the non-zero pod directories oud-ds-rs-1
and oud-ds-rs-2
:
[oracle@oud-ds-rs-0 user_projects]$ rm -rf oud-ds-rs-1 oud-ds-rs-2
+
Exit the oud-ds-rs-0
bash session:
[oracle@oud-ds-rs-0 user_projects]$ exit
+
Create a working directory on the persistent volume to set up the latest source code:
+$ mkdir <persistent_volume>/<workdir>
+
For example:
+$ mkdir /scratch/shared/OUDK8SJuly23
+
Download the latest OUD deployment scripts from the OUD repository:
+$ cd <persistent_volume>/<workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/shared/OUDK8SJuly23
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectory
+
For example:
+$ export WORKDIR=/scratch/shared/OUDK8SJuly23/fmw-kubernetes/OracleUnifiedDirectory
+
Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create an oud-patch-override.yaml
file that contains:
image:
+ repository: <image_location>
+ tag: <image_tag>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudConfig:
+ cleanupbeforeStart: false
+ disablereplicationbeforeStop: false
+replicaCount: 3
+
For example:
+image:
+ repository: container-registry.oracle.com/middleware/oud_cpu
+ tag: 12.2.1.4-jdk8-ol7-<October'23>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudConfig:
+ cleanupbeforeStart: false
+ disablereplicationbeforeStop: false
+replicaCount: 3
+
The following caveats exist:
+If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:
+imagePullSecrets:
+ - name: orclcred
+
Run the following command to upgrade the deployment:
+$ cd $WORKDIR/kubernetes/helm
+$ helm upgrade --namespace <namespace> \
+--values oud-patch-override.yaml \
+<release_name> oud-ds-rs --reuse-values
+
For example:
+$ cd $WORKDIR/kubernetes/helm
+$ helm upgrade --namespace oudns \
+--values oud-patch-override.yaml \
+oud-ds-rs oud-ds-rs --reuse-values
+
The output should look similar to the following:
+Release "oud-ds-rs" has been upgraded. Happy Helming!
+NAME: oud-ds-rs
+LAST DEPLOYED: <DATE>
+NAMESPACE: oudns
+STATUS: deployed
+REVISION: 3
+NOTES:
+etc..
+
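Optionally, confirm that the upgrade was recorded as a new chart revision using helm history, for example:
+$ helm history oud-ds-rs -n oudns
+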
After updating with the new image the pods will restart. Verify the pods are running:
+$ kubectl --namespace <namespace> get pods
+
For example:
+$ kubectl --namespace oudns get pods
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE
+oud-ds-rs-0 1/1 Running 0 11m
+oud-ds-rs-1 1/1 Running 0 28m
+oud-ds-rs-2 1/1 Running 0 22m
+...
+
Note: It will take several minutes before the pods oud-ds-rs-1 and oud-ds-rs-2 start, and oud-ds-rs-0 restarts. While an OUD pod has a READY value of 0/1, the pod has started but the OUD server associated with it is still starting.
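If you prefer to block until a specific pod reports Ready rather than repeatedly running kubectl get pods, kubectl wait is one option (a convenience only; adjust the pod name, namespace and timeout to suit your environment):
+$ kubectl wait --for=condition=ready pod/oud-ds-rs-2 -n oudns --timeout=900s
+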
Verify the pods are using the new image by running the following command:
+$ kubectl describe pod <pod> -n <namespace>
+
For example:
+$ kubectl describe pod oud-ds-rs-0 -n oudns | grep Image
+
The output will look similar to the following:
+...
+Image: container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-<October'23>
+Image ID: container-registry.oracle.com/middleware/oud_cpu@sha256:<sha256>
+
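Alternatively, a jsonpath query can list the image used by every pod in the namespace in a single command (shown for the oudns namespace used in this guide):
+$ kubectl get pods -n oudns -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'
+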
Ensure dsreplication is healthy by running the following command:
+$ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- \
+/u01/oracle/user_projects/<OUD Instance/Pod Name>/OUD/bin/dsreplication status \
+--trustAll --hostname <OUD Instance/Pod Name> --port 1444 --adminUID admin \
+--dataToDisplay compat-view --dataToDisplay rs-connections
+
For example:
+$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \
+/u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \
+--trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \
+--dataToDisplay compat-view --dataToDisplay rs-connections
+
The output will look similar to the following:
+
+>>>> Specify Oracle Unified Directory LDAP connection parameters
+
+Password for user 'admin':
+
+Establishing connections and reading configuration ..... Done.
+
+dc=example,dc=com - Replication Enabled
+=======================================
+
+Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
+---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
+oud-ds-rs-0:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-1:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-2:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898
+ : : : : : : : : : : : (GID=1)
+
+Replication Server [11] : RS #1 : RS #2 : RS #3
+-------------------------------:-------:-------:------
+oud-ds-rs-0:1898 : -- : Yes : Yes
+(#1) : : :
+oud-ds-rs-1:1898 : Yes : -- : Yes
+(#2) : : :
+oud-ds-rs-2:1898 : Yes : Yes : --
+(#3) : : :
+
+etc...
+
Once the validation steps are performed and you are confident OUD is working correctly, you can optionally delete the OUD backup data in the NFS shared volume:
+$ kubectl exec -it -n oudns oud-ds-rs-0 -- bash
+[oracle@oud-ds-rs-0 oracle]$ cd user_projects/OUD_backup_<DATE>/
+[oracle@oud-ds-rs-0 OUD_backup_<DATE>]$ rm -rf oud-ds-rs-0 oud-ds-rs-1 oud-ds-rs-2
+
The instructions below are for upgrading from July 22 (22.3.1) or earlier to October 23 (23.4.1).
+In releases prior to July 22 (22.3.1), OUD used pod-based deployment. From July 22 (22.3.1) onwards, OUD is deployed using StatefulSets.
+If you are upgrading from a release prior to July 22 (22.3.1), you must follow the steps below to deploy a new OUD instance that uses your existing OUD data in <persistent_volume>/oud_user_projects.
Note: The steps below will incur a small outage.
+Find the deployment release name as follows:
+$ helm --namespace <namespace> list
+
For example:
+$ helm --namespace oudns list
+
The output will look similar to the following:
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+oud-ds-rs oudns 1 <DATE> deployed oud-ds-rs-0.2 12.2.1.4.0
+
Delete the deployment using the following command:
+$ helm uninstall --namespace <namespace> <release>
+
For example:
+$ helm uninstall --namespace oudns oud-ds-rs
+release "oud-ds-rs" uninstalled
+
Run the following command to view the status:
+$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
+
Initially the pods, persistent volume (PV) and persistent volume claim (PVC) will move to a Terminating status:
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+
+pod/oud-ds-rs-0 1/1 Terminating 0 24m 10.244.1.180 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Terminating 0 18m 10.244.1.181 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 1/1 Terminating 0 12m 10.244.1.182 <Worker Node> <none> <none>
+
+NAME TYPE DATA AGE
+secret/default-token-msmmd kubernetes.io/service-account-token 3 3d20h
+secret/dockercred kubernetes.io/dockerconfigjson 1 3d20h
+secret/orclcred kubernetes.io/dockerconfigjson 1 3d20h
+
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
+persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Terminating oudns/oud-ds-rs-pvc manual 24m Filesystem
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pvc Terminating oud-ds-rs-pv 20Gi RWX manual 24m Filesystem
+
Run the command again until the pods, PV and PVC disappear.
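Rather than re-running the command manually, you can optionally watch the resources until they are removed, for example:
+$ kubectl --namespace oudns get pod,pv,pvc -w
+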
+Create a working directory on the persistent volume to setup the latest source code:
+$ mkdir <persistent_volume>/<workdir>
+
For example:
+$ mkdir /scratch/shared/OUDK8SJuly23
+
Download the latest OUD deployment scripts from the OUD repository:
+$ cd <persistent_volume>/<workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/shared/OUDK8SJuly23
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectory
+
For example:
+$ export WORKDIR=/scratch/shared/OUDK8SJuly23/fmw-kubernetes/OracleUnifiedDirectory
+
Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create an oud-ds-rs-values-override.yaml
as follows:
image:
+ repository: <image_location>
+ tag: <image_tag>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudConfig:
+ rootUserPassword: <password>
+ sampleData: "200"
+persistence:
+ type: filesystem
+ filesystem:
+ hostPath:
+ path: <persistent_volume>/oud_user_projects
+cronJob:
+ kubectlImage:
+ repository: bitnami/kubectl
+ tag: <version>
+ pullPolicy: IfNotPresent
+
+ imagePullSecrets:
+ - name: dockercred
+
For example:
+image:
+ repository: container-registry.oracle.com/middleware/oud_cpu
+ tag: 12.2.1.4-jdk8-ol7-<October'23>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudConfig:
+ rootUserPassword: <password>
+ sampleData: "200"
+persistence:
+ type: filesystem
+ filesystem:
+ hostPath:
+ path: /scratch/shared/oud_user_projects
+cronJob:
+ kubectlImage:
+ repository: bitnami/kubectl
+ tag: 1.24.5
+ pullPolicy: IfNotPresent
+
+ imagePullSecrets:
+ - name: dockercred
+
The following caveats exist:
+The <persistent_volume>/oud_user_projects
must point to the directory used in your previous deployment otherwise your existing OUD data will not be used. Make sure you take a backup of the <persistent_volume>/oud_user_projects
directory before proceeding further.
Replace <password>
with the password used in your previous deployment.
The <version> in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example, if your Kubernetes version is 1.24.5, set the tag to 1.24.5.
If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:
+imagePullSecrets:
+ - name: orclcred
+
If using NFS for your persistent volume then change the persistence
section as follows:
persistence:
+ type: networkstorage
+ networkstorage:
+ nfs:
+ path: <persistent_volume>/oud_user_projects
+ server: <NFS IP address>
+
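Before running the install, it is worth confirming that your existing OUD data is present under the path referenced in the override file. For example, using the filesystem path from this guide:
+# the directory should contain the OUD data from your previous deployment
+$ ls -l /scratch/shared/oud_user_projects
+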
Run the following command to deploy OUD:
+$ helm install --namespace <namespace> \
+--values oud-ds-rs-values-override.yaml \
+<release_name> oud-ds-rs
+
For example:
+$ helm install --namespace oudns \
+--values oud-ds-rs-values-override.yaml \
+oud-ds-rs oud-ds-rs
+
Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.
+Upgrade Elasticsearch and Kibana by following Upgrading Elasticsearch and Kibana.
+This section shows how to upgrade Elasticsearch and Kibana. From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana stack.
+Note: This section should only be followed if upgrading from July 22 (22.3.1) or earlier to October 23 (23.4.1). If you are upgrading from October 22 or later to October 23 do not follow this section.
+From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack.
+Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.
+If you are upgrading from July 22 (22.3.1) or earlier, to October 23 (23.4.1), you must first undeploy Elasticsearch and Kibana using the steps below:
+Navigate to the $WORKDIR/kubernetes/helm
directory and create a logging-override-values-uninstall.yaml
with the following:
elk:
+ enabled: false
+
Run the following command to remove the existing ELK deployment:
+$ helm upgrade --namespace <domain_namespace> --values <valuesfile.yaml> <releasename> oud-ds-rs --reuse-values
+
For example:
+$ helm upgrade --namespace oudns --values logging-override-values-uninstall.yaml oud-ds-rs oud-ds-rs --reuse-values
+
As per the Prerequisites a Kubernetes cluster should have already been configured.
+Run the following command on the master node to check the cluster and worker nodes are running:
+$ kubectl get nodes,pods -n kube-system
+
The output will look similar to the following:
+NAME STATUS ROLES AGE VERSION
+node/worker-node1 Ready <none> 17h v1.26.6+1.el8
+node/worker-node2 Ready <none> 17h v1.26.6+1.el8
+node/master-node Ready control-plane,master 23h v1.26.6+1.el8
+
+NAME READY STATUS RESTARTS AGE
+pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h
+pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h
+pod/etcd-master 1/1 Running 0 21h
+pod/kube-apiserver-master-node 1/1 Running 0 21h
+pod/kube-controller-manager-master-node 1/1 Running 0 21h
+pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h
+pod/kube-proxy-2kxv2 1/1 Running 0 17h
+pod/kube-proxy-82vvj 1/1 Running 0 17h
+pod/kube-proxy-nrgw9 1/1 Running 0 23h
+pod/kube-scheduler-master 1/1 Running 0 21h
+
The OUD Kubernetes deployment requires access to an OUD container image. The image can be obtained in the following ways:
+The prebuilt OUD October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
+Note: Before using this image you must login to Oracle Container Registry, navigate to Middleware
> oud_cpu
and accept the license agreement.
You can use this image in the following ways:
+You can build your own OUD container image using the WebLogic Image Tool. This is recommended if you need to apply one-off patches to a prebuilt OUD container image. For more information about building your own container image with the WebLogic Image Tool, see Create or update image.
+You can use an image built with WebLogic Image Tool in the following ways:
+Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.
+Note: This section should not be followed if using block storage.
+As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.
+In this example /scratch/shared/
is a shared directory accessible from all nodes.
On the master node run the following command to create an oud_user_projects directory:
$ cd <persistent_volume>
+$ mkdir oud_user_projects
+$ sudo chown -R 1000:0 oud_user_projects
+
For example:
+$ cd /scratch/shared
+$ mkdir oud_user_projects
+$ sudo chown -R 1000:0 oud_user_projects
+
On the master node run the following to ensure it is possible to read and write to the persistent volume:
+$ cd <persistent_volume>/oud_user_projects
+$ touch filemaster.txt
+$ ls filemaster.txt
+
For example:
+$ cd /scratch/shared/oud_user_projects
+$ touch filemaster.txt
+$ ls filemaster.txt
+
On the first worker node run the following to ensure it is possible to read and write to the persistent volume:
+$ cd /scratch/shared/oud_user_projects
+$ ls filemaster.txt
+$ touch fileworker1.txt
+$ ls fileworker1.txt
+
Repeat the above for any other worker nodes, for example fileworker2.txt, and so on. Once you have proven that it is possible to read and write from each node to the persistent volume, delete the files created.
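For example, to remove the test files from the master node once every node has been verified:
+$ rm /scratch/shared/oud_user_projects/filemaster.txt /scratch/shared/oud_user_projects/fileworker1.txt /scratch/shared/oud_user_projects/fileworker2.txt
+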
+Oracle Unified Directory deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory containers using the Helm charts provided. To deploy Oracle Unified Directory on Kubernetes you should set up the deployment scripts as below:
+Create a working directory to setup the source code.
+$ mkdir <workdir>
+
For example:
+$ mkdir /scratch/shared/OUDContainer
+
Download the latest OUD deployment scripts from the OUD repository:
+$ cd <workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/shared/OUDContainer
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectory
+
For example:
+$ export WORKDIR=/scratch/shared/OUDContainer/fmw-kubernetes/OracleUnifiedDirectory
+
You are now ready to create the OUD deployment as per Create OUD instances.
+This document provides information about the system requirements for deploying and running Oracle Unified Directory 12c PS4 (12.2.1.4.0) in a Kubernetes environment.
+Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor-specific documentation for this information. Also see Getting Started.
+Review the latest changes and known issues for Oracle Unified Directory on Kubernetes.
| Date | Version | Change |
| --- | --- | --- |
| October, 2023 | 23.4.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. |
| | | Support for Block Device Storage. See Create OUD Instances. |
| | | Ability to set resource requests and limits for CPU and memory on an OUD instance. See Create OUD Instances. |
| | | Support for Assured Replication. See Create OUD Instances. |
| | | Support for the Kubernetes Horizontal Pod Autoscaler (HPA). See Kubernetes Horizontal Pod Autoscaler. |
| | | Supports integration options such as Enterprise User Security (EUS), E-Business Suite (EBS), and Directory Integration Platform (DIP). |
| | | To upgrade to October 23 (23.4.1) you must follow the instructions in Patch and Upgrade. |
| July, 2023 | 23.3.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. |
| | | To upgrade to July 23 (23.3.1) you must follow the instructions in Patch and Upgrade. |
| April, 2023 | 23.2.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. |
| | | To upgrade to April 23 (23.2.1) you must follow the instructions in Patch and Upgrade. |
| January, 2023 | 23.1.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. |
| October, 2022 | 22.4.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. |
| | | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. |
| | | OUD container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support. |
| July, 2022 | 22.3.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. From July 2022 onwards OUD deployment is performed using StatefulSets. |
| April, 2022 | 22.2.1 | Updated for CRI-O support. |
| November 2021 | 21.4.2 | Voyager ingress removed as no longer supported. |
| October 2021 | 21.4.1 | A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. |
| November 2020 | 20.4.1 | Initial release of Oracle Unified Directory on Kubernetes. |
To check the status of objects in a namespace use the following command:
+$ kubectl --namespace <namespace> get nodes,pod,service,secret,pv,pvc,ingress -o wide
+
For example:
+$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oud-ds-rs-0 1/1 Running 0 14m 10.244.1.180 <Worker Node> <none> <none>
+pod/oud-ds-rs-1 1/1 Running 0 8m26s 10.244.1.181 <Worker Node> <none> <none>
+pod/oud-ds-rs-2 0/1 Running 0 2m24s 10.244.1.182 <Worker Node> <none> <none>
+pod/oud-pod-cron-job-27586680-p5d8q 0/1 Completed 0 50s 10.244.1.183 <Worker Node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/oud-ds-rs ClusterIP None <none> 1444/TCP,1888/TCP,1389/TCP,1636/TCP,1080/TCP,1081/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-0 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-1 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-2 ClusterIP None <none> 1444/TCP,1888/TCP,1898/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-http-0 ClusterIP 10.104.112.93 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-http-1 ClusterIP 10.103.105.70 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-http-2 ClusterIP 10.110.160.107 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+service/oud-ds-rs-lbr-admin ClusterIP 10.99.238.222 <none> 1888/TCP,1444/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-http ClusterIP 10.101.250.196 <none> 1080/TCP,1081/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-lbr-ldap ClusterIP 10.104.149.90 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs
+service/oud-ds-rs-ldap-0 ClusterIP 10.109.255.221 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+service/oud-ds-rs-ldap-1 ClusterIP 10.111.135.142 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-1
+service/oud-ds-rs-ldap-2 ClusterIP 10.100.8.145 <none> 1389/TCP,1636/TCP 14m app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,statefulset.kubernetes.io/pod-name=oud-ds-rs-2
+
+NAME TYPE DATA AGE
+secret/dockercred kubernetes.io/dockerconfigjson 1 4h24m
+secret/orclcred kubernetes.io/dockerconfigjson 1 14m
+secret/oud-ds-rs-creds opaque 8 14m
+secret/oud-ds-rs-tls-cert kubernetes.io/tls 2 14m
+secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 14m
+
+
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
+persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Bound oudns/oud-ds-rs-pvc manual 14m Filesystem
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 14m Filesystem
+
+NAME CLASS HOSTS ADDRESS PORTS AGE
+ingress.networking.k8s.io/oud-ds-rs-admin-ingress-nginx <none> oud-ds-rs-admin-0,oud-ds-rs-admin-0,oud-ds-rs-admin-1 + 3 more... 80, 443 14m
+ingress.networking.k8s.io/oud-ds-rs-http-ingress-nginx <none> oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more... 80, 443 14m
+
Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.
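For example, to display only the pods and persistent volume claims:
+$ kubectl --namespace oudns get pod,pvc -o wide
+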
+To view logs for a pod use the following command:
+$ kubectl logs <pod> -n <namespace>
+
For example:
+$ kubectl logs oud-ds-rs-0 -n oudns
+
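To stream the log continuously, or to view the log of a previously restarted container, the standard kubectl flags can be used:
+# follow the log output
+$ kubectl logs -f oud-ds-rs-0 -n oudns
+# view the log from the previous container instance after a restart
+$ kubectl logs --previous oud-ds-rs-0 -n oudns
+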
Details about a pod can be viewed using the kubectl describe
command:
$ kubectl describe pod <pod> -n <namespace>
+
For example:
+$ kubectl describe pod oud-ds-rs-0 -n oudns
+
The output will look similar to the following:
+Name: oud-ds-rs-0
+Namespace: oudns
+Priority: 0
+Node: <Worker Node>/100.105.18.114
+Start Time: <DATE>
+Labels: app.kubernetes.io/instance=oud-ds-rs
+ app.kubernetes.io/name=oud-ds-rs
+ controller-revision-hash=oud-ds-rs-5c8b8f67c9
+ statefulset.kubernetes.io/pod-name=oud-ds-rs-0
+Annotations: <none>
+Status: Running
+IP: 10.244.2.48
+IPs:
+ IP: 10.244.2.48
+Controlled By: StatefulSet/oud-ds-rs
+Init Containers:
+ mount-pv:
+ Container ID: cri-o://905af11c6f032f2dfa18b1e3956d7936cb7dd04d9d0df0cfcf8ed061e6930b52
+ Image: <location>/busybox
+ Image ID: <location>@sha256:2c8ed5408179ff4f53242a4bdd2706110ce000be239fe37a61be9c52f704c437
+ Port: <none>
+ Host Port: <none>
+ Command:
+ /bin/sh
+ -c
+ Args:
+ ordinal=${OUD_INSTANCE_NAME##*-}; if [[ ${CLEANUP_BEFORE_START} == "true" ]]; then if [[ "$ordinal" != "0" ]]; then cd /u01/oracle; rm -fr /u01/oracle/user_projects/$(OUD_INSTANCE_NAME)/OUD; fi; fi
+ if [[ ${CONFIGVOLUME_ENABLED} == "true" ]]; then if [[ "$ordinal" == "0" ]]; then cp "/mnt/baseOUD.props" "${CONFIGVOLUME_MOUNTPATH}/config-baseOUD.props"; else cp "/mnt/replOUD.props" "${CONFIGVOLUME_MOUNTPATH}/config-replOUD.props"; fi; fi;
+ State: Terminated
+ Reason: Completed
+ Exit Code: 0
+ Started: <DATE>
+ Finished: <DATE>
+ Ready: True
+ Restart Count: 0
+ Environment:
+ OUD_INSTANCE_NAME: oud-ds-rs-0 (v1:metadata.name)
+ CONFIGVOLUME_ENABLED: false
+ CONFIGVOLUME_MOUNTPATH: /u01/oracle/config-input
+ CLEANUP_BEFORE_START: false
+ Mounts:
+ /u01/oracle/user_projects from oud-ds-rs-pv (rw)
+ /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-65skp (ro)
+Containers:
+ oud-ds-rs:
+ Container ID: cri-o://d691b090dfbb1ee1b8606952497d80642424a82a2290071b325ea720098817c3
+ Image: container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-<April'23>
+ Image ID: container-registry.oracle.com/middleware/oud_cpu@sha256:faca16dbbcda1985ff567eefe3f2ca7bae6cbbb7ebcd296fffb040ce61e9396a
+ Ports: 1444/TCP, 1888/TCP, 1389/TCP, 1636/TCP, 1080/TCP, 1081/TCP, 1898/TCP
+ Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP
+ State: Running
+ Started: <DATE>
+ Ready: True
+ Restart Count: 0
+ Limits:
+ cpu: 1
+ memory: 4Gi
+ Requests:
+ cpu: 500m
+ memory: 4Gi
+ Liveness: tcp-socket :ldap delay=300s timeout=30s period=60s #success=1 #failure=5
+ Readiness: exec [/u01/oracle/container-scripts/checkOUDInstance.sh] delay=300s timeout=30s period=60s #success=1 #failure=10
+ Environment:
+ instanceType: DS2RS_STS
+ OUD_INSTANCE_NAME: oud-ds-rs-0 (v1:metadata.name)
+ MY_NODE_NAME: (v1:spec.nodeName)
+ MY_POD_NAME: oud-ds-rs-0 (v1:metadata.name)
+ sleepBeforeConfig: 3
+ sourceHost: oud-ds-rs-0
+ baseDN: dc=example,dc=com
+ rootUserDN: <set to the key 'rootUserDN' in secret 'oud-ds-rs-creds'> Optional: false
+ rootUserPassword: <set to the key 'rootUserPassword' in secret 'oud-ds-rs-creds'> Optional: false
+ adminUID: <set to the key 'adminUID' in secret 'oud-ds-rs-creds'> Optional: false
+ adminPassword: <set to the key 'adminPassword' in secret 'oud-ds-rs-creds'> Optional: false
+ bindDN1: <set to the key 'bindDN1' in secret 'oud-ds-rs-creds'> Optional: false
+ bindPassword1: <set to the key 'bindPassword1' in secret 'oud-ds-rs-creds'> Optional: false
+ bindDN2: <set to the key 'bindDN2' in secret 'oud-ds-rs-creds'> Optional: false
+ bindPassword2: <set to the key 'bindPassword2' in secret 'oud-ds-rs-creds'> Optional: false
+ sourceServerPorts: oud-ds-rs-0:1444
+ sourceAdminConnectorPort: 1444
+ sourceReplicationPort: 1898
+ sampleData: 200
+ adminConnectorPort: 1444
+ httpAdminConnectorPort: 1888
+ ldapPort: 1389
+ ldapsPort: 1636
+ httpPort: 1080
+ httpsPort: 1081
+ replicationPort: 1898
+ dsreplication_1: verify --hostname ${sourceHost} --port ${sourceAdminConnectorPort} --baseDN ${baseDN} --serverToRemove $(OUD_INSTANCE_NAME):${adminConnectorPort} --connectTimeout 600000 --readTimeout 600000
+ dsreplication_2: enable --host1 ${sourceHost} --port1 ${sourceAdminConnectorPort} --replicationPort1 ${sourceReplicationPort} --host2 $(OUD_INSTANCE_NAME) --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000
+ dsreplication_3: initialize --hostSource ${initializeFromHost} --portSource ${sourceAdminConnectorPort} --hostDestination $(OUD_INSTANCE_NAME) --portDestination ${adminConnectorPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000
+ dsreplication_4: verify --hostname $(OUD_INSTANCE_NAME) --port ${adminConnectorPort} --baseDN ${baseDN} --connectTimeout 600000 --readTimeout 600000
+ post_dsreplication_dsconfig_1: set-replication-domain-prop --domain-name ${baseDN} --set group-id:1
+ post_dsreplication_dsconfig_2: set-replication-server-prop --set group-id:1
+ Mounts:
+ /u01/oracle/user_projects from oud-ds-rs-pv (rw)
+ /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-65skp (ro)
+Conditions:
+ Type Status
+ Initialized True
+ Ready True
+ ContainersReady True
+ PodScheduled True
+Volumes:
+ oud-ds-rs-pv:
+ Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
+ ClaimName: oud-ds-rs-pvc
+ ReadOnly: false
+ kube-api-access-65skp:
+ Type: Projected (a volume that contains injected data from multiple sources)
+ TokenExpirationSeconds: 3607
+ ConfigMapName: kube-root-ca.crt
+ ConfigMapOptional: <nil>
+ DownwardAPI: true
+QoS Class: Burstable
+Node-Selectors: <none>
+Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
+ node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
+Events: <none>
+
+
Sometimes when scaling up or down, it is possible to get incorrect data in the dsreplication
output. In the example below the replicaCount
was changed from 4
to 3
. The oud-ds-rs-3
server appears as <Unknown>
when it should have disappeared:
dc=example,dc=com - Replication Enabled
+=======================================
+
+Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10]
+-------------------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:-------------------------------
+oud-ds-rs-3:<Unknown> : -- : N/A : -- : 1898 : Disabled : -- : -- : Unknown : -- : N/A : --
+[11] : : : : : : : : : : :
+oud-ds-rs-0:1444 : 39135 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-1:1444 : 39135 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898
+ : : : : : : : : : : : (GID=1)
+oud-ds-rs-2:1444 : 39135 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898
+ : : : : : : : : : : : (GID=1)
+
+Replication Server [12] : RS #1 : RS #2 : RS #3 : RS #4
+------------------------------:-------:-------:-------:------
+oud-ds-rs-0:1898 (#1) : -- : Yes : Yes : N/A
+oud-ds-rs-1:1898 (#2) : Yes : -- : Yes : N/A
+oud-ds-rs-2:1898 (#3) : Yes : Yes : -- : N/A
+oud-ds-rs-3:1898 (#4) : No : No : No : --
+
+
In this situation perform the following steps to remove the server:
+Run the following command to enter the OUD Kubernetes pod:
+$ kubectl --namespace <namespace> exec -it -c <containername> <podname> -- bash
+
For example:
+kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash
+
This will take you into the pod:
+[oracle@oud-ds-rs-0 oracle]$
+
Once inside the pod run the following command to create a password file:
+echo <ADMIN_PASSWORD> > /tmp/adminpassword.txt
+
Run the following command to remove the replicationPort
:
/u01/oracle/oud/bin/dsreplication disable --hostname localhost --port $adminConnectorPort --adminUID admin --trustAll --adminPasswordFile /tmp/adminpassword.txt --no-prompt --unreachableServer oud-ds-rs-3:$replicationPort
+
The output will look similar to the following:
+Establishing connections and reading configuration ........ Done.
+
+The following errors were encountered reading the configuration of the
+existing servers:
+Could not connect to the server oud-ds-rs-3:1444. Check that the
+server is running and that is accessible from the local machine. Details:
+oud-ds-rs-3:1444
+The tool will try to update the configuration in a best effort mode.
+
+Removing references to replication server oud-ds-rs-3:1898 ..... Done.
+
Run the following command to remove the adminConnectorPort
:
/u01/oracle/oud/bin/dsreplication disable --hostname localhost --port $adminConnectorPort --adminUID admin --trustAll --adminPasswordFile /tmp/adminpassword.txt --no-prompt --unreachableServer oud-ds-rs-3:$adminConnectorPort
+
The output will look similar to the following:
+Establishing connections and reading configuration ...... Done.
+
+Removing server oud-ds-rs-3:1444 from the registration information ..... Done.
+
Delete the password file:
+rm /tmp/adminpassword.txt
+
The instructions below explain how to set up NGINX as an ingress for OUDSM.
+Use Helm to install NGINX.
+Add the Helm chart repository for installing NGINX using the following command:
+$ helm repo add stable https://kubernetes.github.io/ingress-nginx
+
The output will look similar to the following:
+"stable" has been added to your repositories
+
Update the repository using the following command:
+$ helm repo update
+
The output will look similar to the following:
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+Update Complete. Happy Helming!
+
Create a Kubernetes namespace for NGINX:
+$ kubectl create namespace <namespace>
+
For example:
+$ kubectl create namespace mynginx
+
The output will look similar to the following:
+namespace/mynginx created
+
Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml
that contains the following:
Note: The configuration below deploys an ingress using LoadBalancer. If you prefer to use NodePort, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller.
+controller:
+ admissionWebhooks:
+ enabled: false
+ extraArgs:
+ # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server.
+ # If this flag is not provided NGINX will use a self-signed certificate.
+ # If the TLS Secret is in different namespace, name can be mentioned as <namespace>/<tlsSecretName>
+ default-ssl-certificate: oudsmns/oudsm-tls-cert
+ service:
+ # controller service external IP addresses
+ # externalIPs:
+ # - < External IP Address >
+ # To configure Ingress Controller Service as LoadBalancer type of Service
+ # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service
+ type: LoadBalancer
+ # Configuration for NodePort to be used for Ports exposed through Ingress
+ # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes
+ # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer.
+ nodePorts:
+ # For HTTP Interface exposed through LoadBalancer/Ingress
+ http: 30080
+ # For HTTPS Interface exposed through LoadBalancer/Ingress
+ https: 30443
+
To install and configure NGINX ingress issue the following command:
+$ helm install --namespace <namespace> \
+--values nginx-ingress-values-override.yaml \
+lbr-nginx stable/ingress-nginx
+
Where:
+lbr-nginx is your deployment name
+stable/ingress-nginx is the chart reference
For example:
+$ helm install --namespace mynginx \
+--values nginx-ingress-values-override.yaml \
+lbr-nginx stable/ingress-nginx
+
The output will be similar to the following:
+NAME: lbr-nginx
+LAST DEPLOYED: <DATE>
+NAMESPACE: mynginx
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+The ingress-nginx controller has been installed.
+It may take a few minutes for the LoadBalancer IP to be available.
+You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller'
+
+An example Ingress that makes use of the controller:
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ name: example
+ namespace: foo
+ spec:
+ ingressClassName: nginx
+ rules:
+ - host: www.example.com
+ http:
+ paths:
+ - pathType: Prefix
+ backend:
+ service:
+ name: exampleService
+ port:
+ number: 80
+ path: /
+ # This section is only required if TLS is to be enabled for the Ingress
+ tls:
+ - hosts:
+ - www.example.com
+ secretName: example-tls
+
+If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
+
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: example-tls
+ namespace: foo
+ data:
+ tls.crt: <base64 encoded cert>
+ tls.key: <base64 encoded key>
+ type: kubernetes.io/tls
+
Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Services Manager Interfaces through ingress.
| Port | NodePort | Host | Example Hostname | Path | Backend Service:Port | Example Service Name:Port |
| --- | --- | --- | --- | --- | --- | --- |
| http/https | 30080/30443 | <deployment/release name>-N | oudsm-N | * | <deployment/release name>-N:http | oudsm-1:http |
| http/https | 30080/30443 | * | * | /oudsm /console | <deployment/release name>-lbr:http | oudsm-lbr:http |
If it is not possible to update the load balancer configuration to add host names for the Oracle Unified Directory Services Manager interfaces, then add the following entries to the /etc/hosts file on the hosts from which the Oracle Unified Directory Services Manager interfaces will be accessed.
+<IP Address of External LBR or Kubernetes Node> oudsm oudsm-1 oudsm-2 oudsm-N
+
Launch a browser and access the OUDSM console:
+https://<External LBR Host>/oudsm
+https://<Kubernetes Node>:30443/oudsm
Access the WebLogic Administration console using the following URLs and login with weblogic/<password>, where weblogic/<password> is the adminUser and adminPass set when creating the OUDSM instance:
+https://<External LBR Host>/console
+https://<Kubernetes Node>:30443/console
As described in Prepare Your Environment you can create your own OUDSM container image. If you have access to My Oracle Support (MOS), and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory Services Manager image for production deployments.
+Using the WebLogic Image Tool, you can create a new Oracle Unified Directory Services Manager image with PSU’s and interim patches or update an existing image with one or more interim patches.
+++Recommendations:
++
+- Use create for creating a new Oracle Unified Directory Services Manager image containing the Oracle Unified Directory Services Manager binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUDSM patches because it optimizes the size of the image.
+- Use update for patching an existing Oracle Unified Directory Services Manager image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool.
+
Verify that your environment meets the following prerequisites:
+To set up the WebLogic Image Tool:
+Create a working directory and change to it:
+$ mkdir <workdir>
+$ cd <workdir>
+
For example:
+$ mkdir /scratch/imagetool-setup
+$ cd /scratch/imagetool-setup
+
Download the latest version of the WebLogic Image Tool from the releases page.
+$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip
+
where X.X.X is the latest release referenced on the releases page.
+Unzip the release ZIP file in the imagetool-setup
directory.
$ unzip imagetool.zip
+
Execute the following commands to set up the WebLogic Image Tool:
+$ cd <workdir>/imagetool-setup/imagetool/bin
+$ source setup.sh
+
For example:
+$ cd /scratch/imagetool-setup/imagetool/bin
+$ source setup.sh
+
To validate the setup of the WebLogic Image Tool:
+Enter the following command to retrieve the version of the WebLogic Image Tool:
+$ imagetool --version
+
Enter imagetool
then press the Tab key to display the available imagetool
commands:
$ imagetool <TAB>
+cache create help rebase update
+
The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp
, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user’s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR
:
+$ export WLSIMG_BLDDIR="/path/to/build/dir"
+
The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user’s $HOME/cache
directory. Under this directory, the lookup information is stored in the .metadata
file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR
:
$ export WLSIMG_CACHEDIR="/path/to/cachedir"
+
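You can inspect what is currently held in the cache at any time, for example by listing the cache entries (installers and patches) known to the tool:
+$ imagetool cache listItems
+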
Creating an Oracle Unified Directory Services Manager container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory Services Manager domains.
+Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO
:
$ cd <workdir>/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
For example:
+$ cd /scratch/imagetool-setup
+$ git clone https://github.com/oracle/docker-images.git
+
++Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.
+
After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create
a new Oracle Unified Directory Services Manager image.
You must download the required Oracle Unified Directory Services Manager installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.
+The installation binaries and patches required are:
+Oracle Unified Directory 12.2.1.4.0
+Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0
+OUDSM and FMW Infrastructure Patches:
+In the Container Image Download/Patch Details
section, locate the Oracle Unified Directory Services Manager (OUDSM)
table. For the latest PSU click the README
link in the Documentation
column. In the README, locate the “Installed Software” section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support.Oracle JDK v8
+The following files in the code repository location <imagetool-setup-location>/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0
are used for creating the image:
additionalBuildCmds.txt
buildArgs
Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs
file and change %DOCKER_REPO%
,%JDK_VERSION%
and %BUILDTAG%
appropriately.
For example:
+create
+--jdkVersion=8u321
+--type oud_wls
+--version=12.2.1.4.0
+--tag=oudsm-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts
+
The <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt
contains additional build commands. You may edit this file if you want to customize the image further.
Edit the <workdir>/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file
and under the GENERIC section add the line INSTALL_TYPE="Fusion Middleware Infrastructure". For example:
[GENERIC]
+INSTALL_TYPE="Fusion Middleware Infrastructure"
+DECLINE_SECURITY_UPDATES=true
+SECURITY_UPDATES_VIA_MYORACLESUPPORT=false
+
Add a JDK package to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type jdk --version 8uXXX --path <download location>/jdk-8uXXX-linux-x64.tar.gz
+
where XXX
is the JDK version downloaded
Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:
+$ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_oud.jar
+
+$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path <download location>/fmw_12.2.1.4.0_infrastructure.jar
+
Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <download location>/p28186730_139428_Generic.zip
+
Add the rest of the downloaded product patches to the WebLogic Image Tool cache:
+$ imagetool cache addEntry --key <patch>_12.2.1.4.0 --value <download location>/p<patch>_122140_Generic.zip
+
For example:
+$ imagetool cache addEntry --key 33727616_12.2.1.4.0 --value <download location>/p33727616_122140_Generic.zip
+$ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value <download location>/p33093748_122140_Generic.zip
+$ imagetool cache addEntry --key 32720458_12.2.1.4.0 --value <download location>/p32720458_122140_Generic.zip
+$ imagetool cache addEntry --key 33791665_12.2.1.4.220105 --value <download location>/p33791665_12214220105_Generic.zip
+$ imagetool cache addEntry --key 33723124_12.2.1.4.0 --value <download location>/p33723124_122140_Generic.zip
+$ imagetool cache addEntry --key 32647448_12.2.1.4.0 --value <download location>/p32647448_122140_Linux-x86-64.zip
+$ imagetool cache addEntry --key 33591019_12.2.1.4.0 --value <download location>/p33591019_122140_Generic.zip
+$ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value <download location>/p32999272_122140_Generic.zip
+$ imagetool cache addEntry --key 33448950_12.2.1.4.0 --value <download location>/p33448950_122140_Generic.zip
+$ imagetool cache addEntry --key 33697227_12.2.1.4.0 --value <download location>/p33697227_122140_Generic.zip
+$ imagetool cache addEntry --key 33678607_12.2.1.4.0 --value <download location>/p33678607_122140_Generic.zip
+$ imagetool cache addEntry --key 33735326_12.2.1.4.220105 --value <download location>/p33735326_12214220105_Generic.zip
+
Edit the <workdir>/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs
file and append the product patches and opatch patch as follows:
--patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105
+--opatchBugNumber=28186730_13.9.4.2.8
+
An example buildArgs
file is now as follows:
create
+--jdkVersion=8u321
+--type oud_wls
+--version=12.2.1.4.0
+--tag=oudsm-latestpsu:12.2.1.4.0
+--pull
+--installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response
+--additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt
+--additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts
+--patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105
+--opatchBugNumber=28186730_13.9.4.2.8
+
++Note: In the
+buildArgs
file:+
+- +
--jdkVersion
value must match the--version
value used in theimagetool cache addInstaller
command for--type jdk
.- +
--version
value must match the--version
value used in theimagetool cache addInstaller
command for--type OUDSM
.
Refer to this page for the complete list of options available with the WebLogic Image Tool create
command.
Create the Oracle Unified Directory Services Manager image:
+$ imagetool @<absolute path to buildargs file> --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
++Note: Make sure that the absolute path to the
+buildargs
file is prepended with a@
character, as shown in the example above.
For example:
+$ imagetool @<imagetool-setup-location>/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim
+
Check the created image using the docker images
command:
$ docker images | grep oudsm
+
The output will look similar to the following:
+oudsm-latestpsu 12.2.1.4.0 f6dd9d2ca0e6 4 minutes ago 3.72GB
+
Run the following command to save the container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oudsm-latestpsu.tar oudsm-latestpsu:12.2.1.4.0
+
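The tar file can then be copied to any node that needs the image and loaded into the local container image store, for example:
+$ docker load -i $WORKDIR/oudsm-latestpsu.tar
+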
The steps below show how to update an existing Oracle Unified Directory Services Manager image with an interim patch.
+The container image to be patched must be loaded in the local docker images repository before attempting these steps.
+In the examples below the image oracle/oudsm:12.2.1.4.0
is updated with an interim patch.
$ docker images
+
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oudsm 12.2.1.4.0 b051804ba15f 3 months ago 3.72GB
+
Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.
+Add the OPatch patch to the WebLogic Image Tool cache, for example:
+$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value <downloaded-patches-location>/p28186730_139428_Generic.zip
+
Execute the imagetool cache addEntry
command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip
:
$ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value <downloaded-patches-location>/p33521773_12214211008_Generic.zip
+
Provide the following arguments to the WebLogic Image Tool update
command:
–-fromImage
- Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oudsm:12.2.1.4.0
.–-patches
- Multiple patches can be specified as a comma-separated list.--tag
- Specify the new tag to be applied for the image being built.Refer here for the complete list of options available with the WebLogic Image Tool update
command.
++Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.
+
For example:
+$ imagetool update --fromImage oracle/oudsm:12.2.1.4.0 --tag=oracle/oudsm-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8
+
++Note: If the command fails because the files in the image being upgraded are not owned by
+oracle:oracle
, then add the parameter--chown <userid>:<groupid>
to correspond with the values returned in the error.
Check the built image using the docker images
command:
$ docker images | grep oudsm
+
The output will look similar to the following:
+REPOSITORY TAG IMAGE ID CREATED SIZE
+oracle/oudsm-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 1.11GB
+oracle/oudsm 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB
+
Run the following command to save the patched container image to a tar file:
+$ docker save -o <path>/<file>.tar <image>
+
For example:
+$ docker save -o $WORKDIR/oudsm-new.tar oracle/oudsm-new:12.2.1.4.0
+
This chapter demonstrates how to deploy Oracle Unified Directory Services Manager (OUDSM) 12c instance(s) using the Helm package manager for Kubernetes.
+Based on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.
+Create a Kubernetes namespace for the OUDSM deployment by running the following command:
+$ kubectl create namespace <namespace>
+
For example:
+$ kubectl create namespace oudsmns
+
The output will look similar to the following:
+namespace/oudsmns created
+
Create a Kubernetes secret that stores the credentials for the container registry where the OUDSM image is stored. This step must be followed if using Oracle Container Registry or your own private container registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.
+Run the following command to create the secret:
+kubectl create secret docker-registry "orclcred" --docker-server=<CONTAINER_REGISTRY> \
+--docker-username="<USER_NAME>" \
+--docker-password=<PASSWORD> --docker-email=<EMAIL_ID> \
+--namespace=<domain_namespace>
+
For example, if using Oracle Container Registry:
+kubectl create secret docker-registry "orclcred" --docker-server=container-registry.oracle.com \
+--docker-username="user@example.com" \
+--docker-password=password --docker-email=user@example.com \
+--namespace=oudsmns
+
Replace <USER_NAME>
and <PASSWORD>
with the credentials for the registry with the following caveats:
If using Oracle Container Registry to pull the OUDSM container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware
> oudsm_cpu
and accept the license agreement.
If using your own container registry to store the OUDSM container image, this is the username and password (or token) for your container registry.
+The output will look similar to the following:
+secret/orclcred created
+
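To verify the secret exists in the correct namespace:
+$ kubectl get secret orclcred -n oudsmns
+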
As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.
+In this example /scratch/shared/
is a shared directory accessible from all nodes.
On the master node run the following command to create an oudsm_user_projects directory:
$ cd <persistent_volume>
+$ mkdir oudsm_user_projects
+$ sudo chown -R 1000:0 oudsm_user_projects
+
For example:
+$ cd /scratch/shared
+$ mkdir oudsm_user_projects
+$ sudo chown -R 1000:0 oudsm_user_projects
+
On the master node run the following to ensure it is possible to read and write to the persistent volume:
+$ cd <persistent_volume>/oudsm_user_projects
+$ touch filemaster.txt
+$ ls filemaster.txt
+
For example:
+$ cd /scratch/shared/oudsm_user_projects
+$ touch filemaster.txt
+$ ls filemaster.txt
+
On the first worker node run the following to ensure it is possible to read and write to the persistent volume:
+$ cd /scratch/shared/oudsm_user_projects
+$ ls filemaster.txt
+$ touch fileworker1.txt
+$ ls fileworker1.txt
+
Repeat the above for any other worker nodes, for example fileworker2.txt, and so on. Once you have proven that it is possible to read and write from each node to the persistent volume, delete the files created.
+The oudsm
Helm chart allows you to create or deploy Oracle Unified Directory Services Manager instances along with Kubernetes objects in a specified namespace.
The deployment can be initiated by running the following Helm command with reference to the oudsm
Helm chart, along with configuration parameters according to your environment.
cd $WORKDIR/kubernetes/helm
+$ helm install --namespace <namespace> \
+<Configuration Parameters> \
+<deployment/release name> \
+<Helm Chart Path/Name>
+
Configuration Parameters (override values in chart) can be passed on with --set
arguments on the command line and/or with -f / --values
arguments when referring to files.
Note: The examples in Create OUDSM instances below provide values which allow the user to override the default values provided by the Helm chart. A full list of configuration parameters and their default values is shown in Appendix: Configuration parameters.
+For more details about the helm
command and parameters, please execute helm --help
and helm install --help
.
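You can also list the chart's default values before deciding which ones to override. This is an optional check, assuming the chart is located under $WORKDIR/kubernetes/helm/oudsm as described above:
+$ helm show values $WORKDIR/kubernetes/helm/oudsm
+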
You can create OUDSM instances using one of the following methods:
Using a YAML file
Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create an oudsm-values-override.yaml
as follows:
image:
+ repository: <image_location>
+ tag: <image_tag>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudsm:
+ adminUser: weblogic
+ adminPass: <password>
+persistence:
+ type: filesystem
+ filesystem:
+ hostPath:
+ path: <persistent_volume>/oudsm_user_projects
+
For example:
+image:
+ repository: container-registry.oracle.com/middleware/oudsm_cpu
+ tag: 12.2.1.4-jdk8-ol7-<October'23>
+ pullPolicy: IfNotPresent
+imagePullSecrets:
+ - name: orclcred
+oudsm:
+ adminUser: weblogic
+ adminPass: <password>
+persistence:
+ type: filesystem
+ filesystem:
+ hostPath:
+ path: /scratch/shared/oudsm_user_projects
+
The following caveats exist:
+Replace <password>
with the relevant password.
If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:
+imagePullSecrets:
+ - name: orclcred
+
If using NFS for your persistent volume, then change the persistence
section as follows:
persistence:
+ type: networkstorage
+ networkstorage:
+ nfs:
+ path: <persistent_volume>/oudsm_user_projects
+ server: <NFS IP address>
+
Run the following command to deploy OUDSM:
+$ helm install --namespace <namespace> \
+--values oudsm-values-override.yaml \
+<release_name> oudsm
+
For example:
$ helm install --namespace oudsmns \
+--values oudsm-values-override.yaml \
+oudsm oudsm
+
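If you want to inspect the rendered Kubernetes manifests before performing the actual install, Helm's standard dry-run flags can be used first. This is an optional step, not part of the original instructions:
+$ helm install --dry-run --debug --namespace oudsmns \
+--values oudsm-values-override.yaml \
+oudsm oudsm
+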
Check the OUDSM deployment as per Verify the OUDSM deployment.
Using the --set argument
Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Run the following command to create an OUDSM instance:
+$ helm install --namespace oudsmns \
+--set oudsm.adminUser=weblogic,oudsm.adminPass=<password>,persistence.filesystem.hostPath.path=<persistent_volume>/oudsm_user_projects,image.repository=<image_location>,image.tag=<image_tag> \
+--set imagePullSecrets[0].name="orclcred" \
+<release_name> oudsm
+
For example:
+$ helm install --namespace oudsmns \
+--set oudsm.adminUser=weblogic,oudsm.adminPass=<password>,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-<October'23> \
+--set imagePullSecrets[0].name="orclcred" \
+oudsm oudsm
+
The following caveats exist:
Replace <password> with the relevant password.
If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name="orclcred"
If using NFS for your persistent volume, use persistence.networkstorage.nfs.path=<persistent_volume>/oudsm_user_projects,persistence.networkstorage.nfs.server=<NFS IP address> instead of the persistence.filesystem.hostPath.path parameter.
Check the OUDSM deployment as per Verify the OUDSM deployment.
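For reference, the NFS variant described in the caveats above might look like the following full command. This is a sketch only, not part of the original example; the NFS server address and image tag are placeholders:
+$ helm install --namespace oudsmns \
+--set oudsm.adminUser=weblogic,oudsm.adminPass=<password>,persistence.type=networkstorage,persistence.networkstorage.nfs.path=/scratch/shared/oudsm_user_projects,persistence.networkstorage.nfs.server=<NFS IP address>,image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=<image_tag> \
+--set imagePullSecrets[0].name="orclcred" \
+oudsm oudsm
+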
+In all the examples above, the following output is shown following a successful execution of the helm install
command.
NAME: oudsm
+LAST DEPLOYED: <DATE>
+NAMESPACE: oudsmns
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+
Run the following command to verify the OUDSM deployment:
+$ kubectl --namespace <namespace> get pod,service,secret,pv,pvc,ingress -o wide
+
For example:
+$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 <worker-node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/oudsm-1 ClusterIP 10.96.108.200 <none> 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1
+service/oudsm-lbr ClusterIP 10.96.41.201 <none> 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm
+
+NAME TYPE DATA AGE
+secret/orclcred kubernetes.io/dockerconfigjson 1 3h13m
+secret/oudsm-creds opaque 2 73m
+secret/oudsm-token-ksr4g kubernetes.io/service-account-token 3 73m
+secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 73m
+
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
+persistentvolume/oudsm-pv 30Gi RWX Retain Bound myoudsmns/oudsm-pvc manual 73m Filesystem
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 30Gi RWX manual 73m Filesystem
+
+NAME HOSTS ADDRESS PORTS AGE
+ingress.extensions/oudsm-ingress-nginx oudsm-1,oudsm-2,oudsm + 1 more... 100.102.51.230 80 73m
+
Note: It will take several minutes before all the services listed above show. While an oudsm pod shows a READY status of 0/1, the pod has started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:
$ kubectl logs oudsm-1 -n oudsmns
+
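To follow the log continuously while the server starts up, the standard kubectl options can be used, for example:
+$ kubectl logs -f --tail=100 oudsm-1 -n oudsmns
+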
Note: If the OUDSM deployment fails, additionally refer to Troubleshooting for instructions on how to describe the failing pod(s). Once the problem is identified, follow Undeploy an OUDSM deployment to clean down the deployment before deploying again.
+Kubernetes objects created by the Helm chart are detailed in the table below:
+Type | +Name | +Example Name | +Purpose | +
---|---|---|---|
Service Account | +<deployment/release name> | +oudsm | +Kubernetes Service Account for the Helm Chart deployment | +
Secret | +<deployment/release name>-creds | +oudsm-creds | +Secret object for Oracle Unified Directory Services Manager related critical values like passwords | +
Persistent Volume | +<deployment/release name>-pv | +oudsm-pv | +Persistent Volume for user_projects mount. | +
Persistent Volume Claim | +<deployment/release name>-pvc | +oudsm-pvc | +Persistent Volume Claim for user_projects mount. | +
Pod | +<deployment/release name>-N | +oudsm-1, oudsm-2, … | +Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances | +
Service | +<deployment/release name>-N | +oudsm-1, oudsm-2, … | +Service(s) for HTTP and HTTPS interfaces from Oracle Unified Directory Services Manager instance <deployment/release name>-N | +
Ingress | +<deployment/release name>-ingress-nginx | +oudsm-ingress-nginx | +Ingress Rules for HTTP and HTTPS interfaces. | +
With an OUDSM instance now deployed, you are ready to configure an ingress controller to direct traffic to OUDSM as per Configure an ingress for OUDSM.
+Find the deployment release name:
+$ helm --namespace <namespace> list
+
For example:
+$ helm --namespace oudsmns list
+
The output will look similar to the following:
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+oudsm oudsmns 2 <DATE> deployed oudsm-0.1 12.2.1.4.0
+
Delete the deployment using the following command:
+$ helm uninstall --namespace <namespace> <release>
+
For example:
+$ helm uninstall --namespace oudsmns oudsm
+release "oudsm" uninstalled
+
Delete the contents of the oudsm_user_projects
directory in the persistent volume:
$ cd <persistent_volume>/oudsm_user_projects
+$ rm -rf *
+
For example:
+$ cd /scratch/shared/oudsm_user_projects
+$ rm -rf *
+
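To confirm that the release and its associated objects have been removed, an optional check such as the following can be run:
+$ helm --namespace oudsmns list
+$ kubectl --namespace oudsmns get pod,service,pvc,pv
+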
The following table lists the configurable parameters of the ‘oudsm’ chart and their default values.
+Parameter | +Description | +Default Value | +
---|---|---|
replicaCount | +Number of Oracle Unified Directory Services Manager instances/pods/services to be created | +1 | +
restartPolicyName | +restartPolicy to be configured for each POD containing Oracle Unified Directory Services Manager instance | +OnFailure | +
image.repository | +Oracle Unified Directory Services Manager Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers | +oracle/oudsm | +
image.tag | +Oracle Unified Directory Services Manager Image Tag. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers | +12.2.1.4.0 | +
image.pullPolicy | +Policy to pull the image | +IfNotPresent | +
imagePullSecrets.name | +name of Secret resource containing private registry credentials | +regcred | +
nameOverride | +override the fullname with this name | ++ |
fullnameOverride | +Overrides the fullname with the provided string | ++ |
serviceAccount.create | +Specifies whether a service account should be created | +true | +
serviceAccount.name | +If not set and create is true, a name is generated using the fullname template | +oudsm-< fullname >-token-< randomalphanum > | +
podSecurityContext | +Security context policies to add to the controller pod | ++ |
securityContext | +Security context policies to add by default | ++ |
service.type | +type of controller service to create | +ClusterIP | +
nodeSelector | +node labels for pod assignment | ++ |
tolerations | +node taints to tolerate | ++ |
affinity | +node/pod affinities | ++ |
ingress.enabled | ++ | true | +
ingress.type | +Supported value: nginx | +nginx | +
ingress.host | +Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as < fullname >-http.< domain >, < fullname >-http-0.< domain >, < fullname >-http-1.< domain >, etc. | ++ |
ingress.domain | +Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as < host >.< domain >, < host >-0.< domain >, < host >-1.< domain >, etc. | ++ |
ingress.backendPort | ++ | http | +
ingress.nginxAnnotations | ++ | { kubernetes.io/ingress.class: “nginx" nginx.ingress.kubernetes.io/affinity-mode: “persistent” nginx.ingress.kubernetes.io/affinity: “cookie” } | +
ingress.tlsSecret | +Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name < fullname >-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as < namespace >/< tlsSecretName > | ++ |
ingress.certCN | +Subject’s common name (cn) for SelfSigned Cert. | +< fullname > | +
ingress.certValidityDays | +Validity of Self-Signed Cert in days | +365 | +
secret.enabled | +If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through –set, –values, etc.) would be used while creation of pods. | +true | +
secret.name | +secret name to use an already created Secret | +oudsm-< fullname >-creds | +
secret.type | +Specifies the type of the secret | +Opaque | +
persistence.enabled | +If enabled, it will use the persistent volume. if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. | +true | +
persistence.pvname | +pvname to use an already created Persistent Volume , If blank will use the default name | +oudsm-< fullname >-pv | +
persistence.pvcname | +pvcname to use an already created Persistent Volume Claim , If blank will use default name | +oudsm-< fullname >-pvc | +
persistence.type | +supported values: either filesystem or networkstorage or custom | +filesystem | +
persistence.filesystem.hostPath.path | +The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. | +/scratch/shared/oudsm_user_projects | +
persistence.networkstorage.nfs.path | +Path of NFS Share location | +/scratch/shared/oudsm_user_projects | +
persistence.networkstorage.nfs.server | +IP or hostname of NFS Server | +0.0.0.0 | +
persistence.custom.* | +Based on values/data, YAML content would be included in PersistenceVolume Object | ++ |
persistence.accessMode | +Specifies the access mode of the location provided | +ReadWriteMany | +
persistence.size | +Specifies the size of the storage | +10Gi | +
persistence.storageClass | +Specifies the storageclass of the persistence volume. | +empty | +
persistence.annotations | +specifies any annotations that will be used | +{ } | +
oudsm.adminUser | +Weblogic Administration User | +weblogic | +
oudsm.adminPass | +Password for Weblogic Administration User | ++ |
oudsm.startupTime | +Expected startup time. After specified seconds readinessProbe would start | +900 | +
oudsm.livenessProbeInitialDelay | +Paramter to decide livenessProbe initialDelaySeconds | +1200 | +
elk.logStashImage | +The version of logstash you want to install | +logstash:8.3.1 | +
elk.sslenabled | +If SSL is enabled for ELK set the value to true, or if NON-SSL set to false. This value must be lowercase | +true | +
elk.eshosts | +The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used | +https://elasticsearch.example.com:9200 | +
elk.esuser | +The name of the user for logstash to access Elasticsearch | +logstash_internal | +
elk.espassword | +The password for ELK_USER | +password | +
elk.esapikey | +The API key details | +apikey | +
elk.esindex | +The log name | +oudsmlogs-00001 | +
elk.imagePullSecrets | +secret to be used for pulling logstash image | +dockercred | +
Oracle supports the deployment of Oracle Unified Directory Services Manager on Kubernetes. See the following sections:
Oracle Unified Directory Services Manager (OUDSM) is an interface for managing instances of Oracle Unified Directory. Oracle Unified Directory Services Manager enables you to configure the structure of the directory, define objects in the directory, add and configure users, groups, and other entries. Oracle Unified Directory Services Manager is also the interface you use to manage entries, schema, security, and other directory features.
+This project supports deployment of Oracle Unified Directory Services Manager images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The Oracle Unified Directory Services Manager Image refers to binaries for Oracle Unified Directory Services Manager Release 12.2.1.4.0.
+Follow the instructions in this guide to set up Oracle Unified Directory Services Manager on Kubernetes.
+The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 23.4.1.
+See the Release Notes for recent changes and known issues for Oracle Unified Directory deployment on Kubernetes.
+This documentation explains how to configure OUDSM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially. Please note that this documentation does not explain how to configure a Kubernetes cluster given the product can be deployed on any compliant Kubernetes vendor.
+If you are deploying multiple Oracle Identity Management products on the same Kubernetes cluster, then you must follow the Enterprise Deployment Guide outlined in Enterprise Deployments. +Please note, you also have the option to follow the Enterprise Deployment Guide even if you are only installing OUDSM and no other Oracle Identity Management products.
+Note: If you need to understand how to configure a Kubernetes cluster ready for an Oracle Unified Directory Services Manager deployment, you should follow the Enterprise Deployment Guide referenced in Enterprise Deployments. The Enterprise Deployment Automation section also contains details on automation scripts that can:
+To view documentation for an earlier release, see:
Important considerations for Oracle Unified Directory Services Manager instances in Kubernetes.
Describes the steps for scaling up/down for OUDSM pods.
Describes the steps for logging and visualization with Elasticsearch and Kibana.
Describes the steps for Monitoring the Oracle Unified Directory Services Manager environment.
This section describes how to install and configure logging and visualization for the oudsm Helm chart deployment.
+The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications.
+If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +Installing Elasticsearch (ELK) Stack and Kibana
+In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with variables applicable to your ELK environment.
+Most of the values for the variables will be based on your ELK deployment as per Installing Elasticsearch (ELK) Stack and Kibana.
+The table below outlines the variables and values you must set:
+Variable | +Sample Value | +Description | +
---|---|---|
<ELK_VER> |
+8.3.1 |
+The version of logstash you want to install. | +
<ELK_SSL> |
+true |
+If SSL is enabled for ELK set the value to true , or if NON-SSL set to false . This value must be lowercase. |
+
<ELK_HOSTS> |
+https://elasticsearch.example.com:9200 |
+The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used. | +
<ELK_USER> |
+logstash_internal |
+The name of the user for logstash to access Elasticsearch. | +
<ELK_PASSWORD> |
+password |
+The password for ELK_USER. | +
<ELK_APIKEY> |
+apikey |
+The API key details. | +
You will also need the BASE64 version of the Certificate Authority (CA) certificate(s) that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See Copying the Elasticsearch Certificate for details on how to get the correct certificate. In the example below the certificate is called elk.crt
.
Create a Kubernetes secret for Elasticsearch using the API Key or Password.
+a) If ELK uses an API Key for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_APIKEY>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password=<ELK_APIKEY>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
b) If ELK uses a password for authentication:
+$ kubectl create secret generic elasticsearch-pw-elastic -n <domain_namespace> --from-literal password=<ELK_PASSWORD>
+
For example:
+$ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password=<ELK_PASSWORD>
+
The output will look similar to the following:
+secret/elasticsearch-pw-elastic created
+
Note: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above.
+Create a Kubernetes secret to access the required images on hub.docker.com:
+Note: You must first have a user account on hub.docker.com:
+$ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="<docker_username>" --docker-password=<password> --docker-email=<docker_email_credentials> --namespace=<domain_namespace>
+
For example:
+$ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="username" --docker-password=<password> --docker-email=user@example.com --namespace=oudsmns
+
The output will look similar to the following:
+secret/dockercred created
+
Navigate to the $WORKDIR/kubernetes/helm
directory and create a logging-override-values.yaml
file as follows:
elk:
+ imagePullSecrets:
+ - name: dockercred
+ IntegrationEnabled: true
+ logStashImage: logstash:<ELK_VER>
+ logstashConfigMap: false
+ esindex: oudsmlogs-00001
+ sslenabled: <ELK_SSL>
+ eshosts: <ELK_HOSTS>
+ # Note: We need to provide either esuser,espassword or esapikey
+ esuser: <ELK_USER>
+ espassword: elasticsearch-pw-elastic
+ esapikey: elasticsearch-pw-elastic
+
Change the <ELK_VER>, <ELK_SSL>, <ELK_HOSTS>, and <ELK_USER> to match the values for your environment.
Replace the elk.crt in $WORKDIR/kubernetes/helm/oudsm/certs/ with the elk.crt for your Elasticsearch server.
If using an API key for your authentication, leave esuser: and espassword: with no value.
If using a password for your authentication, leave esapikey: with no value, but delete the elasticsearch-pw-elastic entry from it.
If no authentication is used for ELK, leave esuser, espassword, and esapikey with no value assigned.
For example:
+elk:
+ imagePullSecrets:
+ - name: dockercred
+ IntegrationEnabled: true
+ logStashImage: logstash:8.3.1
+ logstashConfigMap: false
+ esindex: oudsmlogs-00001
+ sslenabled: true
+ eshosts: https://elasticsearch.example.com:9200
+ # Note: We need to provide either esuser,espassword or esapikey
+ esuser: logstash_internal
+ espassword: elasticsearch-pw-elastic
+ esapikey:
+
Run the following command to upgrade the oudsm deployment with the ELK configuration:
+$ helm upgrade --namespace <namespace> --values <valuesfile.yaml> <releasename> oudsm --reuse-values
+
For example:
+$ helm upgrade --namespace oudsmns --values logging-override-values.yaml oudsm oudsm --reuse-values
+
The output should look similar to the following:
+Release "oudsm" has been upgraded. Happy Helming!
+NAME: oudsm
+LAST DEPLOYED: <DATE>
+NAMESPACE: oudsmns
+STATUS: deployed
+REVISION: 2
+TEST SUITE: None
+
Run the following command to check the logstash
pod is created correctly:
$ kubectl get pods -n <namespace>
+
For example:
+$ kubectl get pods -n oudsmns
+
The output should look similar to the following:
+NAME READY STATUS RESTARTS AGE
+oudsm-1 1/1 Running 0 51m
+oudsm-logstash-56dbcc6d9f-mxsgj 1/1 Running 0 2m7s
+
Note: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using:
+$ kubectl logs -f oudsm-logstash-<pod> -n oudsmns
+
Most errors occur due to misconfiguration of the logging-override-values.yaml
. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation.
If the pod has errors, view the helm history to find the last working revision, for example:
+$ helm history oudsm -n oudsmns
+
The output will look similar to the following:
+REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
+1 <DATE> superseded oudsm-0.1 12.2.1.4.0 Install complete
+2 <DATE> deployed oudsm-0.1 12.2.1.4.0 Upgrade complete
+
Rollback to the previous working revision by running:
+$ helm rollback <release> <revision> -n <domain_namespace>
+
For example:
+helm rollback oudsm 1 -n oudsmns
+
Once you have resolved the issue in the yaml files, run the helm upgrade
command outlined earlier to recreate the logstash pod.
To access the Kibana console you will need the Kibana URL as per Installing Elasticsearch (ELK) Stack and Kibana.
+For Kibana 7.7.x and below:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Kibana > Index Patterns.
+In the Create Index Pattern page enter oudsmlogs*
for the Index pattern and click Next Step.
In the Configure settings page, from the Time Filter field name drop down menu select @timestamp
and click Create index pattern.
Once the index pattern is created click on Discover in the navigation menu to view the OUDSM logs.
+For Kibana version 7.8.X and above:
+Access the Kibana console with http://<hostname>:<port>/app/kibana
and login with your username and password.
From the Navigation menu, navigate to Management > Stack Management.
+Click Data Views in the Kibana section.
+Click Create Data View and enter the following information:
Name: oudsmlogs*
Timestamp field: @timestamp
Click Create Data View.
+From the Navigation menu, click Discover to view the log file entries.
+From the drop down menu, select oudsmlogs*
to view the log file entries.
After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana.
+Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:
+$ kubectl create namespace <namespace>
+
For example:
+$ kubectl create namespace monitoring
+
The output will look similar to the following:
+namespace/monitoring created
+
Add the Prometheus and Grafana Helm repositories by issuing the following command:
+$ helm repo add prometheus https://prometheus-community.github.io/helm-charts
+
The output will look similar to the following:
+"prometheus" has been added to your repositories
+
Run the following command to update the repositories:
+$ helm repo update
+
The output will look similar to the following:
+Hang tight while we grab the latest from your chart repositories...
+...Successfully got an update from the "stable" chart repository
+...Successfully got an update from the "prometheus" chart repository
+...Successfully got an update from the "prometheus-community" chart repository
+
+Update Complete. Happy Helming!
+
Install the Prometheus operator using the helm
command:
$ helm install <release_name> prometheus/kube-prometheus-stack -n <namespace>
+
For example:
+$ helm install monitoring prometheus/kube-prometheus-stack -n monitoring
+
The output should look similar to the following:
+NAME: monitoring
+LAST DEPLOYED: <DATE>
+NAMESPACE: monitoring
+STATUS: deployed
+REVISION: 1
+NOTES:
+kube-prometheus-stack has been installed. Check its status by running:
+ kubectl --namespace monitoring get pods -l "release=monitoring"
+
+Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
+
Note: If your cluster does not have access to the internet to pull external images, such as prometheus or grafana, you must load the images into a local container registry. You must then run the install as follows:
+helm install --set grafana.image.repository=container-registry.example.com/grafana --set grafana.image.tag=8.4.2 monitoring prometheus/kube-prometheus-stack -n monitoring
+
View the objects created for Prometheus and Grafana by issuing the following command:
+$ kubectl get all,service,pod -o wide -n <namespace>
+
For example:
+$ kubectl get all,service,pod -o wide -n monitoring
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 27s 10.244.2.141 <worker-node> <none> <none>
+pod/monitoring-grafana-578f79599c-qqdfb 3/3 Running 0 34s 10.244.1.127 <worker-node> <none> <none>
+pod/monitoring-kube-prometheus-operator-65cdf7995-w6btr 1/1 Running 0 34s 10.244.1.126 <worker-node> <none> <none>
+pod/monitoring-kube-state-metrics-56bfd4f44f-5ls8t 1/1 Running 0 34s 10.244.2.139 <worker-node> <none> <none>
+pod/monitoring-prometheus-node-exporter-5b2f6 1/1 Running 0 34s 100.102.48.84 <worker-node> <none> <none>
+pod/monitoring-prometheus-node-exporter-fw9xh 1/1 Running 0 34s 100.102.48.28 <worker-node> <none> <none>
+pod/monitoring-prometheus-node-exporter-s5n9g 1/1 Running 0 34s 100.102.48.121 <master-node> <none> <none>
+pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 26s 10.244.1.128 <worker-node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/alertmanager-operated ClusterIP None <none> 9093/TCP,9094/TCP,9094/UDP 27s app.kubernetes.io/name=alertmanager
+service/monitoring-grafana ClusterIP 10.110.97.252 <none> 80/TCP 34s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+service/monitoring-kube-prometheus-alertmanager ClusterIP 10.110.82.176 <none> 9093/TCP 34s alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager
+service/monitoring-kube-prometheus-operator ClusterIP 10.104.147.173 <none> 443/TCP 34s app=kube-prometheus-stack-operator,release=monitoring
+service/monitoring-kube-prometheus-prometheus ClusterIP 10.110.109.245 <none> 9090/TCP 34s app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus
+service/monitoring-kube-state-metrics ClusterIP 10.107.111.214 <none> 8080/TCP 34s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+service/monitoring-prometheus-node-exporter ClusterIP 10.108.97.196 <none> 9100/TCP 34s app=prometheus-node-exporter,release=monitoring
+service/prometheus-operated ClusterIP None <none> 9090/TCP 26s app.kubernetes.io/name=prometheus
+
+NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
+daemonset.apps/monitoring-prometheus-node-exporter 3 3 3 3 3 <none> 34s node-exporter quay.io/prometheus/node-exporter:v1.3.1 app=prometheus-node-exporter,release=monitoring
+
+NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
+deployment.apps/monitoring-grafana 0/1 1 0 34s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana
+deployment.apps/monitoring-kube-prometheus-operator 1/1 1 1 34s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,release=monitoring
+deployment.apps/monitoring-kube-state-metrics 1/1 1 1 34s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics
+
+NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
+replicaset.apps/monitoring-grafana-578f79599c 1 1 0 34s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c
+replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995 1 1 1 34s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring
+replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f 1 1 1 34s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f
+
+NAME READY AGE CONTAINERS IMAGES
+statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager 1/1 27s alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 26s prometheus,config-reloader quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0
+
Edit the grafana
service to add the NodePort:
$ kubectl edit service/<deployment_name>-grafana -n <namespace>
+
For example:
+$ kubectl edit service/monitoring-grafana -n monitoring
+
Note: This opens an edit session for the service where parameters can be changed using standard vi commands.
Change the ports entry and add nodePort: 30091
and type: NodePort
:
ports:
+ - name: http-web
+ nodePort: 30091
+ port: 80
+ protocol: TCP
+ targetPort: 3000
+ selector:
+ app.kubernetes.io/instance: monitoring
+ app.kubernetes.io/name: grafana
+ sessionAffinity: None
+ type: NodePort
+
Save the file and exit (:wq)
.
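If you prefer a non-interactive change, the same update could be applied with kubectl patch. This is a sketch of an alternative approach, not part of the original steps; it assumes the service is named monitoring-grafana as in the example above:
+$ kubectl patch service monitoring-grafana -n monitoring --type merge \
+-p '{"spec": {"type": "NodePort", "ports": [{"name": "http-web", "port": 80, "protocol": "TCP", "targetPort": 3000, "nodePort": 30091}]}}'
+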
Access the Grafana GUI using http://<HostIP>:<nodeport>
and login with admin/prom-operator
. Change the password when prompted.
Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.
+Import the Grafana dashboard by navigating on the left hand menu to Dashboards > Import. Click Upload JSON file and select the json downloaded file. In the Prometheus
drop down box select Prometheus
. Click Import. The dashboard should be displayed.
Verify your installation by viewing some of the customized dashboard views.
+This section describes how to increase or decrease the number of OUDSM pods in the Kubernetes deployment.
+By default the oudsm
helm chart deployment starts one pod: oudsm-1
.
The number of pods started is determined by the replicaCount
, which is set to 1
by default. A value of 1
starts the pod above.
To scale up or down the number of OUDSM pods, set replicaCount
accordingly.
Run the following command to view the number of pods in the OUDSM deployment:
+$ kubectl --namespace <namespace> get pods -o wide
+
For example:
+$ kubectl --namespace oudsmns get pods -o wide
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 <worker-node> <none> <none>
+
In this example, replicaCount
is increased to 2
which creates a new OUDSM pod oudsm-2
with associated services created.
You can scale up the number of OUDSM pods using one of the following methods:
Using a YAML file
Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create an oudsm-scaleup-override.yaml
file that contains:
replicaCount: 2
+
Run the following command to scale up the OUDSM pods:
+$ helm upgrade --namespace <namespace> \
+--values oudsm-scaleup-override.yaml \
+<release_name> oudsm --reuse-values
+
For example:
+$ helm upgrade --namespace oudsmns \
+--values oudsm-scaleup-override.yaml \
+oudsm oudsm --reuse-values
+
Using the --set argument
Run the following command to scale up the OUDSM pods:
+$ helm upgrade --namespace <namespace> \
+--set replicaCount=2 \
+<release_name> oudsm --reuse-values
+
For example:
+$ helm upgrade --namespace oudsmns \
+--set replicaCount=2 \
+oudsm oudsm --reuse-values
+
Verify the new OUDSM pod oudsm-2
has started:
$ kubectl get pod,service -o wide -n <namespace>
+
For example:
+$ kubectl get pods,service -n oudsmns
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oudsm-1 1/1 Running 0 88m 10.244.0.19 <worker-node> <none> <none>
+pod/oudsm-2 1/1 Running 0 15m 10.245.3.45 <worker-node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/oudsm-1 ClusterIP 10.96.108.200 <none> 7001/TCP,7002/TCP 88m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1
+service/oudsm-2 ClusterIP 10.96.31.201 <none> 7001/TCP,7002/TCP 15m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-2
+service/oudsm-lbr ClusterIP 10.96.41.201 <none> 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm
+
Note: It will take several minutes before all the services listed above show. While the oudsm-2 pod shows a READY status of 0/1, the pod has started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod log, by running the following command:
$ kubectl logs oudsm-2 -n oudsmns
+
Scaling down OUDSM pods is performed in exactly the same way as in Scaling up OUDSM pods, except that the replicaCount is reduced to the required number of pods (see the example after this paragraph).
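For example, to return to a single pod you could run the same helm upgrade used for scaling up, with the lower count (shown here with the example release and namespace used throughout this document):
+$ helm upgrade --namespace oudsmns \
+--set replicaCount=1 \
+oudsm oudsm --reuse-values
+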
Once the helm command is executed the pod(s) will move to a Terminating
state. In the example below replicaCount
was reduced from 2
to 1
and hence oudsm-2
has moved to Terminating
:
$ kubectl get pods -n oudsmns
+
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oudsm-1 1/1 Running 0 92m 10.244.0.19 <worker-node> <none> <none>
+pod/oudsm-2 1/1 Terminating 0 19m 10.245.3.45 <worker-node> <none> <none>
+
The pod will take a minute or two to stop and then will disappear:
+$ kubectl get pods -n oudsmns
+
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oudsm-1 1/1 Running 0 94m 10.244.0.19 <worker-node> <none> <none>
+
This section shows you how to upgrade the OUDSM container image, and how to upgrade the Elasticsearch and Kibana stack to October 23 (23.4.1).
+The upgrade path taken depends on the version you are upgrading from.
+Please refer to the Release Notes for information on which upgrade steps are necessary for the version you are upgrading to.
Instructions on how to update your OUDSM Kubernetes cluster with a new OUDSM container image.
Instructions on how to upgrade Elasticsearch and Kibana.
In this section the Oracle Unified Directory Services Manager (OUDSM) deployment is updated with a new OUDSM container image.
+Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.
+You can update the deployment with a new OUDSM container image using one of the following methods:
Using a YAML file
Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Create an oudsm-patch-override.yaml
file that contains:
image:
+ repository: <image_location>
+ tag: <image_tag>
+imagePullSecrets:
+ - name: orclcred
+
For example:
+image:
+ repository: container-registry.oracle.com/middleware/oudsm_cpu
+ tag: 12.2.1.4-jdk8-ol7-<October'23>
+imagePullSecrets:
+ - name: orclcred
+
The following caveats exist:
+If you are not using Oracle Container Registry or your own container registry for your oudsm container image, then you can remove the following:
+imagePullSecrets:
+ - name: orclcred
+
Run the following command to upgrade the deployment:
+$ helm upgrade --namespace <namespace> \
+--values oudsm-patch-override.yaml \
+<release_name> oudsm --reuse-values
+
For example:
+$ helm upgrade --namespace oudsmns \
+--values oudsm-patch-override.yaml \
+oudsm oudsm --reuse-values
+
Using the --set argument
Navigate to the $WORKDIR/kubernetes/helm
directory:
$ cd $WORKDIR/kubernetes/helm
+
Run the following command to update the deployment with a new OUDSM container image:
+$ helm upgrade --namespace <namespace> \
+--set image.repository=<image_location>,image.tag=<image_tag> \
+--set imagePullSecrets[0].name="orclcred" \
+<release_name> oudsm --reuse-values
+
For example:
+$ helm upgrade --namespace oudsmns \
+--set image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-<October'23> \
+--set imagePullSecrets[0].name="orclcred" \
+oudsm oudsm --reuse-values
+
The following caveats exist:
If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name="orclcred"
After updating with the new image the pod will restart. Verify the pod is running:
+$ kubectl --namespace <namespace> get pods
+
For example:
+$ kubectl --namespace oudsmns get pods
+
The output will look similar to the following:
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 <worker-node> <none> <none>
+
Note: It will take several minutes before the pod starts. While an oudsm pod shows a READY status of 0/1, the pod has started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:
+$ kubectl logs oudsm-1 -n oudsmns
+
Verify the pod is using the new image by running the following command:
+$ kubectl describe pod <pod> -n <namespace>
+
For example:
+$ kubectl describe pod oudsm-1 -n oudsmns
+
The output will look similar to the following:
+Name: oudsm-1
+Namespace: oudsmns
+Priority: 0
+Node: <worker-node>/100.102.48.28
+Start Time: <DATE>
+Labels: app.kubernetes.io/instance=oudsm
+ app.kubernetes.io/managed-by=Helm
+ app.kubernetes.io/name=oudsm
+ app.kubernetes.io/version=12.2.1.4.0
+ helm.sh/chart=oudsm-0.1
+ oudsm/instance=oudsm-1
+Annotations: meta.helm.sh/release-name: oudsm
+ meta.helm.sh/release-namespace: oudsmns
+Status: Running
+IP: 10.244.1.90
+
+
+etc...
+
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Killing 22m kubelet Container oudsm definition changed, will be restarted
+ Normal Created 21m (x2 over 61m) kubelet Created container oudsm
+ Normal Pulling 21m kubelet Container image "container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-<October'23>"
+ Normal Started 21m (x2 over 61m) kubelet Started container oudsm
+
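If you only want to see the image reference rather than the full pod description, a jsonpath query gives a quicker check. This is an optional alternative, not part of the original steps:
+$ kubectl get pod oudsm-1 -n oudsmns -o jsonpath='{.spec.containers[0].image}'
+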
This section shows how to upgrade Elasticsearch and Kibana.
+To determine if this step is required for the version you are upgrading from, refer to the Release Notes.
+Download the latest code repository as follows:
+Create a working directory to setup the source code.
+$ mkdir <workdir>
+
For example:
+$ mkdir /scratch/OUDSMK8SOctober23
+
Download the latest OUDSM deployment scripts from the OUDSM repository.
+$ cd <workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/OUDSMK8SOctober23
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectorySM
+
For example:
+$ export WORKDIR=/scratch/OUDSMK8SOctober23/fmw-kubernetes/OracleUnifiedDirectorySM
+
From October 22 (22.4.1) onwards, OUDSM logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack.
+Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana.
+If you are upgrading from July 22 (22.3.1) or earlier, to October 23 (23.4.1), you must first undeploy Elasticsearch and Kibana using the steps below:
+Navigate to the $WORKDIR/kubernetes/helm
directory and create a logging-override-values-uninstall.yaml
with the following:
elk:
+ enabled: false
+
Run the following command to remove the existing ELK deployment:
+$ helm upgrade --namespace <domain_namespace> --values <valuesfile.yaml> <releasename> oudsm --reuse-values
+
For example:
+$ helm upgrade --namespace oudsmns --values logging-override-values-uninstall.yaml oudsm oudsm --reuse-values
+
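Afterwards you can confirm that the logstash pod has been removed by listing the pods in the namespace (an optional check):
+$ kubectl get pods -n oudsmns
+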
As per the Prerequisites a Kubernetes cluster should have already been configured.
+Run the following command on the master node to check the cluster and worker nodes are running:
+$ kubectl get nodes,pods -n kube-system
+
The output will look similar to the following:
+NAME STATUS ROLES AGE VERSION
+node/worker-node1 Ready <none> 17h v1.26.6+1.el8
+node/worker-node2 Ready <none> 17h v1.26.6+1.el8
+node/master-node Ready master 23h v1.26.6+1.el8
+
+NAME READY STATUS RESTARTS AGE
+pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h
+pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h
+pod/etcd-master 1/1 Running 0 21h
+pod/kube-apiserver-master-node 1/1 Running 0 21h
+pod/kube-controller-manager-master-node 1/1 Running 0 21h
+pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h
+pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h
+pod/kube-proxy-2kxv2 1/1 Running 0 17h
+pod/kube-proxy-82vvj 1/1 Running 0 17h
+pod/kube-proxy-nrgw9 1/1 Running 0 23h
+pod/kube-scheduler-master                  1/1     Running   0          21h
+
The Oracle Unified Directory Services Manager (OUDSM) Kubernetes deployment requires access to an OUDSM container image. The image can be obtained in the following ways:
+The prebuilt OUDSM October 2023 container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory Services Manager 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.
Note: Before using this image you must log in to Oracle Container Registry, navigate to Middleware > oudsm_cpu, and accept the license agreement.
You can use this image in the following ways:
You can build your own OUDSM container image using the WebLogic Image Tool. This is recommended if you need to apply one-off patches to a prebuilt OUDSM container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.
+You can use an image built with WebLogic Image Tool in the following ways:
+Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.
+Oracle Unified Directory Services Manager deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory Services Manager containers using the Helm charts provided. To deploy Oracle Unified Directory Services Manager on Kubernetes you should set up the deployment scripts on the master node as below:
+Create a working directory to setup the source code.
+$ mkdir <workdir>
+
For example:
+$ mkdir /scratch/OUDSMContainer
+
Download the latest OUDSM deployment scripts from the OUDSM repository:
+$ cd <workdir>
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
For example:
+$ cd /scratch/OUDSMContainer
+$ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/23.4.1
+
Set the $WORKDIR
environment variable as follows:
$ export WORKDIR=<workdir>/fmw-kubernetes/OracleUnifiedDirectorySM
+
For example:
+$ export WORKDIR=/scratch/OUDSMContainer/fmw-kubernetes/OracleUnifiedDirectorySM
+
You are now ready to create the OUDSM deployment as per Create OUDSM instances.
+This document provides information about the system requirements for deploying and running Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) in a Kubernetes environment.
+Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. +Please refer to your vendor specific documentation for this information. Also see Getting Started.
Review the latest changes and known issues for Oracle Unified Directory Services Manager on Kubernetes.
+Date | +Version | +Change | +
---|---|---|
October, 2023 | +23.4.1 | +Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the October 2023 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. | +
+ | + | If upgrading to October 23 (23.4.1) from October 22 (22.4.1) or later, upgrade as follows: | +
+ | + | 1. Patch the OUDSM container image to October 23 | +
+ | + | If upgrading to October 23 (23.4.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: | +
+ | + | 1. Patch the OUDSM container image to October 23 | +
+ | + | 2. Upgrade Elasticsearch and Kibana. | +
+ | + | To upgrade to October 23 (23.4.1) you must follow the instructions in Patch and Upgrade. | +
July, 2023 | +23.3.1 | +Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the July 2023 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. | +
+ | + | If upgrading to July 23 (23.3.1) from October 22 (22.4.1) or later, upgrade as follows: | +
+ | + | 1. Patch the OUDSM container image to July 23 | +
+ | + | If upgrading to July 23 (23.3.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: | +
+ | + | 1. Patch the OUDSM container image to July 23 | +
+ | + | 2. Upgrade Elasticsearch and Kibana. | +
+ | + | See Patch and Upgrade for these instructions. | +
April, 2023 | +23.2.1 | +Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the April 2023 container image which contains the April Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. | +
+ | + | If upgrading to April 23 (23.2.1) from October 22 (22.4.1), upgrade as follows: | +
+ | + | 1. Patch the OUDSM container image to April 23 | +
+ | + | If upgrading to April 23 (23.2.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: | +
+ | + | 1. Patch the OUDSM container image to April 23 | +
+ | + | 2. Upgrade Elasticsearch and Kibana. | +
+ | + | See Patch and Upgrade for these instructions. | +
January, 2023 | +23.1.1 | +Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the January 2023 container image which contains the January Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. | +
+ | + | If upgrading to January 23 (23.1.1) from October 22 (22.4.1) upgrade as follows: | +
+ | + | 1. Patch the OUDSM container image to January 23 | +
+ | + | If upgrading to January 23 (23.1.1) from July 22 (22.3.1) or earlier, you must upgrade the following in order: | +
+ | + | 1. Patch the OUDSM container image to January 23 | +
+ | + | 2. Upgrade Elasticsearch and Kibana. | +
+ | + | See Patch and Upgrade for these instructions. | +
October, 2022 | +22.4.1 | +Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. | +
+ | + | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. | +
+ | + | OUDSM container images are now only available from container-registry.oracle.com and are no longer available from My Oracle Support. | +
+ | + | If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order: | +
+ | + | 1. Patch the OUDSM container image to October 22 | +
+ | + | 2. Upgrade Elasticsearch and Kibana. | +
+ | + | See Patch and Upgrade for these instructions. | +
July, 2022 | +22.3.1 | +Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. | +
April, 2022 | +22.2.1 | +Updated for CRI-O support. | +
November 2021 | +21.4.2 | +Voyager ingress removed as no longer supported. | +
October 2021 | +21.4.1 | +A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. | +
November 2020 | +20.4.1 | +Initial release of Oracle Unified Directory Services Manager on Kubernetes. | +
To check the status of objects in a namespace use the following command:
+$ kubectl --namespace <namespace> get nodes,pod,service,secret,pv,pvc,ingress -o wide
+
For example:
+$ kubectl --namespace oudsmns get nodes,pod,service,secret,pv,pvc,ingress -o wide
+
The output will look similar to the following:
+$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide
+
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/oudsm-1 1/1 Running 0 18m 10.244.1.89 <worker-node> <none> <none>
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/oudsm-1 ClusterIP 10.101.79.110 <none> 7001/TCP,7002/TCP 18m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1
+service/oudsm-lbr ClusterIP 10.106.241.204 <none> 7001/TCP,7002/TCP 18m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm
+
+NAME TYPE DATA AGE
+secret/default-token-jtwn2 kubernetes.io/service-account-token 3 22h
+secret/orclcred kubernetes.io/dockerconfigjson 1 22h
+secret/oudsm-creds opaque 2 18m
+secret/oudsm-tls-cert kubernetes.io/tls 2 18m
+secret/oudsm-token-7kjff kubernetes.io/service-account-token 3 18m
+secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 18m
+
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
+persistentvolume/oudsm-pv 20Gi RWX Delete Bound oudsmns/oudsm-pvc manual 18m Filesystem
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
+persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 20Gi RWX manual 18m Filesystem
+
+NAME CLASS HOSTS ADDRESS PORTS AGE
+ingress.networking.k8s.io/oudsm-ingress-nginx <none> oudsm-1,oudsm 80, 443 18m
+
Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.
+To view logs for a pod use the following command:
+$ kubectl logs <pod> -n <namespace>
+
For example:
+$ kubectl logs oudsm-1 -n oudsmns
+
Details about a pod can be viewed using the kubectl describe
command:
$ kubectl describe pod <pod> -n <namespace>
+
For example:
+$ kubectl describe pod oudsm-1 -n oudsmns
+
The output will look similar to the following:
+Name: oudsm-1
+Namespace: oudsmns
+Priority: 0
+Node: <worker-node>/100.102.48.28
+Start Time: <DATE>
+Labels: app.kubernetes.io/instance=oudsm
+ app.kubernetes.io/managed-by=Helm
+ app.kubernetes.io/name=oudsm
+ app.kubernetes.io/version=12.2.1.4.0
+ helm.sh/chart=oudsm-0.1
+ oudsm/instance=oudsm-1
+Annotations: meta.helm.sh/release-name: oudsm
+ meta.helm.sh/release-namespace: oudsmns
+Status: Running
+IP: 10.244.1.89
+IPs:
+ IP: 10.244.1.89
+Containers:
+ oudsm:
+ Container ID: cri-o://37dbe00257095adc0a424b8841db40b70bbb65645451e0bc53718a0fd7ce22e4
+ Image: container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-<October'23>
+ Image ID: container-registry.oracle.com/middleware/oudsm_cpu@sha256:47960d36d502d699bfd8f9b1be4c9216e302db95317c288f335f9c8a32974f2c
+ Ports: 7001/TCP, 7002/TCP
+ Host Ports: 0/TCP, 0/TCP
+ State: Running
+ Started: <DATE>
+ Ready: True
+ Restart Count: 0
+ Liveness: http-get http://:7001/oudsm delay=1200s timeout=15s period=60s #success=1 #failure=3
+ Readiness: http-get http://:7001/oudsm delay=900s timeout=15s period=30s #success=1 #failure=3
+ Environment:
+ DOMAIN_NAME: oudsmdomain-1
+ ADMIN_USER: <set to the key 'adminUser' in secret 'oudsm-creds'> Optional: false
+ ADMIN_PASS: <set to the key 'adminPass' in secret 'oudsm-creds'> Optional: false
+ ADMIN_PORT: 7001
+ ADMIN_SSL_PORT: 7002
+ WLS_PLUGIN_ENABLED: true
+ Mounts:
+ /u01/oracle/user_projects from oudsm-pv (rw)
+ /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9ht84 (ro)
+Conditions:
+ Type Status
+ Initialized True
+ Ready True
+ ContainersReady True
+ PodScheduled True
+Volumes:
+ oudsm-pv:
+ Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
+ ClaimName: oudsm-pvc
+ ReadOnly: false
+ kube-api-access-9ht84:
+ Type: Projected (a volume that contains injected data from multiple sources)
+ TokenExpirationSeconds: 3607
+ ConfigMapName: kube-root-ca.crt
+ ConfigMapOptional: <nil>
+ DownwardAPI: true
+QoS Class: BestEffort
+Node-Selectors: <none>
+Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
+ node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning FailedScheduling 39m default-scheduler 0/3 nodes are available: 3 pod has unbound immediate PersistentVolumeClaims.
+ Normal Scheduled 39m default-scheduler Successfully assigned oudsmns/oudsm-1 to <worker-node>
+ Normal Pulled 39m kubelet Container image "container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-<October'23>" already present on machine
+ Normal Created 39m kubelet Created container oudsm
+ Normal Started 39m kubelet Started container oudsm
+
+ Oracle supports the deployment of the following Oracle Fusion Middleware products on Kubernetes. Click on the appropriate document link below to get started on setting up the product.
This document lists all the Oracle Identity Management product deployments supported on Kubernetes.